source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_bitmap_masker_template.c | //------------------------------------------------------------------------------
// GB_bitmap_masker_template: phase2 for R = masker (C, M, Z), R is bitmap
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Computes C<M>=Z or C<!M>=Z, returning the result in R, which is bitmap.
// The input matrix C is not modified. Effectively, this computes R=C and then
// R<M>=Z or R<!M>=Z. If the C_replace descriptor is enabled, then C has
// already been cleared, and is an empty (but non-NULL) matrix.
// phase2: computes R in a single pass
// C is sparse or hypersparse. Z is bitmap or full. R is bitmap.
// M has any sparsity structure.
// ------------------------------------------
// C <!M> = Z R
// ------------------------------------------
// sparse sparse bitmap bitmap
// sparse sparse full bitmap
// sparse bitmap bitmap bitmap
// sparse bitmap full bitmap
// sparse full bitmap bitmap
// sparse full full bitmap
// ------------------------------------------
// C <M> = Z R
// ------------------------------------------
// sparse bitmap bitmap bitmap
// sparse bitmap full bitmap
// sparse full bitmap bitmap
// sparse full full bitmap
// FUTURE:: add special cases for C==Z, C==M, and Z==M aliases
{

    // p: flat index into the R bitmap; rnvals: running count of entries
    // present in R, accumulated via OpenMP reductions below.
    int64_t p, rnvals = 0 ;

    ASSERT (R_sparsity == GxB_BITMAP) ;
    ASSERT (C_is_sparse || C_is_hyper) ;
    ASSERT (Z_is_bitmap || Z_is_full) ;

    //--------------------------------------------------------------------------
    // scatter C into the R bitmap
    //--------------------------------------------------------------------------

    ASSERT_MATRIX_OK (C, "C input to R_bitmap_masker", GB0) ;

    // slice the sparse/hypersparse C into tasks for the parallel scatter
    GB_SLICE_MATRIX (C, 8, chunk) ;

    #pragma omp parallel for num_threads(C_nthreads) schedule(dynamic,1) \
        reduction(+:rnvals)
    for (taskid = 0 ; taskid < C_ntasks ; taskid++)
    {
        int64_t kfirst = kfirst_Cslice [taskid] ;
        int64_t klast  = klast_Cslice  [taskid] ;
        for (int64_t k = kfirst ; k <= klast ; k++)
        {
            // find the part of C(:,k) for this task
            int64_t j = GBH (Ch, k) ;
            int64_t pC_start, pC_end ;
            GB_get_pA (&pC_start, &pC_end, taskid, k, kfirst,
                klast, pstart_Cslice, Cp, vlen) ;

            // R is bitmap, so entry R(i,j) lives at flat position j*vlen + i
            int64_t pR_start = j * vlen ;

            // traverse over C(:,j), the kth vector of C
            for (int64_t pC = pC_start ; pC < pC_end ; pC++)
            {
                // R(i,j) = C(i,j)
                int64_t i = Ci [pC] ;
                int64_t pR = pR_start + i ;
                Rb [pR] = 1 ;
                rnvals++ ;
                #ifndef GB_ISO_MASKER
                // copy the value of C(i,j); if C is iso, every entry of C
                // shares the single value Cx [0]
                memcpy (Rx + (pR)*rsize, Cx + (C_iso? 0:(pC)*rsize), rsize) ;
                #endif
            }
        }
    }
    R->nvals = rnvals ;
    ASSERT_MATRIX_OK (R, "R with C scattered", GB0) ;

    //--------------------------------------------------------------------------
    // R<M>=Z or R<!M>=Z
    //--------------------------------------------------------------------------

    if (M_is_sparse || M_is_hyper)
    {

        //----------------------------------------------------------------------
        // Method05: M is sparse or hypersparse, Z bitmap/full, R bitmap
        //----------------------------------------------------------------------

        //      ------------------------------------------
        //      C       <!M> =       Z              R
        //      ------------------------------------------
        //      sparse  sparse      bitmap          bitmap
        //      sparse  sparse      full            bitmap

        // a sparse/hypersparse M is only used here for the complemented case
        ASSERT (Mask_comp) ;

        //----------------------------------------------------------------------
        // scatter M into the R bitmap
        //----------------------------------------------------------------------

        GB_SLICE_MATRIX (M, 8, chunk) ;

        #pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1)
        for (taskid = 0 ; taskid < M_ntasks ; taskid++)
        {
            int64_t kfirst = kfirst_Mslice [taskid] ;
            int64_t klast  = klast_Mslice  [taskid] ;
            for (int64_t k = kfirst ; k <= klast ; k++)
            {
                // find the part of M(:,k) for this task
                int64_t j = GBH (Mh, k) ;
                int64_t pM_start, pM_end ;
                GB_get_pA (&pM_start, &pM_end, taskid, k, kfirst,
                    klast, pstart_Mslice, Mp, vlen) ;
                int64_t pR_start = j * vlen ;
                // traverse over M(:,j), the kth vector of M
                for (int64_t pM = pM_start ; pM < pM_end ; pM++)
                {
                    // mark R(i,j) if M(i,j) is true; adding 2 encodes the
                    // mask bit alongside the presence bit already in Rb
                    bool mij = GB_mcast (Mx, pM, msize) ;
                    if (mij)
                    {
                        int64_t i = Mi [pM] ;
                        int64_t p = pR_start + i ;
                        Rb [p] += 2 ;
                    }
                }
            }
        }

        //----------------------------------------------------------------------
        // R<!M>=Z, using M scattered into R
        //----------------------------------------------------------------------

        // Rb is marked as follows:
        //  0: R(i,j) is not present, and M(i,j) is false
        //  1: R(i,j) is present, and M(i,j) is false
        //  2: R(i,j) is not present, and M(i,j) is true
        //  3: R(i,j) is present, and M(i,j) is true

        // M is complemented, but shown uncomplemented in the table below since
        // that is how it is scattered into R.

        //  Rb   R(i,j)  M(i,j)  Z(i,j)  modification to R(i,j)
        //  0    -       0       zij     R(i,j) = Z(i,j), new value, rnvals++
        //  0    -       0       -       do nothing
        //  1    rij     0       zij     R(i,j) = Z(i,j), overwrite
        //  1    rij     0       -       delete R(i,j), rnvals--
        //  2    -       1       zij     do nothing, set Rb to 0
        //  2    -       1       -       do nothing, set Rb to 0
        //  3    rij     1       zij     keep R(i,j), set Rb to 1
        //  3    rij     1       -       keep R(i,j), set Rb to 1

        #pragma omp parallel for num_threads(R_nthreads) schedule(static) \
            reduction(+:rnvals)
        for (p = 0 ; p < rnz ; p++)
        {
            int8_t r = Rb [p] ;
            int8_t z = GBB (Zb, p) ;
            switch (r)
            {
                case 0 :    // R(i,j) not present, M(i,j) false
                    if (z)
                    {
                        // R(i,j) = Z(i,j), insert new value
                        #ifndef GB_ISO_MASKER
                        memcpy (Rx +(p)*rsize, Zx +(Z_iso? 0:(p)*rsize), rsize);
                        #endif
                        Rb [p] = 1 ;
                        rnvals++ ;
                    }
                    break ;

                case 1 :    // R(i,j) present, M(i,j) false
                    if (z)
                    {
                        // R(i,j) = Z(i,j), update prior value
                        #ifndef GB_ISO_MASKER
                        memcpy (Rx +(p)*rsize, Zx +(Z_iso? 0:(p)*rsize), rsize);
                        #endif
                    }
                    else
                    {
                        // delete R(i,j)
                        Rb [p] = 0 ;
                        rnvals-- ;
                    }
                    break ;

                case 2 :    // R(i,j) not present, M(i,j) true
                    // clear the scattered mask bit; entry stays absent
                    Rb [p] = 0 ;
                    break ;

                case 3 :    // R(i,j) present, M(i,j) true
                    // clear the scattered mask bit; entry stays present
                    Rb [p] = 1 ;
                    break ;
                default: ;
            }
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // Method06: M and Z are bitmap or full, R is bitmap
        //----------------------------------------------------------------------

        //      ------------------------------------------
        //      C       <!M> =       Z              R
        //      ------------------------------------------
        //      sparse  bitmap      bitmap          bitmap
        //      sparse  bitmap      full            bitmap
        //      sparse  full        bitmap          bitmap
        //      sparse  full        full            bitmap

        //      ------------------------------------------
        //      C       <M> =        Z              R
        //      ------------------------------------------
        //      sparse  bitmap      bitmap          bitmap
        //      sparse  bitmap      full            bitmap
        //      sparse  full        bitmap          bitmap
        //      sparse  full        full            bitmap

        //  Rb   R(i,j)  M(i,j)  Z(i,j)  modification to R(i,j)
        //  0    -       0       zij     do nothing
        //  0    -       0       -       do nothing
        //  1    rij     0       zij     do nothing
        //  1    rij     0       -       do nothing
        //  0    -       1       zij     R(i,j) = Z(i,j), rnvals++
        //  0    -       1       -       do nothing
        //  1    rij     1       zij     R(i,j) = Z(i,j), no change to rnvals
        //  1    rij     1       -       delete, rnvals--

        #pragma omp parallel for num_threads(R_nthreads) schedule(static) \
            reduction(+:rnvals)
        for (p = 0 ; p < rnz ; p++)
        {
            // mij = M(i,j), honoring the complement flag; M bitmap/full is
            // addressed directly by the same flat index p as R
            bool mij = GBB (Mb, p) && GB_mcast (Mx, p, msize) ;
            if (Mask_comp) mij = !mij ;
            if (mij)
            {
                int8_t z = GBB (Zb, p) ;
                int8_t r = Rb [p] ;
                if (r)
                {
                    if (z)
                    {
                        // R(i,j) = Z(i,j), update, no change to rnvals
                        #ifndef GB_ISO_MASKER
                        memcpy (Rx +(p)*rsize, Zx +(Z_iso? 0:(p)*rsize), rsize);
                        #endif
                    }
                    else
                    {
                        // delete R(i,j)
                        Rb [p] = 0 ;
                        rnvals-- ;
                    }
                }
                else if (z)
                {
                    // R(i,j) = Z(i,j), new entry
                    #ifndef GB_ISO_MASKER
                    memcpy (Rx +(p)*rsize, Zx +(Z_iso? 0:(p)*rsize), rsize) ;
                    #endif
                    Rb [p] = 1 ;
                    rnvals++ ;
                }
            }
        }
    }

    R->nvals = rnvals ;
}
|
c3_fmt.c | /*
* Generic crypt(3) support, as well as support for glibc's crypt_r(3) and
* Solaris' MT-safe crypt(3C) with OpenMP parallelization.
*
* This file is part of John the Ripper password cracker,
* Copyright (c) 2009-2015 by Solar Designer
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
*/
#if AC_BUILT
#include "autoconfig.h"
#endif
#if HAVE_CRYPT
/* if this comes after the #define crap below, there are often
* problems with strdup or other things not being defined. We
* move this block of includes to above the _XOPEN_* defines
*/
#if STRING_WITH_STRINGS
#include <string.h>
#include <strings.h>
#elif HAVE_STRING_H
#include <string.h>
#elif HAVE_STRINGS_H
#include <strings.h>
#endif
#if !AC_BUILT
#include <string.h>
#ifndef _MSC_VER
#include <strings.h>
#endif
#undef _XOPEN_VERSION
#undef _XOPEN_SOURCE
#undef _XOPEN_SOURCE_EXTENDED
#undef _GNU_SOURCE
#define _XOPEN_SOURCE 4 /* for crypt(3) */
#define _XOPEN_SOURCE_EXTENDED 1 /* for OpenBSD */
#define _XOPEN_VERSION 4
#define _XPG4_2
#define _GNU_SOURCE 1 /* for crypt_r(3) */
#include <stdio.h>
#ifdef __CYGWIN__
#include <crypt.h>
#endif
#if defined(_OPENMP) && defined(__GLIBC__)
#include <crypt.h>
#else
#if (!AC_BUILT || HAVE_UNISTD_H) && !_MSC_VER
#include <unistd.h>
#endif
#endif
#endif
#if (!AC_BUILT && defined(HAVE_CRYPT))
#undef HAVE_CRYPT_H
#define HAVE_CRYPT_H 1
#endif
#if HAVE_CRYPT_H
#include <crypt.h>
#endif
#if (!AC_BUILT || HAVE_UNISTD_H) && !_MSC_VER
#include <unistd.h>
#endif
#if defined(_OPENMP)
#include <omp.h> /* for omp_get_thread_num() */
#endif
#include "options.h"
#include "arch.h"
#include "misc.h"
#include "params.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "loader.h"
#include "john.h"
#ifdef HAVE_MPI
#include "john-mpi.h"
#endif
#include "memdbg.h"
#define FORMAT_LABEL "crypt"
#define FORMAT_NAME "generic crypt(3)"
#define ALGORITHM_NAME "?/" ARCH_BITS_STR
#define BENCHMARK_COMMENT " DES"
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 72
#define BINARY_SIZE 128
#define BINARY_ALIGN 1
#define SALT_SIZE BINARY_SIZE
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 96
#define MAX_KEYS_PER_CRYPT 96
static struct fmt_tests tests[] = {
{"CCNf8Sbh3HDfQ", "U*U*U*U*"},
{"CCX.K.MFy4Ois", "U*U***U"},
{"CC4rMpbg9AMZ.", "U*U***U*"},
{"XXxzOu6maQKqQ", "*U*U*U*U"},
{"SDbsugeBiC58A", ""},
{NULL}
};
static char saved_key[MAX_KEYS_PER_CRYPT][PLAINTEXT_LENGTH + 1];
static char saved_salt[SALT_SIZE];
static char crypt_out[MAX_KEYS_PER_CRYPT][BINARY_SIZE];
#if defined(_OPENMP) && defined(__GLIBC__)
#define MAX_THREADS MAX_KEYS_PER_CRYPT
/* We assume that this is zero-initialized (all NULL pointers) */
static struct crypt_data *crypt_data[MAX_THREADS];
#endif
/*
 * Format init hook.  When --subformat= is given, replaces the default DES
 * test vectors with vectors for the requested crypt(3) flavor (or a verbatim
 * salt), then probes the system's crypt()/crypt_r() with the built-in test
 * passwords to verify the flavor is actually supported; exits via error()
 * if it is not.  Without --subformat this is a no-op.
 */
static void init(struct fmt_main *self)
{
	if (options.subformat) {
		int i;
		/* default probe salt: taken from the first built-in DES vector */
		char *salt = tests[0].ciphertext;
#if defined(_OPENMP) && defined(__GLIBC__)
		struct crypt_data data;
		data.initialized = 0;
#endif

/*
 * Allow
 * ./john --list=format-tests --format=crypt --subformat=md5crypt
 * in addition to
 * ./john --test --format=crypt --subformat=md5crypt
 *
 * That's why, don't require FLG_TEST_CHK to be set.
 */
		if (options.flags & FLG_PASSWD) {
			fprintf(stderr,
			    "\n%s: --subformat option is only for --test or --list=format-tests\n", FORMAT_LABEL);
			error();
		}

		if (!strcmp(options.subformat, "?")) {
			fprintf(stderr, "Subformat may either be a verbatim salt, or: descrypt, md5crypt, bcrypt, sha256crypt, sha512crypt, sun-md5\n\n");
			error();
		} else if (!strcasecmp(options.subformat, "md5crypt") ||
		    !strcasecmp(options.subformat, "md5")) {
			/* local tests[] intentionally shadows the file-scope one */
			static struct fmt_tests tests[] = {
				{"$1$12345678$aIccj83HRDBo6ux1bVx7D1", "0123456789ABCDE"},
				{"$1$12345678$f8QoJuo0DpBRfQSD0vglc1", "12345678"},
				{"$1$$qRPK7m23GJusamGpoGLby/", ""},
				{NULL} };
			self->params.tests = tests;
			self->params.benchmark_comment = " MD5";
			salt = "$1$dXc3I7Rw$";
		} else if (!strcasecmp(options.subformat, "sunmd5") ||
		    !strcasecmp(options.subformat, "sun-md5")) {
			static struct fmt_tests tests[] = {
				{"$md5$rounds=904$Vc3VgyFx44iS8.Yu$Scf90iLWN6O6mT9TA06NK/", "test"},
				{"$md5$rounds=904$ZZZig8GS.S0pRNhc$dw5NMYJoxLlnFq4E.phLy.", "Don41dL33"},
				{"$md5$rounds=904$zSuVTn567UJLv14u$q2n2ZBFwKg2tElFBIzUq/0", "J4ck!3Wood"},
				{NULL} };
			self->params.tests = tests;
			self->params.benchmark_comment = " SunMD5";
			salt = "$md5$rounds=904$Vc3VgyFx44iS8.Yu$dummy";
		} else if ((!strcasecmp(options.subformat, "sha256crypt")) ||
		    (!strcasecmp(options.subformat, "sha-256")) ||
		    (!strcasecmp(options.subformat, "sha256"))) {
			static struct fmt_tests tests[] = {
				{"$5$LKO/Ute40T3FNF95$U0prpBQd4PloSGU0pnpM4z9wKn4vZ1.jsrzQfPqxph9", "U*U*U*U*"},
				{"$5$LKO/Ute40T3FNF95$fdgfoJEBoMajNxCv3Ru9LyQ0xZgv0OBMQoq80LQ/Qd.", "U*U***U"},
				{"$5$LKO/Ute40T3FNF95$8Ry82xGnnPI/6HtFYnvPBTYgOL23sdMXn8C29aO.x/A", "U*U***U*"},
				{NULL} };
			self->params.tests = tests;
			self->params.benchmark_comment = " SHA-256 rounds=5000";
			salt = "$5$LKO/Ute40T3FNF95$";
		} else if ((!strcasecmp(options.subformat, "sha512crypt")) ||
		    (!strcasecmp(options.subformat, "sha-512")) ||
		    (!strcasecmp(options.subformat, "sha512"))) {
			static struct fmt_tests tests[] = {
				{"$6$LKO/Ute40T3FNF95$6S/6T2YuOIHY0N3XpLKABJ3soYcXD9mB7uVbtEZDj/LNscVhZoZ9DEH.sBciDrMsHOWOoASbNLTypH/5X26gN0", "U*U*U*U*"},
				{"$6$LKO/Ute40T3FNF95$wK80cNqkiAUzFuVGxW6eFe8J.fSVI65MD5yEm8EjYMaJuDrhwe5XXpHDJpwF/kY.afsUs1LlgQAaOapVNbggZ1", "U*U***U"},
				{"$6$LKO/Ute40T3FNF95$YS81pp1uhOHTgKLhSMtQCr2cDiUiN03Ud3gyD4ameviK1Zqz.w3oXsMgO6LrqmIEcG3hiqaUqHi/WEE2zrZqa/", "U*U***U*"},
				{NULL} };
			self->params.tests = tests;
			self->params.benchmark_comment = " SHA-512 rounds=5000";
			salt = "$6$LKO/Ute40T3FNF95$";
		} else if ((!strcasecmp(options.subformat, "bf")) ||
		    (!strcasecmp(options.subformat, "blowfish")) ||
		    (!strcasecmp(options.subformat, "bcrypt"))) {
			static struct fmt_tests tests[] = {
				{"$2a$05$CCCCCCCCCCCCCCCCCCCCC.E5YPO9kmyuRGyh0XouQYb4YMJKvyOeW","U*U"},
				{"$2a$05$CCCCCCCCCCCCCCCCCCCCC.VGOzA784oUp/Z0DY336zx7pLYAy0lwK","U*U*"},
				{"$2a$05$XXXXXXXXXXXXXXXXXXXXXOAcXxm9kjPGEMsLznoKqmqw7tc8WCx4a","U*U*U"},
				{NULL} };
			self->params.tests = tests;
			self->params.benchmark_comment = " BF x32";
			salt = "$2a$05$AD6y0uWY62Xk2TXZ";
		} else if (!strcasecmp(options.subformat, "descrypt") ||
		    !strcasecmp(options.subformat, "des")) {
			salt = "CC";
		} else {
			/* unrecognized keyword: treat --subformat as a verbatim salt */
			char *p = mem_alloc_tiny(strlen(options.subformat) + 2,
			    MEM_ALIGN_NONE);
			strcpy(p, " ");
			strcat(p, options.subformat);
			self->params.benchmark_comment = p;
			salt = options.subformat;
/* turn off many salts test, since we are not updating the */
/* params.tests structure data. */
			self->params.benchmark_length = -1;
		}

		/* re-hash the 5 built-in DES plaintexts with the chosen salt */
		for (i = 0; i < 5; i++)
		{
			char *c;
#if defined(_OPENMP) && defined(__GLIBC__)
			c = crypt_r(tests[i].plaintext, salt, &data);
#else
			c = crypt(tests[i].plaintext, salt);
#endif
			/* any supported flavor produces at least 7 characters */
			if (c && strlen(c) >= 7)
				tests[i].ciphertext = strdup(c);
			else {
				fprintf(stderr, "%s not supported on this system\n",
				    options.subformat);
				error();
			}
		}

		/* a 13-char result means crypt() fell back to plain DES */
		if (strlen(tests[0].ciphertext) == 13 &&
		    strcasecmp(options.subformat, "descrypt") &&
		    strcasecmp(options.subformat, "des")) {
			fprintf(stderr, "%s not supported on this system\n",
			    options.subformat);
			error();
		}
	}
}
/*
 * Decide whether this system's crypt(3) can process the given hash encoding.
 * Classifies the encoding into a type id by length/character-set heuristics,
 * then (on first sight of a length/id combination) actually calls
 * crypt()/crypt_r() with a dummy password and checks that the output has the
 * same length and salt prefix.  Results are cached in the static sup_length[]
 * and sup_id[] tables (1 = supported, -1 = unsupported, 0 = not yet probed).
 * Returns 1 if the hash can be loaded, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	int length, count_base64, count_base64_2, id, pw_length;
	char pw[PLAINTEXT_LENGTH + 1], *new_ciphertext;
/* We assume that these are zero-initialized */
	static char sup_length[BINARY_SIZE], sup_id[0x80];

	/* count total base-64 chars, and base-64 chars past the 2-char salt */
	length = count_base64 = count_base64_2 = 0;
	while (ciphertext[length]) {
		if (atoi64[ARCH_INDEX(ciphertext[length])] != 0x7F) {
			count_base64++;
			if (length >= 2)
				count_base64_2++;
		}
		length++;
	}

	if (length < 13 || length >= BINARY_SIZE)
		return 0;

	/* classify the encoding; see the tunable-cost comment further below */
	id = 0;
	if (length == 13 && count_base64 == 13) /* valid salt */
		id = 1;
	else
	if (length == 13 && count_base64_2 == 11) /* invalid salt */
		id = 2;
	else
	if (length >= 13 &&
	    count_base64_2 >= length - 2 && /* allow for invalid salt */
	    (length - 2) % 11 == 0)
		id = 3;
	else
	if (length == 20 && count_base64 == 19 && ciphertext[0] == '_')
		id = 4;
	else
	if (ciphertext[0] == '$') {
		/* modular-crypt style: use the char after '$' as the type id */
		id = (unsigned char)ciphertext[1];
		if (id <= 0x20 || id >= 0x80)
			id = 9;
	} else
	if (ciphertext[0] == '*' || ciphertext[0] == '!') /* likely locked */
		id = 10;

/* Previously detected as supported */
	if (sup_length[length] > 0 && sup_id[id] > 0)
		return 1;

/* Previously detected as unsupported */
	if (sup_length[length] < 0 && sup_id[id] < 0)
		return 0;

	/* derive a plausible dummy password length from the hash length */
	pw_length = ((length - 2) / 11) << 3;
	if (pw_length >= sizeof(pw))
		pw_length = sizeof(pw) - 1;
	memcpy(pw, ciphertext, pw_length); /* reuse the string, why not? */
	pw[pw_length] = 0;

#if defined(_OPENMP) && defined(__GLIBC__)
/*
 * Let's use crypt_r(3) just like we will in crypt_all() below.
 * It is possible that crypt(3) and crypt_r(3) differ in their supported hash
 * types on a given system.
 */
	{
		struct crypt_data **data = &crypt_data[0];
		if (!*data) {
/*
 * **data is not exactly tiny, but we use mem_alloc_tiny() for its alignment
 * support and error checking.  We do not need to free() this memory anyway.
 *
 * The page alignment is to keep different threads' data on different pages.
 */
			*data = mem_alloc_tiny(sizeof(**data), MEM_ALIGN_PAGE);
			memset(*data, 0, sizeof(**data));
		}
		new_ciphertext = crypt_r(pw, ciphertext, *data);
	}
#else
	new_ciphertext = crypt(pw, ciphertext);
#endif

	/* supported if the output has the same length and salt prefix */
	if (new_ciphertext && strlen(new_ciphertext) == length &&
	    !strncmp(new_ciphertext, ciphertext, 2)) {
		sup_length[length] = 1;
		sup_id[id] = 1;
		return 1;
	}

	if (id != 10 && !ldr_in_pot)
	if (john_main_process)
		fprintf(stderr, "Warning: "
		    "hash encoding string length %d, type id %c%c\n"
		    "appears to be unsupported on this system; "
		    "will not load such hashes.\n",
		    length, id > 0x20 ? '$' : '#', id > 0x20 ? id : '0' + id);

	if (!sup_length[length])
		sup_length[length] = -1;
	if (!sup_id[id])
		sup_id[id] = -1;
	return 0;
}
/*
 * Return the "binary" for this format: simply the full hash encoding string,
 * copied into a fixed-size, zero-padded static buffer.  The NUL padding
 * beyond the string is required so that byte-wise comparisons of the whole
 * buffer behave consistently.
 */
static void *binary(char *ciphertext)
{
	static char out[BINARY_SIZE];
	size_t n = strlen(ciphertext);

	if (n > sizeof(out))
		n = sizeof(out);
	memset(out, 0, sizeof(out));	/* NUL padding is required */
	memcpy(out, ciphertext, n);
	return out;
}
/*
 * Extract the salt portion of a hash encoding into a fixed-size, zero-padded
 * static buffer.  By default the whole encoding is used as the "salt"; the
 * optional block below cuts it down to just the real salt for known hash
 * flavors so that equal salts are detected (enabling the many-salts
 * optimization).  Returns a pointer to a static SALT_SIZE buffer.
 */
static void *salt(char *ciphertext)
{
	static char out[SALT_SIZE];
	int cut = sizeof(out);	/* default: keep the entire encoding */

#if 1
/* This piece is optional, but matching salts are not detected without it */
	int length = strlen(ciphertext);

	switch (length) {
	case 13:	/* descrypt */
	case 24:
		cut = 2;
		break;

	case 20:	/* BSDI-style extended DES: "_" + 8 chars of setup */
		if (ciphertext[0] == '_') cut = 9;
		break;

	case 35:
	case 36:
	case 46:
	case 57:
		if (ciphertext[0] != '$') cut = 2;
		/* fall through */

	default:
		/* md5crypt / sha256crypt / sha512crypt: salt ends at last '$' */
		if ((length >= 26 && length <= 34 &&
		    !strncmp(ciphertext, "$1$", 3)) ||
		    (length >= 47 && !strncmp(ciphertext, "$5$", 3)) ||
		    (length >= 90 && !strncmp(ciphertext, "$6$", 3))) {
			char *p = strrchr(ciphertext + 3, '$');
			if (p) cut = p - ciphertext;
		} else
		if (length == 59 && !strncmp(ciphertext, "$2$", 3))
			cut = 28;	/* pre-$2a$ bcrypt */
		else
		if (length == 60 &&
		    (!strncmp(ciphertext, "$2a$", 4) ||
		    !strncmp(ciphertext, "$2b$", 4) ||
		    !strncmp(ciphertext, "$2x$", 4) ||
		    !strncmp(ciphertext, "$2y$", 4)))
			cut = 29;	/* bcrypt: "$2?$NN$" + 22-char salt */
		else
		if (length >= 27 &&
		    (!strncmp(ciphertext, "$md5$", 5) ||
		    !strncmp(ciphertext, "$md5,", 5))) {
			char *p = strrchr(ciphertext + 4, '$');
			if (p) {
				/* NUL padding is required */
				memset(out, 0, sizeof(out));
				memcpy(out, ciphertext, ++p - ciphertext);
/*
 * Workaround what looks like a bug in sunmd5.c: crypt_genhash_impl() where it
 * takes a different substring as salt depending on whether the optional
 * existing hash encoding is present after the salt or not.  Specifically, the
 * last '$' delimiter is included into the salt when there's no existing hash
 * encoding after it, but is omitted from the salt otherwise.
 */
				out[p - ciphertext] = 'x';
				return out;
			}
		}
	}
#endif

	/* NUL padding is required */
	memset(out, 0, sizeof(out));
	memcpy(out, ciphertext, cut);

	return out;
}
#define H(s, i) \
((int)(unsigned char)(atoi64[ARCH_INDEX((s)[(i)])] ^ (s)[(i) - 1]))
#define H0(s) \
int i = strlen(s) - 2; \
return i > 0 ? H((s), i) & PH_MASK_0 : 0
#define H1(s) \
int i = strlen(s) - 2; \
return i > 2 ? (H((s), i) ^ (H((s), i - 2) << 4)) & PH_MASK_1 : 0
#define H2(s) \
int i = strlen(s) - 2; \
return i > 2 ? (H((s), i) ^ (H((s), i - 2) << 6)) & PH_MASK_2 : 0
#define H3(s) \
int i = strlen(s) - 2; \
return i > 4 ? (H((s), i) ^ (H((s), i - 2) << 5) ^ \
(H((s), i - 4) << 10)) & PH_MASK_3 : 0
#define H4(s) \
int i = strlen(s) - 2; \
return i > 6 ? (H((s), i) ^ (H((s), i - 2) << 5) ^ \
(H((s), i - 4) << 10) ^ (H((s), i - 6) << 15)) & PH_MASK_4 : 0
/*
 * Hash-table bucket functions, generated from the H* macros above.  Each
 * derives a small integer from the trailing characters of a hash encoding:
 * binary_hash_N() from a stored "binary" (the encoding string), and
 * get_hash_N() from the freshly computed crypt_out[index].  The H* macros
 * contain the return statement.
 */
static int binary_hash_0(void *binary)
{
	H0((char *)binary);
}

static int binary_hash_1(void *binary)
{
	H1((char *)binary);
}

static int binary_hash_2(void *binary)
{
	H2((char *)binary);
}

static int binary_hash_3(void *binary)
{
	H3((char *)binary);
}

static int binary_hash_4(void *binary)
{
	H4((char *)binary);
}

static int get_hash_0(int index)
{
	H0(crypt_out[index]);
}

static int get_hash_1(int index)
{
	H1(crypt_out[index]);
}

static int get_hash_2(int index)
{
	H2(crypt_out[index]);
}

static int get_hash_3(int index)
{
	H3(crypt_out[index]);
}

static int get_hash_4(int index)
{
	H4(crypt_out[index]);
}
/*
 * Hash a salt (as produced by salt() above) into a bucket index for the
 * loader's salt table.  Mixes the last two characters of the salt string
 * through the atoi64[] base-64 decode table.
 */
static int salt_hash(void *salt)
{
	const char *s = salt;
	const unsigned char *u = salt;
	int pos = (int)strlen(s) - 1;
	int h;

	/* step back one position when the salt is longer than two chars */
	if (pos > 1)
		pos--;

	h = (unsigned char)atoi64[ARCH_INDEX(s[pos])];
	h ^= u[pos - 1];
	h <<= 6;
	h ^= (unsigned char)atoi64[ARCH_INDEX(s[pos - 1])];
	h ^= u[pos];

	return h & (SALT_HASH_SIZE - 1);
}
/*
 * Store the current salt for subsequent crypt_all() calls.
 * NOTE(review): plain strcpy is safe here only because valid() rejects
 * encodings of length >= BINARY_SIZE (== SALT_SIZE), so the NUL-terminated
 * salt always fits in saved_salt.
 */
static void set_salt(void *salt)
{
	strcpy(saved_salt, salt);
}
/* Store candidate password "key" at slot "index", truncated and
 * NUL-terminated to PLAINTEXT_LENGTH chars by strnzcpy(). */
static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}
/* Return the candidate password previously stored at slot "index". */
static char *get_key(int index)
{
	return saved_key[index];
}
/*
 * Compute crypt() of all stored candidate keys against the current salt,
 * writing the results into crypt_out[].  Three strategies:
 *   - glibc + OpenMP: parallel loop over crypt_r(3) with one lazily
 *     allocated, page-aligned struct crypt_data per thread;
 *   - Solaris + OpenMP: parallel loop over the MT-safe crypt(3C);
 *   - otherwise: serial crypt(3).
 * Returns the (unchanged) candidate count.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	static int warned = 0;
	int count = *pcount;
	int index;

#if defined(_OPENMP) && defined(__GLIBC__)
#pragma omp parallel for default(none) private(index) shared(warned, count, crypt_out, saved_key, saved_salt, crypt_data, stderr)
	for (index = 0; index < count; index++) {
		char *hash;
		int t = omp_get_thread_num();
		if (t < MAX_THREADS) {
			struct crypt_data **data = &crypt_data[t];
			if (!*data) {
/* Stagger the structs to reduce their competition for the same cache lines */
				size_t mask = MEM_ALIGN_PAGE, shift = 0;
				/* derive a per-thread offset from the bits of t */
				while (t) {
					mask >>= 1;
					if (mask < MEM_ALIGN_CACHE)
						break;
					if (t & 1)
						shift += mask;
					t >>= 1;
				}
				*data = (void *)((char *)
				    mem_alloc_tiny(sizeof(**data) +
				    shift, MEM_ALIGN_PAGE) + shift);
				/* zeroing also clears data->initialized for glibc */
				memset(*data, 0, sizeof(**data));
			}
			hash = crypt_r(saved_key[index], saved_salt, *data);
		} else { /* should not happen */
			struct crypt_data data;
			memset(&data, 0, sizeof(data));
			hash = crypt_r(saved_key[index], saved_salt, &data);
		}
		if (!hash) {
#pragma omp critical
			if (!warned) {
				fprintf(stderr,
				    "Warning: crypt_r() returned NULL\n");
				warned = 1;
			}
			hash = "";	/* empty string will never match */
		}
		strnzcpy(crypt_out[index], hash, BINARY_SIZE);
	}
#else
#if defined(_OPENMP) && defined(__sun)
/*
 * crypt(3C) is MT-safe on Solaris.  For traditional DES-based hashes, this is
 * implemented with locking (hence there's no speedup from the use of multiple
 * threads, and the per-thread performance is extremely poor anyway).  For
 * modern hash types, the function is actually able to compute multiple hashes
 * in parallel by different threads (and the performance for some hash types is
 * reasonable).  Overall, this code is reasonable to use for SHA-crypt and
 * SunMD5 hashes, which are not yet supported by non-jumbo John natively.
 */
#pragma omp parallel for /* default(none) private(index) shared(warned, count, crypt_out, saved_key, saved_salt, stderr) or __iob */
#endif
	for (index = 0; index < count; index++) {
		char *hash = crypt(saved_key[index], saved_salt);
		if (!hash) {
#if defined(_OPENMP) && defined(__sun)
#pragma omp critical
#endif
			if (!warned) {
				fprintf(stderr,
				    "Warning: crypt() returned NULL\n");
				warned = 1;
			}
			hash = "";	/* empty string will never match */
		}
		strnzcpy(crypt_out[index], hash, BINARY_SIZE);
	}
#endif

	return count;
}
/*
 * Return 1 if the stored hash encoding ("binary") matches any of the first
 * "count" computed hashes in crypt_out[], 0 otherwise.
 */
static int cmp_all(void *binary, int count)
{
	int i = 0;

	while (i < count) {
		if (strcmp((char *)binary, crypt_out[i]) == 0)
			return 1;
		i++;
	}
	return 0;
}
/* Return 1 if the stored hash encoding matches crypt_out[index] exactly. */
static int cmp_one(void *binary, int index)
{
	return strcmp(crypt_out[index], (char *)binary) == 0;
}
/* cmp_one() already compared the full encodings, so there is nothing
 * further to verify here; always report an exact match. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/*
* For generic crypt(3), the algorithm is returned as the first "tunable cost":
* 0: unknown (shouldn't happen
* 1: descrypt
* 2: md5crypt
* 3: sunmd5
* 4: bcrypt
* 5: sha256crypt
* 6: sha512crypt
* New subformats should be added to the end of the list.
* Otherwise, restored sessions might contine cracking different hashes
* if the (not yet implemented) option --cost= had been used
* when starting that session.
*/
/*
 * Classify a salt (NUL-terminated encoding prefix) into the algorithm id
 * used as this format's first tunable cost; see the list in the comment
 * above (0 = unknown, 1 = descrypt, 2 = md5crypt, 3 = sunmd5, 4 = bcrypt,
 * 5 = sha256crypt, 6 = sha512crypt).
 *
 * Fix: bcrypt was recognized only as "$2a"; valid() and salt() elsewhere in
 * this file also accept the "$2b", "$2x" and "$2y" prefixes, so those now
 * classify as bcrypt (4) too instead of falling through to unknown (0).
 */
static unsigned int c3_subformat_algorithm(void *salt)
{
	char *c3_salt = salt;

	if (!c3_salt[0] || !c3_salt[1])
		return 0;
	if (!c3_salt[2])
		return 1;	/* exactly two chars: traditional descrypt salt */
	if (c3_salt[0] != '$')
		return 0;
	if (c3_salt[1] == '1')
		return 2;
	if (c3_salt[1] == 'm')
		return 3;	/* "$md5..." SunMD5 */
	if (c3_salt[1] == '2' &&
	    (c3_salt[2] == 'a' || c3_salt[2] == 'b' ||
	     c3_salt[2] == 'x' || c3_salt[2] == 'y'))
		return 4;
	if (c3_salt[1] == '5')
		return 5;
	if (c3_salt[1] == '6')
		return 6;
	return 0;
}
/*
 * Second tunable cost: the algorithm-specific iteration count parsed from
 * the salt.  Returns 1 when the algorithm has no tunable cost parameter
 * (unknown, descrypt, md5crypt).
 *
 * Fixes over the previous revision:
 *  - sscanf used "%d" with an unsigned int* (undefined behavior per ISO C);
 *    now uses "%u";
 *  - "rounds" could be read uninitialized when sscanf matched nothing
 *    (e.g. a malformed bcrypt cost field); sscanf's return value is now
 *    checked and a defined default is returned instead;
 *  - removed the unreachable "case 1"/"case 2" switch arms (returning
 *    25/1000): the early "algorithm < 3" return made them dead code, and
 *    keeping them only misled readers.  Observable behavior is unchanged.
 */
static unsigned int c3_algorithm_specific_cost1(void *salt)
{
	unsigned int algorithm, rounds;
	char *c3_salt;

	c3_salt = salt;
	algorithm = c3_subformat_algorithm(salt);

	if (algorithm < 3)
		/* unknown, descrypt, md5crypt: no tunable cost parameters */
		return 1;

	switch (algorithm) {
	case 3: /* SunMD5: "$md5[,rounds=N]$..." */
		c3_salt = strstr(c3_salt, "rounds=");
		if (!c3_salt || sscanf(c3_salt, "rounds=%u", &rounds) != 1)
			return 904 + 4096; /* implicit default round count */
		return rounds + 4096; /* 4096 basic rounds are always added */
	case 4: /* bcrypt: "$2?$NN$...", NN is the log2 cost */
		if (sscanf(c3_salt + 4, "%u", &rounds) != 1)
			return 1; /* malformed cost field */
		return rounds;
	case 5:
	case 6:
		/* sha256crypt and sha512crypt handled the same:
		 * $x$rounds=xxxx$salt$hash (or $x$salt$hash for the
		 * 5000-round default) */
		c3_salt += 3;
		if (strncmp(c3_salt, "rounds=", 7) ||
		    sscanf(c3_salt, "rounds=%u", &rounds) != 1)
			return 5000; /* default */
		return rounds;
	}
	return 1;
}
/* Format registration: wires the functions above into John's fmt_main
 * interface (params first, then methods). */
struct fmt_main fmt_crypt = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,			/* minimum plaintext length */
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
/*
 * use algorithm as first tunable cost:
 * (0: unknown)
 * descrypt, md5crypt, sunmd5, bcrypt, sha512crypt, sha256crypt
 */
			"algorithm [1:descrypt 2:md5crypt 3:sunmd5 4:bcrypt 5:sha256crypt 6:sha512crypt]",
			"algorithm specific iterations",
		},
		tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		binary,
		salt,
		{
			c3_subformat_algorithm,
#if 1
			c3_algorithm_specific_cost1
#endif
		},
		fmt_default_source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			binary_hash_4,
			NULL,	/* hash functions 5 and 6 not provided */
			NULL
		},
		salt_hash,
		NULL,		/* no salt_compare */
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			NULL,
			NULL
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif // HAVE_CRYPT
|
GB_unop__identity_int16_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int16_fp64)
// op(A') function: GB (_unop_tran__identity_int16_fp64)
// C type: int16_t
// A type: double
// cast: int16_t cij = GB_cast_to_int16_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = GB_cast_to_int16_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = GB_cast_to_int16_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity unary op with fp64 -> int16 cast,
// in parallel over all anz positions of A.  (Auto-generated kernel; code
// left untouched, comments only.)
GrB_Info GB (_unop_apply__identity_int16_fp64)
(
    int16_t *Cx,                // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: every one of the anz positions holds a value
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            int16_t z = GB_cast_to_int16_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions with no entry in the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            int16_t z = GB_cast_to_int16_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the identity unary op.
// (Auto-generated kernel; code left untouched, comments only.)
GrB_Info GB (_unop_tran__identity_int16_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // one workspace per thread
    const int64_t *restrict A_slice,    // partition of A's vectors by thread
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose loop body is shared by all unary ops via this include
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
gather_double.c | // create a list of 64 numbers, and only sum the even ones
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define N 32000
/*
 * Build N random doubles and a random index ("gather") array, then compute
 * the gathered sum twice -- once serially and once with an OpenMP SIMD
 * reduction -- and print both for comparison.
 *
 * Fixes: time(NULL) was called without <time.h> (implicit declaration; the
 * required include is added at the top of the file); the malloc() results
 * were never checked; and numbers/mask were leaked on exit.
 */
int main() {
    srand(time(NULL));
    double *numbers = malloc(sizeof(double) * N);
    int *mask = malloc(sizeof(int) * N);
    if (!numbers || !mask) {
        fprintf(stderr, "allocation failed\n");
        free(numbers);
        free(mask);
        return 1;
    }

    // Init the numbers (small values 0..9) and the gather indices (0..N-1)
    for (int i = 0; i < N; i++) numbers[i] = rand() % 10;
    for (int i = 0; i < N; i++) mask[i] = rand() % N;

    /*for (int i = 0; i<8; i++) printf("%.1f ", numbers[i]);
    puts("\n---");
    for (int i = 0; i<8; i++) printf("%d ", mask[i]);
    puts("\n---");*/

    // Serial gather-sum
    double result1 = 0;
    for (int i = 0; i < N; i++) {
        result1 += numbers[mask[i]];
    }

    // Vectorized gather-sum
    double result2 = 0;
    #pragma omp simd reduction(+:result2)
    for (int i = 0; i < N; i++) {
        result2 += numbers[mask[i]];
    }

    // print
    printf("Result1: %f | Result2: %f\n", result1, result2);

    free(numbers);
    free(mask);
    return 0;
}
|
mkldnn_graph.h | // Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ie_parallel.hpp"
#include "config.h"
#include "mkldnn_memory.h"
#include "mean_image.h"
#include "mkldnn_node.h"
#include "mkldnn_edge.h"
#include "mkldnn_streams.h"
#include <map>
#include <string>
#include <vector>
#include <memory>
namespace MKLDNNPlugin {
// MKLDNNGraph: container for an MKL-DNN execution graph (nodes, edges,
// input/output node maps) built from an InferenceEngine network, plus the
// threading machinery (TBB arena/observer or OMP pinning) used to load and
// run the graph on a given NUMA node/socket.
class MKLDNNGraph {
public:
typedef std::shared_ptr<MKLDNNGraph> Ptr;
// NUMA socket this graph is associated with (also the default passed to
// CreateGraph).
int socket;
// Lifecycle state: NotReady until the graph is fully built.
enum Status {
NotReady = 0,
Ready = 1,
};
// NOTE(review): the mem-initializer list order (status, eng, socket) does
// not match the declaration order (socket, ..., status, ..., eng); members
// are initialized in declaration order regardless, which is harmless here
// (no interdependence) but triggers -Wreorder.
MKLDNNGraph(): status(NotReady), eng(mkldnn::engine(mkldnn::engine::kind::cpu, 0)), socket(0) {}
Status GetStatus() {
return status;
}
// True once the graph has been fully created and initialized.
bool IsReady() {
return (GetStatus() == Ready);
}
void setConfig(const Config &cfg);
void setProperty(const std::map<std::string, std::string> &properties);
Config getProperty();
void getInputBlobs(InferenceEngine::BlobMap &in_map);
void getOutputBlobs(InferenceEngine::BlobMap &out_map);
// Builds the graph from a network representation (NET is ICNNNetwork or a
// TensorIterator body; see the Replicate overloads below). 'socket'
// selects the NUMA node the graph is created for.
template<typename NET>
void CreateGraph(const NET &network,
const MKLDNNExtensionManager::Ptr& extMgr,
int socket = 0);
// True if a mean image was registered for the given input name.
bool hasMeanImageFor(const std::string& name) {
return _meanImages.find(name) != _meanImages.end();
}
void PushInputData(const std::string& name, const InferenceEngine::Blob::Ptr &in);
void PullOutputData(InferenceEngine::BlobMap &out);
// Runs inference; batch == -1 presumably means "use the full batch" --
// TODO confirm against the implementation.
void Infer(int batch = -1);
std::vector<MKLDNNNodePtr>& GetNodes() {
return graphNodes;
}
std::vector<MKLDNNEdgePtr>& GetEdges() {
return graphEdges;
}
std::vector<MKLDNNNodePtr>& GetOutputNodes() {
return outputNodes;
}
mkldnn::engine getEngine() const {
return eng;
}
void GetPerfData(std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> &perfMap) const;
void RemoveDroppedNodes();
void RemoveDroppedEdges();
void DropNode(const MKLDNNNodePtr& node);
void DropDWConvNode(const MKLDNNNodePtr& node);
// Creates the TBB arena (NUMA-constrained or plain) and/or the pinning
// observer according to the 'pinning' policy, then loads the graph inside
// that arena so blob memory is first-touched by the threads that will run
// it. Non-TBB builds set the OMP thread count, optionally pin (unless the
// user already pinned via affinity env vars), then load inline.
void CreateArenaWithObserverAndLoadGraph(int threads_per_stream, int numa_node, int stream_id,
Config::InferenceThreadsBinding pinning,
std::shared_ptr<ICNNNetwork> clonedNetwork, const MKLDNNExtensionManager::Ptr& extensionManager) {
auto load = [clonedNetwork, extensionManager, numa_node, this](){
CreateGraph(static_cast<const ICNNNetwork&>(*clonedNetwork), extensionManager, numa_node);
};
#if(IE_THREAD == IE_THREAD_TBB || IE_THREAD == IE_THREAD_TBB_AUTO)
if (Config::InferenceThreadsBinding::NUMA == pinning) {
ptrArena = std::unique_ptr<tbb::task_arena>(
new tbb::task_arena(tbb::task_arena::constraints(numa_node, threads_per_stream)));
// the (pre-pinned) arena will load the graph (so that blobs memory is first touched by the right threads)
} else {
// regular arena
ptrArena = std::unique_ptr<tbb::task_arena>(new tbb::task_arena(threads_per_stream));
if (Config::InferenceThreadsBinding::CORES == pinning) {
// custom observer (that pins threads to cores)
CreateObserver(stream_id, threads_per_stream);
}
}
ptrArena->execute([&load](){
load();
});
#else
#if IE_THREAD == IE_THREAD_OMP
omp_set_num_threads(threads_per_stream);
#endif
// check that no (affinity-related) OMP envs are set, so user doesn't do a custom pinning
if (!check_env_variables() && (Config::InferenceThreadsBinding::NONE != pinning))
CreateObserver(stream_id, threads_per_stream);
load();
#endif
}
InferenceEngine::ICNNNetwork::Ptr dump() const;
template<typename NET>
static void ApplyUnrollPasses(NET &net);
void ResetInferCount() { infer_count = 0; }
void SortTopologically();
protected:
// Installs thread-to-core pinning for one stream. TBB builds attach a
// pinning observer to the arena; other builds pin through the process
// affinity mask.
void CreateObserver(int _stream_id, int _threads_per_stream, int _pinning_step = 1) {
// Notice that custom pinning/observer work (via sched_setaffinity) ONLY on Linux,
// in all other cases the below code is actually just a stub
#if (IE_THREAD == IE_THREAD_TBB || IE_THREAD == IE_THREAD_TBB_AUTO)
ptrObserver
= std::unique_ptr<tbb::task_scheduler_observer>(
new pinning_observer(*ptrArena, _stream_id, _threads_per_stream, _pinning_step));
#else
cpu_set_t *process_mask = nullptr;
int ncpus = 0;
get_process_mask(ncpus, process_mask);
#if IE_THREAD == IE_THREAD_OMP
#pragma omp parallel for
for (int thread_index = 0; thread_index < _threads_per_stream; thread_index++) {
pin_thread_to_vacant_core(_stream_id * _threads_per_stream + thread_index, 1, ncpus, process_mask);
}
#elif IE_THREAD == IE_THREAD_SEQ
pin_thread_to_vacant_core(_stream_id * _threads_per_stream, 1, ncpus, process_mask);
#endif
CPU_FREE(process_mask);
#endif
}
void VisitNode(MKLDNNNodePtr node, std::vector<MKLDNNNodePtr>& sortedNodes);
// Drops all built graph state so the graph can be re-created.
// NOTE(review): config, infer_count, memWorkspace and _name are left
// untouched here -- confirm that is intended.
void ForgetGraphData() {
status = NotReady;
eng = mkldnn::engine(mkldnn::engine::kind::cpu, 0);
inputNodes.clear();
outputNodes.clear();
graphNodes.clear();
graphEdges.clear();
_meanImages.clear();
}
Status status;
Config config;
// For dumping purposes. -1 - no counting, all other positive
// values mean increment it within each Infer() call
int infer_count = -1;
bool reuse_io_tensors = true;
MKLDNNMemoryPtr memWorkspace;
std::map<std::string, MKLDNNNodePtr> inputNodes;
std::vector<MKLDNNNodePtr> outputNodes;
std::vector<MKLDNNNodePtr> graphNodes;
std::vector<MKLDNNEdgePtr> graphEdges;
std::map<std::string, MeanImage> _meanImages;
std::string _name;
#if (IE_THREAD == IE_THREAD_TBB || IE_THREAD == IE_THREAD_TBB_AUTO)
std::unique_ptr<tbb::task_arena> ptrArena;
std::unique_ptr<tbb::task_scheduler_observer> ptrObserver;
#endif
mkldnn::engine eng;
void Replicate(const ICNNNetwork &network, const MKLDNNExtensionManager::Ptr& extMgr);
void Replicate(const TensorIterator::Body &subgraph, const MKLDNNExtensionManager::Ptr& extMgr);
void InitGraph();
void InitNodes();
void InitEdges();
void Allocate();
void AllocateWithReuse();
void CreatePrimitives();
void do_before(const std::string &dir, const MKLDNNNodePtr &node);
void do_after(const std::string &dir, const MKLDNNNodePtr &node);
friend class MKLDNNInferRequest;
friend class MKLDNNGraphlessInferRequest;
friend std::shared_ptr<InferenceEngine::ICNNNetwork> dump_graph_as_ie_net(const MKLDNNGraph &graph);
private:
void dumpToDotFile(std::string file) const;
struct ParsedLayer {
MKLDNNNodePtr parent;
InferenceEngine::CNNLayerPtr cnnLayer;
size_t outIdx;
};
};
} // namespace MKLDNNPlugin
|
target_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target simd'}}
#pragma omp target simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target simd'}}
#pragma omp target simd foo
// clang -verify fixture: the directive must be followed by a for loop.
// (Verify-directive comments below are load-bearing; do not edit them.)
void test_no_clause() {
int i;
#pragma omp target simd
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp target simd' must be a for loop}}
#pragma omp target simd
++i;
}
// clang -verify fixture: branches (goto/return) may not cross the OpenMP
// region boundary in either direction.
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp target simd
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
// clang -verify fixture: unknown tokens after the directive are ignored
// with a warning.
void test_invalid_clause() {
int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target simd' are ignored}}
#pragma omp target simd foo bar
for (i = 0; i < 16; ++i)
;
}
// clang -verify fixture: stray punctuation after the directive or a valid
// clause is diagnosed as ignored extra tokens.
void test_non_identifiers() {
int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target simd' are ignored}}
#pragma omp target simd;
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target simd' are ignored}}
#pragma omp target simd private(x);
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target simd' are ignored}}
#pragma omp target simd, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
// clang -verify fixture: malformed, non-constant, and non-positive
// arguments to the 'collapse' clause, plus the required loop-nest depth.
void test_collapse() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp target simd collapse
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd collapse(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target simd collapse()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd collapse(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd collapse(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target simd collapse 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target simd collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target simd collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target simd collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target simd', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target simd collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target simd collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target simd collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target simd', but found only 1}}
#pragma omp target simd collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target simd collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target simd', but found only 1}}
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target simd collapse(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target simd collapse(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target simd collapse(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target simd collapse(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target simd collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
}
// clang -verify fixture: malformed 'private' clause argument lists, then
// well-formed one-, two-, and three-variable uses.
void test_private() {
int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd private(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target simd private(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target simd private(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target simd private()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target simd private(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target simd private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target simd private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target simd private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target simd private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
// clang -verify fixture: malformed 'lastprivate' clause argument lists,
// then well-formed uses.
void test_lastprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target simd lastprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target simd lastprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target simd lastprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target simd lastprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target simd lastprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target simd lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target simd lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target simd lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target simd lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// clang -verify fixture: malformed 'firstprivate' clause argument lists,
// then valid firstprivate+lastprivate combinations on the same variables.
void test_firstprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target simd firstprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target simd firstprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target simd firstprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target simd firstprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target simd firstprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target simd firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target simd lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target simd lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target simd lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// clang -verify fixture: the loop induction variable must be of integer or
// pointer type (float/double are rejected).
void test_loop_messages() {
float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target simd
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target simd
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
// clang -verify fixture: malformed, non-constant, and non-positive
// arguments to the 'safelen' clause.
void test_safelen() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp target simd safelen
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target simd safelen()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target simd safelen 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(4
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(4,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp target simd safelen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(4 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target simd safelen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(4, 8)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target simd safelen(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target simd safelen(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp target simd safelen(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp target simd safelen(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp target simd safelen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
// clang -verify fixture: malformed, non-constant, and non-positive
// arguments to the 'simdlen' clause (mirrors test_safelen above).
void test_simdlen() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp target simd simdlen
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target simd simdlen()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target simd simdlen 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(4
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(4,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp target simd simdlen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(4 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target simd simdlen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(4, 8)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target simd simdlen(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target simd simdlen(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp target simd simdlen(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp target simd simdlen(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp target simd simdlen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
// clang -verify fixture: simdlen must not exceed safelen, regardless of
// the order the two clauses appear in.
void test_safelen_simdlen() {
int i;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp target simd simdlen(6) safelen(5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp target simd safelen(5) simdlen(6)
for (i = 0; i < 16; ++i)
;
}
|
bdlevd2.c | #include "laev2.h"
#include "wnrme.h"
#include "rnd.h"
#include "timer.h"
// Benchmark/accuracy driver for batched 2x2 symmetric eigendecomposition.
// Reads batches of matrix entries (a11/a22/a21) from binary files
// "<name>.f"/".g"/".h", times n calls to dlaev2 (LAPACK or the inlined
// _dlaev2), reads reference data from "<name>.k"/".l", and prints one CSV
// row per batch: batch index, seconds, then residual columns.
// Usage: <prog> filename 2^{batch_size} #batches
// NOTE(review): helper semantics (atoz, set_cbwr, tsc_*, worr, wrer, wlam,
// xtoa, the 'wide' type, W_ZERO, imax) come from the project headers above
// and are assumed, not visible here.
int main(int argc, char *argv[])
{
(void)set_cbwr();
if (4 != argc) {
(void)fprintf(stderr, "%s filename 2^{batch_size} #batches\n", *argv);
return EXIT_FAILURE;
}
// n = 2^argv[2] elements per batch.
const size_t n = ((size_t)1u << atoz(argv[2u]));
int th = 0;
#ifdef _OPENMP
// Each thread later reads a contiguous slice, so n must divide evenly.
th = omp_get_max_threads();
if (n % th) {
(void)fprintf(stderr, "batch_size has to be a multiple of %d.\n", th);
return EXIT_FAILURE;
}
#endif /* _OPENMP */
const size_t b = atoz(argv[3u]);
if (!b)
return EXIT_SUCCESS;
const size_t
nl = strlen(argv[1u]),
nl1 = (nl + 1u);
// fn holds "<name>.X": nl+3 zeroed bytes; the suffix letter at fn[nl+1]
// is swapped per file below, and calloc guarantees the trailing NUL.
char *const fn = calloc((nl + 3u), sizeof(char));
assert(fn);
// strcpy returns fn; overwrite the copied NUL at fn[nl] with '.'.
strcpy(fn, argv[1u])[nl] = '.';
int fm = O_RDONLY;
#ifdef _LARGEFILE64_SOURCE
fm |= O_LARGEFILE;
#endif /* _LARGEFILE64_SOURCE */
fn[nl1] = 'k';
const int fk = open(fn, fm);
if (-1 >= fk) {
(void)fprintf(stderr, "Cannot open %s for reading!\n", fn);
return EXIT_FAILURE;
}
fn[nl1] = 'l';
const int fl = open(fn, fm);
if (-1 >= fl) {
(void)fprintf(stderr, "Cannot open %s for reading!\n", fn);
return EXIT_FAILURE;
}
fn[nl1] = 'f';
const int ff = open(fn, fm);
if (-1 >= ff) {
(void)fprintf(stderr, "Cannot open %s for reading!\n", fn);
return EXIT_FAILURE;
}
fn[nl1] = 'g';
const int fg = open(fn, fm);
if (-1 >= fg) {
(void)fprintf(stderr, "Cannot open %s for reading!\n", fn);
return EXIT_FAILURE;
}
fn[nl1] = 'h';
const int fh = open(fn, fm);
if (-1 >= fh) {
(void)fprintf(stderr, "Cannot open %s for reading!\n", fn);
return EXIT_FAILURE;
}
// Seven n-element double arrays; nt is a multiple of the requested
// alignment (sizeof(double)), as aligned_alloc requires.
const size_t nt = n * sizeof(double);
double
*const a11 = (double*)aligned_alloc(sizeof(double), nt),
*const a22 = (double*)aligned_alloc(sizeof(double), nt),
*const a21 = (double*)aligned_alloc(sizeof(double), nt),
*const cs1 = (double*)aligned_alloc(sizeof(double), nt),
*const sn1 = (double*)aligned_alloc(sizeof(double), nt),
*const l1 = (double*)aligned_alloc(sizeof(double), nt),
*const l2 = (double*)aligned_alloc(sizeof(double), nt);
assert(a11);
assert(a22);
assert(a21);
assert(cs1);
assert(sn1);
assert(l1);
assert(l2);
unsigned rd[2u] = { 0u, 0u };
const uint64_t hz = tsc_get_freq_hz_(rd);
(void)fprintf(stderr, "TSC frequency: %llu+(%u/%u) Hz.\n", (unsigned long long)hz, rd[0u], rd[1u]);
(void)fflush(stderr);
(void)fprintf(stdout, "\"B\",\"Ts\",\"ORT\",\"REN\",\"RLN\",\"RLX\",\"RLM\"\n");
(void)fflush(stdout);
// Fixed-width batch-index format, chosen by the number of batches.
const char *bf = (const char*)NULL;
if (b <= 10u)
bf = "%1zu";
else if (b <= 100u)
bf = "%2zu";
else if (b <= 1000u)
bf = "%3zu";
else // b > 1000
bf = "%zu";
// Per-thread slice length (th == 0 when built without OpenMP).
const size_t n_t = n / imax(th, 1);
const size_t cnt = n_t * sizeof(double);
char a[31u] = { '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0' };
for (size_t j = 0u; j < b; ++j) {
(void)fprintf(stdout, bf, j);
(void)fflush(stdout);
const size_t jn = j * n;
// Each thread pread()s its contiguous n_t-element slice of batch j.
#ifdef _OPENMP
#pragma omp parallel default(none) shared(ff,fg,fh,a11,a22,a21,n,n_t,cnt,jn)
#endif /* _OPENMP */
{
const int mt =
#ifdef _OPENMP
omp_get_thread_num()
#else /* !_OPENMP */
0
#endif /* ?_OPENMP */
;
const size_t tnt = mt * n_t;
const off_t off = (jn + tnt) * sizeof(double);
if ((ssize_t)cnt != pread(ff, (a11 + tnt), cnt, off))
exit(EXIT_FAILURE);
if ((ssize_t)cnt != pread(fg, (a22 + tnt), cnt, off))
exit(EXIT_FAILURE);
if ((ssize_t)cnt != pread(fh, (a21 + tnt), cnt, off))
exit(EXIT_FAILURE);
}
(void)fprintf(stdout, ",");
(void)fflush(stdout);
// Timed section: n independent 2x2 eigendecompositions, in TSC ticks.
uint64_t be[2u] = { UINT64_C(0), UINT64_C(0) };
be[0u] = rdtsc_beg(rd);
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,a11,a22,a21,l1,l2,cs1,sn1)
#endif /* _OPENMP */
for (size_t i = 0u; i < n; ++i) {
#ifdef USE_INL
_dlaev2((a11 + i), (a21 + i), (a22 + i), (l1 + i), (l2 + i), (cs1 + i), (sn1 + i));
#else /* !USE_INL */
LAPACK_D(laev2)((a11 + i), (a21 + i), (a22 + i), (l1 + i), (l2 + i), (cs1 + i), (sn1 + i));
#endif /* ?USE_INL */
}
be[1u] = rdtsc_end(rd);
(void)fprintf(stdout, "%15.9Lf,", tsc_lap(hz, be[0u], be[1u]));
(void)fflush(stdout);
// Max-reduce residuals over the batch: o presumably measures rotation
// orthogonality (worr), r the relative eigendecomposition error (wrer)
// -- TODO confirm against wnrme.h.
wide o = W_ZERO, r = W_ZERO;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,a11,a22,a21,cs1,sn1,l1,l2) reduction(max:o,r)
#endif /* _OPENMP */
for (size_t i = 0u; i < n; ++i) {
wide AE = W_ZERO, AN = W_ZERO;
o = fmaxw(o, worr(cs1[i], sn1[i]));
r = fmaxw(r, wrer(a11[i], a22[i], a21[i], cs1[i], sn1[i], l1[i], l2[i], &AE, &AN));
}
(void)fprintf(stdout, "%s,", xtoa(a, (long double)o));
(void)fprintf(stdout, "%s", xtoa(a, (long double)r));
(void)fflush(stdout);
// Reload reference data from the .k/.l files, reusing cs1/sn1.
#ifdef _OPENMP
#pragma omp parallel default(none) shared(fk,fl,cs1,sn1,n,n_t,cnt,jn)
#endif /* _OPENMP */
{
const int mt =
#ifdef _OPENMP
omp_get_thread_num()
#else /* !_OPENMP */
0
#endif /* ?_OPENMP */
;
const size_t tnt = mt * n_t;
const off_t off = (jn + tnt) * sizeof(double);
if ((ssize_t)cnt != pread(fk, (cs1 + tnt), cnt, off))
exit(EXIT_FAILURE);
if ((ssize_t)cnt != pread(fl, (sn1 + tnt), cnt, off))
exit(EXIT_FAILURE);
}
(void)fprintf(stdout, ",");
(void)fflush(stdout);
// Compare computed eigenvalues against the reference via wlam; r/x/m are
// max-reduced and printed as the last three CSV columns.
wide x = W_ZERO, m = W_ZERO;
r = W_ZERO;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,l1,l2,cs1,sn1) reduction(max:r,x,m)
#endif /* _OPENMP */
for (size_t i = 0u; i < n; ++i) {
wide AE = W_ZERO, AN = W_ZERO;
const wide RE = wlam(l1[i], l2[i], cs1[i], sn1[i], &AE, &AN);
r = fmaxw(r, RE);
x = fmaxw(x, AE);
m = fmaxw(m, AN);
}
(void)fprintf(stdout, "%s,", xtoa(a, (long double)r));
(void)fprintf(stdout, "%s,", xtoa(a, (long double)x));
(void)fprintf(stdout, "%s\n", xtoa(a, (long double)m));
(void)fflush(stdout);
}
(void)close(fh);
(void)close(fg);
(void)close(ff);
(void)close(fl);
(void)close(fk);
free(l2);
free(l1);
free(sn1);
free(cs1);
free(a21);
free(a22);
free(a11);
free(fn);
return EXIT_SUCCESS;
}
|
dtrmm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/ztrmm.c, normal z -> d, Fri Sep 28 17:38:03 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_trmm
*
* Performs a triangular matrix-matrix multiply of the form
*
* \f[B = \alpha [op(A) \times B] \f], if side = PlasmaLeft or
* \f[B = \alpha [B \times op(A)] \f], if side = PlasmaRight
*
* where op( X ) is one of:
*
* - op(A) = A or
 * - op(A) = A^T    (for this real-valued routine, PlasmaTrans and
 *                   PlasmaConjTrans both denote the transpose)
*
* alpha is a scalar, B is an m-by-n matrix and A is a unit or non-unit, upper
* or lower triangular matrix.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether op( A ) appears on the left or on the right of B:
* - PlasmaLeft: alpha*op( A )*B
* - PlasmaRight: alpha*B*op( A )
*
* @param[in] uplo
* Specifies whether the matrix A is upper triangular or lower
* triangular:
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] transa
* Specifies whether the matrix A is transposed, not transposed or
* conjugate transposed:
 *          - PlasmaNoTrans:   A is not transposed;
 *          - PlasmaTrans:     A is transposed;
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] diag
* Specifies whether or not A is unit triangular:
* - PlasmaNonUnit: A is non-unit triangular;
* - PlasmaUnit: A is unit triangular.
*
* @param[in] m
* The number of rows of matrix B.
* m >= 0.
*
* @param[in] n
* The number of columns of matrix B.
* n >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] pA
* The triangular matrix A of dimension lda-by-k, where k is m when
* side='L' or 'l' and k is n when when side='R' or 'r'. If uplo =
* PlasmaUpper, the leading k-by-k upper triangular part of the array
* A contains the upper triangular matrix, and the strictly lower
* triangular part of A is not referenced. If uplo = PlasmaLower, the
* leading k-by-k lower triangular part of the array A contains the
* lower triangular matrix, and the strictly upper triangular part of
* A is not referenced. If diag = PlasmaUnit, the diagonal elements of
* A are also not referenced and are assumed to be 1.
*
* @param[in] lda
* The leading dimension of the array A. When side='L' or 'l',
* lda >= max(1,m), when side='R' or 'r' then lda >= max(1,n).
*
* @param[in,out] pB
* On entry, the matrix B of dimension ldb-by-n.
* On exit, the result of a triangular matrix-matrix multiply
* ( alpha*op(A)*B ) or ( alpha*B*op(A) ).
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_dtrmm
* @sa plasma_ctrmm
* @sa plasma_dtrmm
* @sa plasma_strmm
*
******************************************************************************/
// LAPACK-layout entry point for the triangular matrix-matrix multiply
// B = alpha*op(A)*B (side=PlasmaLeft) or B = alpha*B*op(A) (PlasmaRight).
// Validates arguments (returning the negated 1-based position of the first
// illegal one), converts pA/pB to tile layout, runs the tile-async kernel
// inside an OpenMP parallel/master region, converts B back, and returns
// sequence.status.
int plasma_dtrmm(plasma_enum_t side, plasma_enum_t uplo,
plasma_enum_t transa, plasma_enum_t diag,
int m, int n,
double alpha, double *pA, int lda,
double *pB, int ldb)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if (side != PlasmaLeft && side != PlasmaRight) {
plasma_error("illegal value of side");
return -1;
}
if (uplo != PlasmaUpper && uplo != PlasmaLower) {
plasma_error("illegal value of uplo");
return -2;
}
if (transa != PlasmaConjTrans &&
transa != PlasmaNoTrans &&
transa != PlasmaTrans )
{
plasma_error("illegal value of transa");
return -3;
}
if (diag != PlasmaUnit && diag != PlasmaNonUnit) {
plasma_error("illegal value of diag");
return -4;
}
if (m < 0) {
plasma_error("illegal value of m");
return -5;
}
if (n < 0) {
plasma_error("illegal value of n");
return -6;
}
// A is k-by-k, where k is the dimension B shares with op(A).
int k = (side == PlasmaLeft) ? m : n;
if (lda < imax(1, k)) {
plasma_error("illegal value of lda");
return -8;
}
if (ldb < imax(1, m)) {
plasma_error("illegal value of ldb");
return -10;
}
// quick return
if (imin(m, n) == 0)
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_trmm(plasma, PlasmaRealDouble, m, n);
// Set tiling parameters.
int nb = plasma->nb;
// Create tile matrices.
plasma_desc_t A;
plasma_desc_t B;
int retval;
retval = plasma_desc_triangular_create(PlasmaRealDouble, uplo, nb, nb,
k, k, 0, 0, k, k, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_triangular_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
m, n, 0, 0, m, n, &B);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
return retval;
}
// Initialize sequence.
// NOTE(review): the return values of plasma_sequence_init() and
// plasma_request_init() below are assigned but never checked.
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// asynchronous block
#pragma omp parallel
#pragma omp master
{
// Translate matrices to tile layout.
plasma_omp_dtr2desc(pA, lda, A, &sequence, &request);
plasma_omp_dge2desc(pB, ldb, B, &sequence, &request);
// Call tile async interface.
plasma_omp_dtrmm(side, uplo, transa, diag,
alpha, A,
B,
&sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_ddesc2ge(B, pB, ldb, &sequence, &request);
}
// implicit synchronization
// Free matrices in tile layout.
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
// Return status.
return sequence.status;
}
/***************************************************************************//**
*
* @ingroup plasma_trmm
*
* Performs triangular matrix multiplication. Non-blocking tile version of
* plasma_dtrmm(). May return before the computation is finished. Operates on
* matrices stored by tiles. All matrices are passed through descriptors. All
* dimensions are taken from the descriptors. Allows for pipelining of
* operations at runtime.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether op( A ) appears on the left or on the right of B:
* - PlasmaLeft: alpha*op( A )*B
* - PlasmaRight: alpha*B*op( A )
*
* @param[in] uplo
* Specifies whether the matrix A is upper triangular or lower
* triangular:
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] transa
* Specifies whether the matrix A is transposed, not transposed or
* conjugate transposed:
 *          - PlasmaNoTrans:   A is not transposed;
 *          - PlasmaTrans:     A is transposed;
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] diag
* Specifies whether or not A is unit triangular:
* - PlasmaNonUnit: A is non-unit triangular;
* - PlasmaUnit: A is unit triangular.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of the triangular matrix A.
*
* @param[in,out] B
* Descriptor of matrix B.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_dtrmm
* @sa plasma_omp_ctrmm
* @sa plasma_omp_dtrmm
* @sa plasma_omp_strmm
*
******************************************************************************/
void plasma_omp_dtrmm(plasma_enum_t side, plasma_enum_t uplo,
                      plasma_enum_t transa, plasma_enum_t diag,
                      double alpha, plasma_desc_t A,
                      plasma_desc_t B,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Validate the sequence and request handles before anything else:
    // every other failure path below reports errors through
    // plasma_request_fail(sequence, request, ...), which dereferences both,
    // so these checks must come first or a NULL handle crashes before the
    // original NULL checks were ever reached.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorNotInitialized);
        return;
    }

    // Check input arguments.
    if (side != PlasmaLeft && side != PlasmaRight) {
        plasma_error("illegal value of side");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (uplo != PlasmaUpper && uplo != PlasmaLower) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (transa != PlasmaConjTrans &&
        transa != PlasmaNoTrans &&
        transa != PlasmaTrans) {
        plasma_error("illegal value of transa");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (diag != PlasmaUnit && diag != PlasmaNonUnit) {
        plasma_error("illegal value of diag");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.m == 0 || A.n == 0 || B.m == 0 || B.n == 0)
        return;

    // alpha == 0 means the result is exactly zero; set B = 0 directly
    // instead of running the full triangular multiply.
    if (alpha == 0.0) {
        double zzero = 0.0;
        plasma_pdlaset(PlasmaGeneral, zzero, zzero, B, sequence, request);
        return;
    }

    // Call parallel function.
    plasma_pdtrmm(side, uplo, transa, diag, alpha,
                  A, B,
                  sequence, request);
}
|
nn_index.h | /***********************************************************************
* Software License Agreement (BSD License)
*
* Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
* Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
*
* THE BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*************************************************************************/
#ifndef FLANN_NNINDEX_H
#define FLANN_NNINDEX_H
#include <vector>
#include "flann/general.h"
#include "flann/util/matrix.h"
#include "flann/util/params.h"
#include "flann/util/result_set.h"
#include "flann/util/dynamic_bitset.h"
#include "flann/util/saving.h"
namespace flann
{
#define KNN_HEAP_THRESHOLD 250
/**
 * Non-templated abstract base for all FLANN index types.
 *
 * Provides a type-erased view of an index so that callers can query its
 * dimensions, algorithm type, memory footprint, and parameters, and perform
 * load/save, without knowing the distance functor template argument.
 */
class IndexBase
{
public:
    virtual ~IndexBase() {};

    // Dimensionality (number of components) of each stored point.
    virtual size_t veclen() const = 0;

    // Number of points currently in the index.
    virtual size_t size() const = 0;

    // Algorithm identifier of the concrete index (kd-tree, k-means, ...).
    virtual flann_algorithm_t getType() const = 0;

    // Approximate memory used by the index, in bytes.
    virtual int usedMemory() const = 0;

    // Parameters the index was constructed with.
    virtual IndexParams getParameters() const = 0;

    // Deserialize/serialize the index from/to an already-open stream.
    virtual void loadIndex(FILE* stream) = 0;
    virtual void saveIndex(FILE* stream) = 0;
};
/**
 * Nearest-neighbour index base class.
 *
 * Implements the functionality shared by all concrete indexes: dataset
 * storage (as a vector of row pointers, optionally backed by memory this
 * class owns via data_ptr_), lazy point removal with an id->index mapping,
 * serialization, and the OpenMP-parallel knnSearch/radiusSearch drivers
 * that delegate the actual traversal to the pure virtual findNeighbors().
 */
template <typename Distance>
class NNIndex : public IndexBase
{
public:
    typedef typename Distance::ElementType ElementType;
    typedef typename Distance::ResultType DistanceType;

    NNIndex(Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0),
        removed_(false), removed_count_(0), data_ptr_(NULL)
    {
    }

    NNIndex(const IndexParams& params, Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0),
        index_params_(params), removed_(false), removed_count_(0), data_ptr_(NULL)
    {
    }

    // Copy constructor: deep-copies the point data only when the source
    // owns it (data_ptr_ != NULL); otherwise the copy shares the caller's
    // external dataset memory through points_.
    NNIndex(const NNIndex& other) :
        distance_(other.distance_),
        last_id_(other.last_id_),
        size_(other.size_),
        size_at_build_(other.size_at_build_),
        veclen_(other.veclen_),
        index_params_(other.index_params_),
        removed_(other.removed_),
        removed_points_(other.removed_points_),
        removed_count_(other.removed_count_),
        ids_(other.ids_),
        points_(other.points_),
        data_ptr_(NULL)
    {
        if (other.data_ptr_) {
            data_ptr_ = new ElementType[size_*veclen_];
            std::copy(other.data_ptr_, other.data_ptr_+size_*veclen_, data_ptr_);
            for (size_t i=0;i<size_;++i) {
                points_[i] = data_ptr_ + i*veclen_;
            }
        }
    }

    virtual ~NNIndex()
    {
        if (data_ptr_) {
            delete[] data_ptr_;
        }
    }

    virtual NNIndex* clone() const = 0;

    /**
     * Builds the index
     */
    virtual void buildIndex()
    {
        freeIndex();
        // Compact away lazily-removed points before (re)building so the
        // concrete index only sees live data.
        cleanRemovedPoints();

        // building index
        buildIndexImpl();

        size_at_build_ = size_;
    }

    /**
     * Builds the index using the specified dataset
     * @param dataset the dataset to use
     */
    virtual void buildIndex(const Matrix<ElementType>& dataset)
    {
        setDataset(dataset);
        this->buildIndex();
    }

    /**
     * @brief Incrementally add points to the index.
     * @param points Matrix with points to be added
     * @param rebuild_threshold
     *
     * Default implementation: incremental addition is not supported;
     * indexes that support it override this.
     */
    virtual void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2)
    {
        throw FLANNException("Functionality not supported by this index");
    }

    /**
     * Remove point from the index
     * @param index Index of point to be removed
     *
     * Removal is lazy: the point is only flagged in removed_points_ and
     * excluded from results; memory is reclaimed on the next buildIndex().
     * On the first removal the identity id->index mapping is materialized.
     */
    virtual void removePoint(size_t id)
    {
        if (!removed_) {
            ids_.resize(size_);
            for (size_t i=0;i<size_;++i) {
                ids_[i] = i;
            }
            removed_points_.resize(size_);
            removed_points_.reset();
            last_id_ = size_;
            removed_ = true;
        }

        size_t point_index = id_to_index(id);
        if (point_index!=size_t(-1) && !removed_points_.test(point_index)) {
            removed_points_.set(point_index);
            removed_count_++;
        }
    }

    /**
     * Get point with specific id
     * @param id
     * @return pointer to the point's data, or NULL if the id is unknown
     */
    virtual ElementType* getPoint(size_t id)
    {
        size_t index = id_to_index(id);
        if (index!=size_t(-1)) {
            return points_[index];
        }
        else {
            return NULL;
        }
    }

    /**
     * @return number of features in this index (live points only,
     * i.e. excluding lazily-removed ones).
     */
    inline size_t size() const
    {
        return size_ - removed_count_;
    }

    /**
     * @return The dimensionality of the features in this index.
     */
    inline size_t veclen() const
    {
        return veclen_;
    }

    /**
     * Returns the parameters used by the index.
     *
     * @return The index parameters
     */
    IndexParams getParameters() const
    {
        return index_params_;
    }

    // Bidirectional serialization (Archive direction decides save vs load).
    // On load, validates the file signature (ignoring the trailing version
    // suffix), element datatype, and index type before reading any state.
    template<typename Archive>
    void serialize(Archive& ar)
    {
        IndexHeader header;

        if (Archive::is_saving::value) {
            header.h.data_type = flann_datatype_value<ElementType>::value;
            header.h.index_type = getType();
            header.h.rows = size_;
            header.h.cols = veclen_;
        }
        ar & header;

        // sanity checks
        if (Archive::is_loading::value) {
            // Compare the signature minus the "vX.Y" version tail, so
            // indexes saved by other minor versions still load.
            if (strncmp(header.h.signature,
                        FLANN_SIGNATURE_,
                        strlen(FLANN_SIGNATURE_) - strlen("v0.0")) != 0) {
                throw FLANNException("Invalid index file, wrong signature");
            }
            if (header.h.data_type != flann_datatype_value<ElementType>::value) {
                throw FLANNException("Datatype of saved index is different than of the one to be created.");
            }
            if (header.h.index_type != getType()) {
                throw FLANNException("Saved index type is different then the current index type.");
            }
            // TODO: check for distance type
        }

        ar & size_;
        ar & veclen_;
        ar & size_at_build_;

        bool save_dataset;
        if (Archive::is_saving::value) {
            save_dataset = get_param(index_params_,"save_dataset", false);
        }
        ar & save_dataset;

        if (save_dataset) {
            if (Archive::is_loading::value) {
                // Take ownership of a fresh buffer and rebuild row pointers.
                if (data_ptr_) {
                    delete[] data_ptr_;
                }
                data_ptr_ = new ElementType[size_*veclen_];
                points_.resize(size_);
                for (size_t i=0;i<size_;++i) {
                    points_[i] = data_ptr_ + i*veclen_;
                }
            }
            for (size_t i=0;i<size_;++i) {
                ar & serialization::make_binary_object (points_[i], veclen_*sizeof(ElementType));
            }
        } else {
            if (points_.size()!=size_) {
                throw FLANNException("Saved index does not contain the dataset and no dataset was provided.");
            }
        }

        ar & last_id_;
        ar & ids_;
        ar & removed_;
        if (removed_) {
            ar & removed_points_;
        }
        ar & removed_count_;
    }

    /**
     * @brief Perform k-nearest neighbor search
     * @param[in] queries The query points for which to find the nearest neighbors
     * @param[out] indices The indices of the nearest neighbors found
     * @param[out] dists Distances to the nearest neighbors found
     * @param[in] knn Number of nearest neighbors to return
     * @param[in] params Search parameters
     *
     * Queries run in parallel (OpenMP, params.cores threads); each thread
     * keeps a private result set. The heap-backed result set is preferred
     * automatically for large knn (> KNN_HEAP_THRESHOLD).
     */
    virtual int knnSearch(const Matrix<ElementType>& queries,
                          Matrix<size_t>& indices,
                          Matrix<DistanceType>& dists,
                          size_t knn,
                          const SearchParams& params) const
    {
        assert(queries.cols == veclen());
        assert(indices.rows >= queries.rows);
        assert(dists.rows >= queries.rows);
        assert(indices.cols >= knn);
        assert(dists.cols >= knn);

        bool use_heap;
        if (params.use_heap==FLANN_Undefined) {
            use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false;
        }
        else {
            use_heap = (params.use_heap==FLANN_True)?true:false;
        }

        int count = 0;
        if (use_heap) {
#pragma omp parallel num_threads(params.cores)
            {
                KNNResultSet2<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    resultSet.copy(indices[i], dists[i], n, params.sorted);
                    // Translate internal point indices to stable user ids
                    // (in-place; no-op unless points were removed).
                    indices_to_ids(indices[i], indices[i], n);
                    count += n;
                }
            }
        }
        else {
#pragma omp parallel num_threads(params.cores)
            {
                KNNSimpleResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    resultSet.copy(indices[i], dists[i], n, params.sorted);
                    indices_to_ids(indices[i], indices[i], n);
                    count += n;
                }
            }
        }
        return count;
    }

    /**
     * Convenience overload writing into int index matrices; forwards to
     * the size_t overload through a temporary buffer.
     * @param queries
     * @param indices
     * @param dists
     * @param knn
     * @param params
     * @return total number of neighbors found
     */
    virtual int knnSearch(const Matrix<ElementType>& queries,
                          Matrix<int>& indices,
                          Matrix<DistanceType>& dists,
                          size_t knn,
                          const SearchParams& params) const
    {
        flann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols);

        int result = knnSearch(queries, indices_, dists, knn, params);

        for (size_t i=0;i<indices.rows;++i) {
            for (size_t j=0;j<indices.cols;++j) {
                indices[i][j] = indices_[i][j];
            }
        }
        delete[] indices_.ptr();
        return result;
    }

    /**
     * @brief Perform k-nearest neighbor search
     * @param[in] queries The query points for which to find the nearest neighbors
     * @param[out] indices The indices of the nearest neighbors found
     * @param[out] dists Distances to the nearest neighbors found
     * @param[in] knn Number of nearest neighbors to return
     * @param[in] params Search parameters
     *
     * Vector-of-vectors variant: per-query result vectors are resized to
     * the number of neighbors actually found.
     */
    virtual int knnSearch(const Matrix<ElementType>& queries,
                          std::vector< std::vector<size_t> >& indices,
                          std::vector<std::vector<DistanceType> >& dists,
                          size_t knn,
                          const SearchParams& params) const
    {
        assert(queries.cols == veclen());
        bool use_heap;
        if (params.use_heap==FLANN_Undefined) {
            use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false;
        }
        else {
            use_heap = (params.use_heap==FLANN_True)?true:false;
        }

        if (indices.size() < queries.rows ) indices.resize(queries.rows);
        if (dists.size() < queries.rows ) dists.resize(queries.rows);

        int count = 0;
        if (use_heap) {
#pragma omp parallel num_threads(params.cores)
            {
                KNNResultSet2<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    indices[i].resize(n);
                    dists[i].resize(n);
                    if (n>0) {
                        resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                        indices_to_ids(&indices[i][0], &indices[i][0], n);
                    }
                    count += n;
                }
            }
        }
        else {
#pragma omp parallel num_threads(params.cores)
            {
                KNNSimpleResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    indices[i].resize(n);
                    dists[i].resize(n);
                    if (n>0) {
                        resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                        indices_to_ids(&indices[i][0], &indices[i][0], n);
                    }
                    count += n;
                }
            }
        }
        return count;
    }

    /**
     * Convenience overload writing into int index vectors; forwards to
     * the size_t overload.
     * @param queries
     * @param indices
     * @param dists
     * @param knn
     * @param params
     * @return total number of neighbors found
     */
    virtual int knnSearch(const Matrix<ElementType>& queries,
                          std::vector< std::vector<int> >& indices,
                          std::vector<std::vector<DistanceType> >& dists,
                          size_t knn,
                          const SearchParams& params) const
    {
        std::vector<std::vector<size_t> > indices_;
        int result = knnSearch(queries, indices_, dists, knn, params);

        indices.resize(indices_.size());
        for (size_t i=0;i<indices_.size();++i) {
            indices[i].assign(indices_[i].begin(), indices_[i].end());
        }
        return result;
    }

    /**
     * @brief Perform radius search
     * @param[in] query The query point
     * @param[out] indices The indices of the neighbors found within the given radius
     * @param[out] dists The distances to the nearest neighbors found
     * @param[in] radius The radius used for search
     * @param[in] params Search parameters
     * @return Number of neighbors found
     *
     * params.max_neighbors selects the strategy: 0 = count only,
     * <0 = unbounded result set (when the output rows are large enough),
     * >0 = bounded k-nearest-within-radius result set.
     */
    int radiusSearch(const Matrix<ElementType>& queries,
                     Matrix<size_t>& indices,
                     Matrix<DistanceType>& dists,
                     float radius,
                     const SearchParams& params) const
    {
        assert(queries.cols == veclen());
        int count = 0;
        size_t num_neighbors = std::min(indices.cols, dists.cols);
        int max_neighbors = params.max_neighbors;
        if (max_neighbors<0) max_neighbors = num_neighbors;
        else max_neighbors = std::min(max_neighbors,(int)num_neighbors);

        if (max_neighbors==0) {
#pragma omp parallel num_threads(params.cores)
            {
                CountRadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    count += resultSet.size();
                }
            }
        }
        else {
            // explicitly indicated to use unbounded radius result set
            // and we know there'll be enough room for resulting indices and dists
            if (params.max_neighbors<0 && (num_neighbors>=size())) {
#pragma omp parallel num_threads(params.cores)
                {
                    RadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
                    for (int i = 0; i < (int)queries.rows; i++) {
                        resultSet.clear();
                        findNeighbors(resultSet, queries[i], params);
                        size_t n = resultSet.size();
                        count += n;
                        if (n>num_neighbors) n = num_neighbors;
                        resultSet.copy(indices[i], dists[i], n, params.sorted);

                        // mark the next element in the output buffers as unused
                        if (n<indices.cols) indices[i][n] = size_t(-1);
                        if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
                        indices_to_ids(indices[i], indices[i], n);
                    }
                }
            }
            else {
                // number of neighbors limited to max_neighbors
#pragma omp parallel num_threads(params.cores)
                {
                    KNNRadiusResultSet<DistanceType> resultSet(radius, max_neighbors);
#pragma omp for schedule(static) reduction(+:count)
                    for (int i = 0; i < (int)queries.rows; i++) {
                        resultSet.clear();
                        findNeighbors(resultSet, queries[i], params);
                        size_t n = resultSet.size();
                        count += n;
                        if ((int)n>max_neighbors) n = max_neighbors;
                        resultSet.copy(indices[i], dists[i], n, params.sorted);

                        // mark the next element in the output buffers as unused
                        if (n<indices.cols) indices[i][n] = size_t(-1);
                        if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
                        indices_to_ids(indices[i], indices[i], n);
                    }
                }
            }
        }
        return count;
    }

    /**
     * Convenience overload writing into int index matrices; forwards to
     * the size_t overload through a temporary buffer.
     * @param queries
     * @param indices
     * @param dists
     * @param radius
     * @param params
     * @return total number of neighbors found
     */
    int radiusSearch(const Matrix<ElementType>& queries,
                     Matrix<int>& indices,
                     Matrix<DistanceType>& dists,
                     float radius,
                     const SearchParams& params) const
    {
        flann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols);

        int result = radiusSearch(queries, indices_, dists, radius, params);

        for (size_t i=0;i<indices.rows;++i) {
            for (size_t j=0;j<indices.cols;++j) {
                indices[i][j] = indices_[i][j];
            }
        }
        delete[] indices_.ptr();
        return result;
    }

    /**
     * @brief Perform radius search
     * @param[in] query The query point
     * @param[out] indices The indices of the neighbors found within the given radius
     * @param[out] dists The distances to the nearest neighbors found
     * @param[in] radius The radius used for search
     * @param[in] params Search parameters
     * @return Number of neighbors found
     *
     * Vector-of-vectors variant; per-query result vectors are resized to
     * the number of neighbors actually kept.
     */
    int radiusSearch(const Matrix<ElementType>& queries,
                     std::vector< std::vector<size_t> >& indices,
                     std::vector<std::vector<DistanceType> >& dists,
                     float radius,
                     const SearchParams& params) const
    {
        assert(queries.cols == veclen());
        int count = 0;

        // just count neighbors
        if (params.max_neighbors==0) {
#pragma omp parallel num_threads(params.cores)
            {
                CountRadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    count += resultSet.size();
                }
            }
        }
        else {
            if (indices.size() < queries.rows ) indices.resize(queries.rows);
            if (dists.size() < queries.rows ) dists.resize(queries.rows);

            if (params.max_neighbors<0) {
                // search for all neighbors
#pragma omp parallel num_threads(params.cores)
                {
                    RadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
                    for (int i = 0; i < (int)queries.rows; i++) {
                        resultSet.clear();
                        findNeighbors(resultSet, queries[i], params);
                        size_t n = resultSet.size();
                        count += n;
                        indices[i].resize(n);
                        dists[i].resize(n);
                        if (n > 0) {
                            resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                            indices_to_ids(&indices[i][0], &indices[i][0], n);
                        }
                    }
                }
            }
            else {
                // number of neighbors limited to max_neighbors
#pragma omp parallel num_threads(params.cores)
                {
                    KNNRadiusResultSet<DistanceType> resultSet(radius, params.max_neighbors);
#pragma omp for schedule(static) reduction(+:count)
                    for (int i = 0; i < (int)queries.rows; i++) {
                        resultSet.clear();
                        findNeighbors(resultSet, queries[i], params);
                        size_t n = resultSet.size();
                        count += n;
                        if ((int)n>params.max_neighbors) n = params.max_neighbors;
                        indices[i].resize(n);
                        dists[i].resize(n);
                        if (n > 0) {
                            resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                            indices_to_ids(&indices[i][0], &indices[i][0], n);
                        }
                    }
                }
            }
        }
        return count;
    }

    /**
     * Convenience overload writing into int index vectors; forwards to
     * the size_t overload.
     * @param queries
     * @param indices
     * @param dists
     * @param radius
     * @param params
     * @return total number of neighbors found
     */
    int radiusSearch(const Matrix<ElementType>& queries,
                     std::vector< std::vector<int> >& indices,
                     std::vector<std::vector<DistanceType> >& dists,
                     float radius,
                     const SearchParams& params) const
    {
        std::vector<std::vector<size_t> > indices_;
        int result = radiusSearch(queries, indices_, dists, radius, params);

        indices.resize(indices_.size());
        for (size_t i=0;i<indices_.size();++i) {
            indices[i].assign(indices_[i].begin(), indices_[i].end());
        }
        return result;
    }

    // Core traversal, implemented by each concrete index; must be safe to
    // call concurrently from multiple threads (the search drivers above do).
    virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) const = 0;

protected:
    // Release index-specific structures (called before rebuilding).
    virtual void freeIndex() = 0;

    // Build the index-specific structures from points_.
    virtual void buildIndexImpl() = 0;

    // Map a stable user id to the current position in points_.
    // Fast path: while no point was ever removed, ids_[id]==id.
    // Otherwise binary search (ids_ stays sorted: cleanRemovedPoints
    // preserves order and extendDataset appends increasing ids).
    // Returns size_t(-1) if the id is unknown.
    size_t id_to_index(size_t id)
    {
        if (ids_.size()==0) {
            return id;
        }
        size_t point_index = size_t(-1);
        if (id < ids_.size() && ids_[id]==id) {
            return id;
        }
        else {
            // binary search
            size_t start = 0;
            size_t end = ids_.size();

            while (start<end) {
                size_t mid = (start+end)/2;
                if (ids_[mid]==id) {
                    point_index = mid;
                    break;
                }
                else if (ids_[mid]<id) {
                    start = mid + 1;
                }
                else {
                    end = mid;
                }
            }
        }
        return point_index;
    }

    // Translate internal indices to user ids; no-op while nothing was
    // removed (indices are then the ids). Safe for in==out aliasing.
    void indices_to_ids(const size_t* in, size_t* out, size_t size) const
    {
        if (removed_) {
            for (size_t i=0;i<size;++i) {
                out[i] = ids_[in[i]];
            }
        }
    }

    // Point at an external dataset (no copy; caller keeps the memory alive)
    // and reset all removal bookkeeping.
    void setDataset(const Matrix<ElementType>& dataset)
    {
        size_ = dataset.rows;
        veclen_ = dataset.cols;
        last_id_ = 0;

        ids_.clear();
        removed_points_.clear();
        removed_ = false;
        removed_count_ = 0;

        points_.resize(size_);
        for (size_t i=0;i<size_;++i) {
            points_[i] = dataset[i];
        }
    }

    // Append rows of new_points (by pointer, no copy), assigning fresh
    // sequential ids when removal bookkeeping is active.
    void extendDataset(const Matrix<ElementType>& new_points)
    {
        size_t new_size = size_ + new_points.rows;
        if (removed_) {
            removed_points_.resize(new_size);
            ids_.resize(new_size);
        }
        points_.resize(new_size);
        for (size_t i=size_;i<new_size;++i) {
            points_[i] = new_points[i-size_];
            if (removed_) {
                ids_[i] = last_id_++;
                removed_points_.reset(i);
            }
        }
        size_ = new_size;
    }

    // Compact points_/ids_ in place, dropping entries flagged in
    // removed_points_ while preserving relative order (keeps ids_ sorted).
    void cleanRemovedPoints()
    {
        if (!removed_) return;

        size_t last_idx = 0;
        for (size_t i=0;i<size_;++i) {
            if (!removed_points_.test(i)) {
                points_[last_idx] = points_[i];
                ids_[last_idx] = ids_[i];
                removed_points_.reset(last_idx);
                ++last_idx;
            }
        }
        points_.resize(last_idx);
        ids_.resize(last_idx);
        removed_points_.resize(last_idx);
        size_ = last_idx;
        removed_count_ = 0;
    }

    // Member-wise swap; used by concrete indexes for copy-and-swap.
    void swap(NNIndex& other)
    {
        std::swap(distance_, other.distance_);
        std::swap(last_id_, other.last_id_);
        std::swap(size_, other.size_);
        std::swap(size_at_build_, other.size_at_build_);
        std::swap(veclen_, other.veclen_);
        std::swap(index_params_, other.index_params_);
        std::swap(removed_, other.removed_);
        std::swap(removed_points_, other.removed_points_);
        std::swap(removed_count_, other.removed_count_);
        std::swap(ids_, other.ids_);
        std::swap(points_, other.points_);
        std::swap(data_ptr_, other.data_ptr_);
    }

protected:

    /**
     * The distance functor
     */
    Distance distance_;

    /**
     * Each index point has an associated ID. IDs are assigned sequentially in
     * increasing order. This indicates the ID assigned to the last point added to the
     * index.
     */
    size_t last_id_;

    /**
     * Number of points in the index (and database)
     */
    size_t size_;

    /**
     * Number of features in the dataset when the index was last built.
     */
    size_t size_at_build_;

    /**
     * Size of one point in the index (and database)
     */
    size_t veclen_;

    /**
     * Parameters of the index.
     */
    IndexParams index_params_;

    /**
     * Flag indicating if at least a point was removed from the index
     */
    bool removed_;

    /**
     * Array used to mark points removed from the index
     */
    DynamicBitset removed_points_;

    /**
     * Number of points removed from the index
     */
    size_t removed_count_;

    /**
     * Array of point IDs, returned by nearest-neighbour operations
     */
    std::vector<size_t> ids_;

    /**
     * Point data
     */
    std::vector<ElementType*> points_;

    /**
     * Pointer to dataset memory if allocated by this index, otherwise NULL
     */
    ElementType* data_ptr_;

};
#define USING_BASECLASS_SYMBOLS \
using NNIndex<Distance>::distance_;\
using NNIndex<Distance>::size_;\
using NNIndex<Distance>::size_at_build_;\
using NNIndex<Distance>::veclen_;\
using NNIndex<Distance>::index_params_;\
using NNIndex<Distance>::removed_points_;\
using NNIndex<Distance>::ids_;\
using NNIndex<Distance>::removed_;\
using NNIndex<Distance>::points_;\
using NNIndex<Distance>::extendDataset;\
using NNIndex<Distance>::setDataset;\
using NNIndex<Distance>::cleanRemovedPoints;\
using NNIndex<Distance>::indices_to_ids;
}
#endif //FLANN_NNINDEX_H
|
grib_bits_fast_big_endian_omp.c | /*
* Copyright 2005-2017 ECMWF.
*
* This software is licensed under the terms of the Apache Licence Version 2.0
* which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
*
* In applying this licence, ECMWF does not waive the privileges and immunities granted to it by
* virtue of its status as an intergovernmental organisation nor does it submit to any jurisdiction.
*/
/***************************************************************************
* Enrico Fucile - 19.06.2007 *
* *
***************************************************************************/
/* Decode `size` consecutive unsigned fields of `nbits` bits each, starting
 * at bit offset *bitp in buffer p, into val[]. Advances *bitp past the data.
 * Relies on externally-defined max_nbits (bits per machine word) and the
 * VALUE bit-extraction macro.
 * NOTE(review): p is reinterpreted as long* — assumes suitable alignment
 * and big-endian word layout (this is the big-endian variant); strict
 * aliasing is technically violated. startByte and uval are unused. */
int grib_decode_long_array(const unsigned char* p, long *bitp, long nbits,size_t size,long* val) {
    long i=0;
    long countOfLeftmostBits=0,leftmostBits=0;
    long startBit,startByte;
    long remainingBits = nbits;
    long *pp=(long*)p;
    int inited=0;
    unsigned long uval=0;

    /* Fast path: fields never straddle a word boundary
     * (word size is a multiple of nbits and the start is field-aligned). */
    if ( (max_nbits%nbits == 0) && (*bitp%nbits == 0) ) {
        /* firstprivate(inited,pp): each OpenMP thread recomputes its own
         * starting word/bit from its first iteration index, then carries
         * pp/startBit across its contiguous schedule(static) chunk. */
#pragma omp parallel for schedule(static) firstprivate(inited,pp) private(startBit,countOfLeftmostBits,remainingBits,leftmostBits)
        for (i=0;i<size;i++) {
            if (!inited) {
                startBit=*bitp+i*nbits;
                remainingBits = nbits;
                if (startBit >= max_nbits) {
                    pp+=startBit/max_nbits;
                    startBit %= max_nbits;
                }
                inited=1;
            }
            if (startBit == max_nbits) {
                startBit = 0;
                pp++;
            }
            val[i]=VALUE(*pp,startBit,remainingBits);
            startBit+=remainingBits;
            remainingBits=nbits;
        }
    } else {
        /* General path: a field may span two words; combine the leftmost
         * bits from the current word with the rightmost from the next. */
#pragma omp parallel for schedule(static) firstprivate(inited,pp) private(startBit,countOfLeftmostBits,remainingBits,leftmostBits)
        for (i=0;i<size;i++) {
            if (!inited) {
                startBit=*bitp+i*nbits;
                remainingBits = nbits;
                if (startBit >= max_nbits) {
                    pp+=startBit/max_nbits;
                    startBit %= max_nbits;
                }
                inited=1;
            }
            countOfLeftmostBits = startBit + remainingBits;
            if (countOfLeftmostBits > max_nbits) {
                countOfLeftmostBits = max_nbits - startBit;
                remainingBits -= countOfLeftmostBits;
                /* NOTE(review): if VALUE is a macro that evaluates its first
                 * argument more than once, *(pp++) increments pp multiple
                 * times — the _complex variant below increments pp on a
                 * separate statement instead; confirm VALUE's definition. */
                leftmostBits=(VALUE(*(pp++),startBit,countOfLeftmostBits)) << remainingBits;
                startBit = 0;
            } else
                leftmostBits = 0;

            val[i]=leftmostBits+(VALUE(*pp,startBit,remainingBits));
            startBit+=remainingBits;
            remainingBits=nbits;
        }
    }

    *bitp+=size*nbits;
    return GRIB_SUCCESS;
}
/* Decode `size` packed fields of `nbits` bits into doubles, applying the
 * GRIB simple-packing transform val = raw*s*d + reference_value*d, and
 * advance *bitp. Same word-walking scheme and OpenMP per-thread re-init
 * pattern as grib_decode_long_array above.
 * NOTE(review): startByte and uval are unused; (long*)p cast assumes
 * alignment and big-endian layout. */
int grib_decode_double_array(const unsigned char* p, long *bitp, long nbits,double reference_value,double s,double d,size_t size,double* val) {
    long i=0;
    long countOfLeftmostBits=0,leftmostBits=0;
    long startBit,startByte;
    long remainingBits = nbits;
    long *pp=(long*)p;
    int inited=0;
    unsigned long uval=0;
    /* Hoisted scale and offset: raw*fact + bias == (raw*s + ref)*d. */
    double fact=s*d;
    double bias=reference_value*d;

    /* Fast path: fields are word-aligned and never straddle a boundary. */
    if ( (max_nbits%nbits == 0) && (*bitp%nbits == 0) ) {
#pragma omp parallel for schedule(static) firstprivate(inited,pp) private(startBit,countOfLeftmostBits,remainingBits,leftmostBits)
        for (i=0;i<size;i++) {
            if (!inited) {
                /* First iteration in this thread: derive word/bit position
                 * from the global bit offset of element i. */
                startBit=*bitp+i*nbits;
                remainingBits = nbits;
                if (startBit >= max_nbits) {
                    pp+=startBit/max_nbits;
                    startBit %= max_nbits;
                }
                inited=1;
            }
            if (startBit == max_nbits) {
                startBit = 0;
                pp++;
            }
            val[i]=VALUE(*pp,startBit,remainingBits);
            val[i]= val[i] * fact + bias ;
            startBit+=remainingBits;
            remainingBits=nbits;
        }
    } else {
        /* General path: fields may span two machine words. */
#pragma omp parallel for schedule(static) firstprivate(inited,pp) private(startBit,countOfLeftmostBits,remainingBits,leftmostBits)
        for (i=0;i<size;i++) {
            if (!inited) {
                startBit=*bitp+i*nbits;
                remainingBits = nbits;
                if (startBit >= max_nbits) {
                    pp+=startBit/max_nbits;
                    startBit %= max_nbits;
                }
                inited=1;
            }
            countOfLeftmostBits = startBit + remainingBits;
            if (countOfLeftmostBits > max_nbits) {
                countOfLeftmostBits = max_nbits - startBit;
                remainingBits -= countOfLeftmostBits;
                /* NOTE(review): *(pp++) inside VALUE — see the
                 * multiple-evaluation caveat in grib_decode_long_array. */
                leftmostBits=(VALUE(*(pp++),startBit,countOfLeftmostBits)) << remainingBits;
                startBit = 0;
            } else
                leftmostBits = 0;

            val[i]=leftmostBits+(VALUE(*pp,startBit,remainingBits));
            val[i]= val[i] * fact + bias ;
            startBit+=remainingBits;
            remainingBits=nbits;
        }
    }

    *bitp+=size*nbits;
    return GRIB_SUCCESS;
}
/* Decode `size` packed fields into doubles for complex packing:
 * val[i] = (raw*s + reference_value) * d[i/2], i.e. a per-pair scale
 * factor from d[]. Advances *bitp. Same word-walking and OpenMP
 * per-thread re-init pattern as the functions above.
 * NOTE(review): uval is unused; (long*)p cast assumes alignment and
 * big-endian layout. */
int grib_decode_double_array_complex(const unsigned char* p, long *bitp, long nbits,double reference_value,double s,double* d,size_t size,double* val) {
    long i=0;
    long countOfLeftmostBits=0,leftmostBits=0;
    long startBit;
    long remainingBits = nbits;
    long *pp=(long*)p;
    int inited=0;
    unsigned long uval=0;

    /* Fast path: fields never straddle a word boundary. */
    if ( (max_nbits%nbits == 0) && (*bitp%nbits == 0) ) {
#pragma omp parallel for schedule(static) firstprivate(inited,pp) private(startBit,countOfLeftmostBits,remainingBits,leftmostBits)
        for (i=0;i<size;i++) {
            if (!inited) {
                startBit=*bitp+i*nbits;
                remainingBits = nbits;
                if (startBit >= max_nbits) {
                    pp+=startBit/max_nbits;
                    startBit %= max_nbits;
                }
                inited=1;
            }
            if (startBit == max_nbits) {
                startBit = 0;
                pp++;
            }
            val[i]=VALUE(*pp,startBit,remainingBits);
            val[i]= ((( (val[i]) * s)+reference_value)*d[i/2]);
            startBit+=remainingBits;
            remainingBits=nbits;
        }
    } else {
        /* General path: fields may span two machine words. */
#pragma omp parallel for schedule(static) firstprivate(inited,pp) private(startBit,countOfLeftmostBits,remainingBits,leftmostBits)
        for (i=0;i<size;i++) {
            if (!inited) {
                startBit=*bitp+i*nbits;
                remainingBits = nbits;
                if (startBit >= max_nbits) {
                    pp+=startBit/max_nbits;
                    startBit %= max_nbits;
                }
                inited=1;
            }
            countOfLeftmostBits = startBit + remainingBits;
            if (countOfLeftmostBits > max_nbits) {
                countOfLeftmostBits = max_nbits - startBit;
                remainingBits -= countOfLeftmostBits;
                /* pp advanced on its own statement here (unlike the
                 * *(pp++)-inside-VALUE form used by the siblings), which
                 * is safe regardless of how often VALUE evaluates its
                 * first argument. */
                leftmostBits=(VALUE(*pp,startBit,countOfLeftmostBits)) << remainingBits;
                startBit = 0;
                pp++;
            } else
                leftmostBits = 0;

            val[i]=leftmostBits+(VALUE(*pp,startBit,remainingBits));
            val[i]= ((( (val[i]) * s)+reference_value)*d[i/2]);
            startBit+=remainingBits;
            remainingBits=nbits;
        }
    }

    *bitp+=size*nbits;
    return GRIB_SUCCESS;
}
/* Encode n_vals doubles as consecutive `nbits`-bit unsigned fields starting
 * at bit *bitp of p, using the GRIB simple-packing transform
 * uval = round(((v*d) - reference_value) * divisor). Advances *bitp.
 * Serial (no OpenMP): each write read-modify-writes shared words.
 * Relies on external max_nbits, VALUE and MASKVALUE macros.
 * NOTE(review): rounding by +0.5 assumes the scaled value is non-negative.
 * `rightmostBits << max_nbits-(...)` parses as
 * `rightmostBits << (max_nbits - (...))` since - binds tighter than <<. */
int grib_encode_double_array(size_t n_vals,const double* val,long nbits,double reference_value,double d,double divisor,unsigned char* p,long *bitp)
{
    long* destination = (long*)p;
    double* v=(double*)val;
    long countOfLeftmostBits=0,startBit=0,remainingBits=0,rightmostBits=0;
    unsigned long uval=0;
    size_t i=0;

    startBit=*bitp;
    remainingBits = nbits;

    /* Convert the global bit offset to a word pointer + in-word offset. */
    if (startBit >= max_nbits) {
        destination += startBit / max_nbits;
        startBit %= max_nbits;
    }

    /* Fast path: fields are word-aligned and never straddle a boundary. */
    if ( (max_nbits%nbits == 0) && (*bitp%nbits == 0) ) {
        for(i=0;i< n_vals;i++){
            uval = (unsigned long)(((((*v)*d)-reference_value)*divisor)+0.5);
            if (startBit == max_nbits) {
                startBit = 0;
                destination++;
            }
            /* Clear the target bit range, then OR-in (via +) the new bits. */
            rightmostBits = VALUE(uval,max_nbits-remainingBits,remainingBits);
            *destination = ((*destination) & ~MASKVALUE(startBit,remainingBits))
                           + (rightmostBits << max_nbits-(remainingBits+startBit));
            startBit+=remainingBits;
            remainingBits=nbits;
            v++;
        }
    } else {
        /* General path: a field may span two words; write the leftmost
         * part into the current word, the rest into the next. */
        for(i=0;i< n_vals;i++){
            countOfLeftmostBits = startBit + remainingBits;
            uval = (unsigned long)(((((*v)*d)-reference_value)*divisor)+0.5);
            if (countOfLeftmostBits > max_nbits) {
                countOfLeftmostBits = max_nbits - startBit;
                startBit = max_nbits - remainingBits;
                remainingBits -= countOfLeftmostBits;
                *destination = (((*destination) >> countOfLeftmostBits) << countOfLeftmostBits)
                               + (VALUE(uval,startBit,countOfLeftmostBits));
                startBit = 0;
                destination++;
            }
            rightmostBits = VALUE(uval,max_nbits-remainingBits,remainingBits);
            *destination = ((*destination) & ~MASKVALUE(startBit,remainingBits))
                           + (rightmostBits << max_nbits-(remainingBits+startBit));
            startBit+=remainingBits;
            remainingBits=nbits;
            v++;
        }
    }

    *bitp+=n_vals*nbits;
    return GRIB_SUCCESS;
}
/* Encode n_vals doubles for complex packing: like grib_encode_double_array
 * but each value is additionally scaled by the per-pair factor scal[i/2]
 * before packing: uval = round(((v*d*scal[i/2]) - reference_value)*divisor).
 * Advances *bitp. Serial, same two-path word-walking scheme as above.
 * NOTE(review): returns a literal 0 where its sibling returns GRIB_SUCCESS
 * — presumably the same value; confirm against the GRIB error codes. */
int grib_encode_double_array_complex(size_t n_vals,double* val,long nbits,double reference_value,
                                     double* scal,double d,double divisor,unsigned char* p,long *bitp) {
    long* destination = (long*)p;
    double* v=val;
    long countOfLeftmostBits=0,startBit=0,remainingBits=0,rightmostBits=0;
    unsigned long uval=0;
    size_t i=0;

    startBit=*bitp;
    remainingBits = nbits;

    /* Convert the global bit offset to a word pointer + in-word offset. */
    if (startBit >= max_nbits) {
        destination += startBit / max_nbits;
        startBit %= max_nbits;
    }

    /* Fast path: fields are word-aligned and never straddle a boundary. */
    if ( (max_nbits%nbits == 0) && (*bitp%nbits == 0) ) {
        for(i=0;i< n_vals;i++) {
            uval = (unsigned long)(((((*v)*d*scal[i/2])-reference_value)*divisor)+0.5);
            if (startBit == max_nbits) {
                startBit = 0;
                destination++;
            }
            rightmostBits = VALUE(uval,max_nbits-remainingBits,remainingBits);
            *destination = ((*destination) & ~MASKVALUE(startBit,remainingBits))
                           + (rightmostBits << max_nbits-(remainingBits+startBit));
            startBit+=remainingBits;
            remainingBits=nbits;
            v++;
        }
    } else {
        /* General path: a field may span two words. */
        for(i=0;i< n_vals;i++) {
            countOfLeftmostBits = startBit + remainingBits;
            uval = (unsigned long)(((((*v)*d*scal[i/2])-reference_value)*divisor)+0.5);
            if (countOfLeftmostBits > max_nbits) {
                countOfLeftmostBits = max_nbits - startBit;
                startBit = max_nbits - remainingBits;
                remainingBits -= countOfLeftmostBits;
                *destination = (((*destination) >> countOfLeftmostBits) << countOfLeftmostBits)
                               + (VALUE(uval,startBit,countOfLeftmostBits));
                startBit = 0;
                destination++;
            }
            rightmostBits = VALUE(uval,max_nbits-remainingBits,remainingBits);
            *destination = ((*destination) & ~MASKVALUE(startBit,remainingBits))
                           + (rightmostBits << max_nbits-(remainingBits+startBit));
            startBit+=remainingBits;
            remainingBits=nbits;
            v++;
        }
    }

    *bitp+=n_vals*nbits;
    return 0;
}
|
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Race condition on outLen due to unprotected writes.
Adding private (outLen) can avoid race condition. But it is wrong semantically.
Data race pairs: we allow two pair to preserve the original code pattern.
1. outLen@72:12 vs. outLen@72:12
2. output[]@72:5 vs. output[]@72:5
*/
#include <stdlib.h>
#include <stdio.h>
/* DataRaceBench DRB019 kernel: copy input[] to output[] through a shared
 * cursor (outLen).  The file header documents an intentional data race on
 * outLen and output[] in the copy loop.
 * NOTE(review): in this copy only the initialization loop carries an
 * "omp parallel for" pragma; upstream DRB019 places it on the copy loop,
 * which is what makes outLen++ race -- confirm against upstream. */
int main(int argc, char* argv[])
{
int i ;
int inLen=1000 ;   /* default problem size; overridable via argv[1] */
int outLen = 0;    /* shared output cursor */
if (argc>1)
inLen= atoi(argv[1]);
int input[inLen];  /* VLAs: a large inLen can overflow the stack */
int output[inLen];
#pragma omp parallel for private(i)
for (i=0; i<inLen; ++i)
input[i]=i;
/* Sequential here; racy when parallelized (outLen++ is not atomic). */
for (i=0; i<inLen; ++i) {
output[outLen++] = input[i] ;
}
printf("output[0]=%d\n", output[0]);
return 0;
}
|
/* OpenMP TASK Example: Computing Fibonacci Numbers */
#include <stdio.h>
#include <omp.h>
/*
 * Compute the n-th Fibonacci number, spawning each recursive call as an
 * OpenMP task.  The base case n < 2 returns n directly; otherwise both
 * subproblems run as tasks and taskwait joins them before summing.
 * Compiled without OpenMP the pragmas are ignored and the function runs
 * as plain recursion with the same result.
 */
int fib(int n) {
  int left, right;
  if (n < 2)
    return n;
#pragma omp task shared(left) firstprivate(n)
  left = fib(n - 1);
#pragma omp task shared(right) firstprivate(n)
  right = fib(n - 2);
#pragma omp taskwait
  return left + right;
}
/*
 * Driver: evaluate fib(10) inside a parallel region.  The single
 * construct makes exactly one thread start the task tree; the other
 * team members pick up the generated tasks.
 */
int main() {
  int n = 10;
  omp_set_dynamic(0);      /* keep the requested team size */
  omp_set_num_threads(4);
#pragma omp parallel shared(n)
  {
#pragma omp single
    {
      int result = fib(n);
      printf("fib(%d) = %d\n", n, result);
    }
  }
  return 0;
}
|
/* =============================================================================
*
* intruder.c
*
* =============================================================================
*
* Copyright (C) Stanford University, 2006. All Rights Reserved.
* Author: Chi Cao Minh
*
* =============================================================================
*
* For the license of bayes/sort.h and bayes/sort.c, please see the header
* of the files.
*
* ------------------------------------------------------------------------
*
* For the license of kmeans, please see kmeans/LICENSE.kmeans
*
* ------------------------------------------------------------------------
*
* For the license of ssca2, please see ssca2/COPYRIGHT
*
* ------------------------------------------------------------------------
*
* For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the
* header of the files.
*
* ------------------------------------------------------------------------
*
* For the license of lib/rbtree.h and lib/rbtree.c, please see
* lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree
*
* ------------------------------------------------------------------------
*
* Unless otherwise noted, the following license applies to STAMP files:
*
* Copyright (c) 2007, Stanford University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Stanford University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* =============================================================================
*/
#include <assert.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include "decoder.h"
#include "detector.h"
#include "dictionary.h"
#include "packet.h"
#include "stream.h"
#include "thread.h"
#include "timer.h"
#include "tm.h"
/* Option characters double as direct indices into global_params[]. */
enum param_types {
PARAM_ATTACK = (unsigned char)'a',
PARAM_LENGTH = (unsigned char)'l',
PARAM_NUM = (unsigned char)'n',
PARAM_SEED = (unsigned char)'s',
PARAM_THREAD = (unsigned char)'t',
};
/* Defaults applied when an option is not given on the command line. */
enum param_defaults {
PARAM_DEFAULT_ATTACK = 10,
PARAM_DEFAULT_LENGTH = 16,
PARAM_DEFAULT_NUM = 1 << 20,
PARAM_DEFAULT_SEED = 1,
PARAM_DEFAULT_THREAD = 1,
};
long global_params[256] = { /* 256 = ascii limit */
[PARAM_ATTACK] = PARAM_DEFAULT_ATTACK,
[PARAM_LENGTH] = PARAM_DEFAULT_LENGTH,
[PARAM_NUM] = PARAM_DEFAULT_NUM,
[PARAM_SEED] = PARAM_DEFAULT_SEED,
[PARAM_THREAD] = PARAM_DEFAULT_THREAD,
};
/* Per-run arguments handed to every worker thread. */
typedef struct arg {
/* input: */
stream_t* streamPtr;     /* packet stream shared by all threads */
decoder_t* decoderPtr;   /* shared flow-reassembly state */
/* output: */
vector_t** errorVectors; /* one error vector per thread, indexed by thread id */
} arg_t;
/* =============================================================================
* displayUsage
* =============================================================================
*/
/* Print the usage banner and the option list (with the default shown for
 * each option), then terminate with exit status 1. */
static void
displayUsage (const char* appName)
{
    /* Format string + default value for each option line, in print order. */
    static const struct { const char* fmt; int dflt; } optionLines[] = {
        { " a <UINT> Percent [a]ttack (%i)\n", PARAM_DEFAULT_ATTACK },
        { " l <UINT> Max data [l]ength (%i)\n", PARAM_DEFAULT_LENGTH },
        { " n <UINT> [n]umber of flows (%i)\n", PARAM_DEFAULT_NUM },
        { " s <UINT> Random [s]eed (%i)\n", PARAM_DEFAULT_SEED },
        { " t <UINT> Number of [t]hreads (%i)\n", PARAM_DEFAULT_THREAD },
    };
    size_t k;
    printf("Usage: %s [options]\n", appName);
    puts("\nOptions: (defaults)\n");
    for (k = 0; k < sizeof(optionLines)/sizeof(optionLines[0]); k++) {
        printf(optionLines[k].fmt, optionLines[k].dflt);
    }
    exit(1);
}
/* =============================================================================
* parseArgs
* =============================================================================
*/
/* Fill global_params[] from the command line via getopt; any unknown
 * option or leftover non-option argument bumps opterr and triggers
 * displayUsage (which exits). */
static void
parseArgs (long argc, char* const argv[])
{
    long opt;
    long k;
    opterr = 0;
    while ((opt = getopt(argc, argv, "a:l:n:s:t:")) != -1) {
        /* Every accepted option stores its numeric argument directly
         * under its own option character. */
        if (opt == 'a' || opt == 'l' || opt == 'n' ||
            opt == 's' || opt == 't') {
            global_params[(unsigned char)opt] = atol(optarg);
        } else {
            opterr++; /* '?' or anything unexpected */
        }
    }
    for (k = optind; k < argc; k++) {
        fprintf(stderr, "Non-option argument: %s\n", argv[k]);
        opterr++;
    }
    if (opterr) {
        displayUsage(argv[0]);
    }
}
/* =============================================================================
* processPackets
* =============================================================================
*/
/* Worker body run by every thread: repeatedly pull a packet from the
 * shared stream, feed it to the shared decoder inside a transaction, and
 * run the intrusion detector on each fully reassembled flow.  Flow ids
 * flagged by the detector are appended to this thread's private error
 * vector.  argPtr is an arg_t*. */
void
processPackets (void* argPtr)
{
TM_THREAD_ENTER();
long threadId = thread_getId();
stream_t* streamPtr = ((arg_t*)argPtr)->streamPtr;
decoder_t* decoderPtr = ((arg_t*)argPtr)->decoderPtr;
vector_t** errorVectors = ((arg_t*)argPtr)->errorVectors;
/* Detector state is thread-private; preprocessing lower-cases the data. */
detector_t* detectorPtr = PDETECTOR_ALLOC();
assert(detectorPtr);
PDETECTOR_ADDPREPROCESSOR(detectorPtr, &preprocessor_toLower);
vector_t* errorVectorPtr = errorVectors[threadId];
while (1) {
char* bytes;
TM_BEGIN();
bytes = TMSTREAM_GETPACKET(streamPtr);
TM_END();
if (!bytes) {
break; /* stream drained: nothing left to process */
}
packet_t* packetPtr = (packet_t*)bytes;
long flowId = packetPtr->flowId;
error_t error;
TM_BEGIN();
error = TMDECODER_PROCESS(decoderPtr,
bytes,
(PACKET_HEADER_LENGTH + packetPtr->length));
TM_END();
if (error) {
/*
* Currently, stream_generate() does not create these errors.
*/
assert(0);
bool_t status = PVECTOR_PUSHBACK(errorVectorPtr, (void*)flowId);
assert(status);
}
/* A non-NULL result is a completely reassembled flow payload. */
char* data;
long decodedFlowId;
TM_BEGIN();
data = TMDECODER_GETCOMPLETE(decoderPtr, &decodedFlowId);
TM_END();
if (data) {
error_t error = PDETECTOR_PROCESS(detectorPtr, data);
P_FREE(data);
if (error) {
bool_t status = PVECTOR_PUSHBACK(errorVectorPtr,
(void*)decodedFlowId);
assert(status);
}
}
}
PDETECTOR_FREE(detectorPtr);
TM_THREAD_EXIT();
}
/* =============================================================================
* main
* =============================================================================
*/
/* Program entry point (MAIN expands to the simulator/TM-aware main).
 * Builds a synthetic packet stream, runs the parallel detection phase,
 * verifies every flagged flow really is an attack, then tears down. */
MAIN(argc, argv)
{
char exitmsg[1024];
GOTO_REAL();
load_syncchar_map("sync_char.map.intruder");
/*
* Initialization
*/
parseArgs(argc, (char** const)argv);
sprintf(exitmsg, "END BENCHMARK %s-parallel-phase\n", argv[0]);
long numThread = global_params[PARAM_THREAD];
SIM_GET_NUM_CPU(numThread);
TM_STARTUP(numThread);
P_MEMORY_STARTUP(numThread);
thread_startup(numThread);
long percentAttack = global_params[PARAM_ATTACK];
long maxDataLength = global_params[PARAM_LENGTH];
long numFlow = global_params[PARAM_NUM];
long randomSeed = global_params[PARAM_SEED];
printf("Percent attack = %li\n", percentAttack);
printf("Max data length = %li\n", maxDataLength);
printf("Num flow = %li\n", numFlow);
printf("Random seed = %li\n", randomSeed);
dictionary_t* dictionaryPtr = dictionary_alloc();
assert(dictionaryPtr);
stream_t* streamPtr = stream_alloc(percentAttack);
assert(streamPtr);
/* Generate the packet stream; returns how many flows are attacks. */
long numAttack = stream_generate(streamPtr,
dictionaryPtr,
numFlow,
randomSeed,
maxDataLength);
printf("Num attack = %li\n", numAttack);
decoder_t* decoderPtr = decoder_alloc();
assert(decoderPtr);
/* One private error vector per thread avoids sharing on the output side. */
vector_t** errorVectors = (vector_t**)malloc(numThread * sizeof(vector_t*));
assert(errorVectors);
long i;
for (i = 0; i < numThread; i++) {
vector_t* errorVectorPtr = vector_alloc(numFlow);
assert(errorVectorPtr);
errorVectors[i] = errorVectorPtr;
}
arg_t arg;
arg.streamPtr = streamPtr;
arg.decoderPtr = decoderPtr;
arg.errorVectors = errorVectors;
/*
* Run transactions
*/
TIMER_T startTime;
TIMER_READ(startTime);
OSA_PRINT("entering parallel phase\n",0);
START_INSTRUMENTATION();
GOTO_SIM();
#ifdef OTM
#pragma omp parallel
{
processPackets((void*)&arg);
}
#else
thread_start(processPackets, (void*)&arg);
#endif
GOTO_REAL();
OSA_PRINT("exiting parallel phase\n",0);
OSA_PRINT(exitmsg,0);
STOP_INSTRUMENTATION();;
TIMER_T stopTime;
TIMER_READ(stopTime);
printf("Elapsed time = %f seconds\n", TIMER_DIFF_SECONDS(startTime, stopTime));
/*
* Check solution
*/
long numFound = 0;
for (i = 0; i < numThread; i++) {
vector_t* errorVectorPtr = errorVectors[i];
long e;
long numError = vector_getSize(errorVectorPtr);
numFound += numError;
/* Every reported flow id must be a genuine attack in the stream. */
for (e = 0; e < numError; e++) {
long flowId = (long)vector_at(errorVectorPtr, e);
bool_t status = stream_isAttack(streamPtr, flowId);
assert(status);
}
}
printf("Num found = %li\n", numFound);
assert(numFound == numAttack);
/*
* Clean up
*/
for (i = 0; i < numThread; i++) {
vector_free(errorVectors[i]);
}
free(errorVectors);
decoder_free(decoderPtr);
stream_free(streamPtr);
dictionary_free(dictionaryPtr);
TM_SHUTDOWN();
P_MEMORY_SHUTDOWN();
GOTO_SIM();
thread_shutdown();
MAIN_RETURN(0);
}
/* =============================================================================
*
* End of intruder.c
*
* =============================================================================
*/
|
// This code is part of the project "Ligra: A Lightweight Graph Processing
// Framework for Shared Memory", presented at Principles and Practice of
// Parallel Programming, 2013.
// Copyright (c) 2013 Julian Shun and Guy Blelloch
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights (to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <cmath>
#include <sys/mman.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <parallel/algorithm>
#include <omp.h>
#include <cassert>
#include "parallel.h"
#include "blockRadixSort.h"
#include "quickSort.h"
#include "utils.h"
#include "graph.h"
#include "pvector.h"
#include "timer.h"
#include "sliding_queue.h"
using namespace std;
// Edge-list element types: (src,dst) for unweighted edges and
// (src,(dst,weight)) for weighted edges.
typedef pair<uintE,uintE> intPair;
typedef pair<uintE, pair<uintE,intE> > intTriple;
// NOTE(review): THRESHOLD is not referenced in this chunk -- presumably
// consumed elsewhere in the file; confirm before changing.
const double THRESHOLD {4.0f};
// Order pairs by first component only (sorts edges by source vertex).
template <class E>
struct pairFirstCmp {
bool operator() (pair<uintE,E> a, pair<uintE,E> b) {
return a.first < b.first; }
};
// Key extractor for the integer sort: the pair's first component.
template <class E>
struct getFirst {uintE operator() (pair<uintE,E> a) {return a.first;} };
// Lexicographic comparison on both components.
template <class IntType>
struct pairBothCmp {
bool operator() (pair<uintE,IntType> a, pair<uintE,IntType> b) {
if (a.first != b.first) return a.first < b.first;
return a.second < b.second;
}
};
// A structure that keeps a sequence of strings all allocated from
// the same block of memory
// A sequence of strings all allocated from the same block of memory.
// Owns both buffers; del() releases them (callers must not free the
// individual strings).
struct words {
  long n; // total number of characters
  char* Chars; // array storing all strings
  long m; // number of substrings
  char** Strings; // pointers to strings (all should be null terminated)
  // Default-construct to a well-defined empty state (the original left
  // all members indeterminate, making del() on an unset object UB).
  words() : n(0), Chars(NULL), m(0), Strings(NULL) {}
  // Mem-initializers listed in declaration order (fixes -Wreorder).
  words(char* C, long nn, char** S, long mm)
    : n(nn), Chars(C), m(mm), Strings(S) {}
  void del() {free(Chars); free(Strings);}
};
// True for the delimiters recognized by the tokenizer: CR, LF, tab,
// blank, and the NUL terminator.  Note: narrower than std::isspace --
// vertical tab and form feed are not treated as separators.
inline bool isSpace(char c) {
  return c == '\r' || c == '\n' || c == '\t' || c == ' ' || c == 0;
}
// Memory-map the named file read-only and return the mapping as a
// character sequence of the file's size.  Any failure -- open, fstat,
// a non-regular file, mmap, or close -- reports via perror and
// terminates the process.  The caller owns the mapping (munmap).
_seq<char> mmapStringFromFile(const char *filename) {
  int fd = open(filename, O_RDONLY);
  if (fd == -1) {
    perror("open");
    exit(-1);
  }
  struct stat sb;
  if (fstat(fd, &sb) == -1) {
    perror("fstat");
    exit(-1);
  }
  if (!S_ISREG (sb.st_mode)) {
    perror("not a file\n");
    exit(-1);
  }
  char *mapped =
      static_cast<char*>(mmap(0, sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0));
  if (mapped == MAP_FAILED) {
    perror("mmap");
    exit(-1);
  }
  // The mapping survives closing the descriptor.
  if (close(fd) == -1) {
    perror("close");
    exit(-1);
  }
  return _seq<char>(mapped, static_cast<size_t>(sb.st_size));
}
// Read the whole file into a freshly allocated buffer (newA) and return
// it together with its length; aborts if the file cannot be opened.
// The caller owns the returned buffer.
_seq<char> readStringFromFile(char *fileName) {
  ifstream file (fileName, ios::in | ios::binary | ios::ate);
  if (!file.is_open()) {
    std::cout << "Unable to open file: " << fileName << std::endl;
    abort();
  }
  // Opened at the end (ios::ate): the current position is the file size.
  long fileEnd = file.tellg();
  file.seekg(0, ios::beg);
  long len = fileEnd - file.tellg();
  char* buf = newA(char, len + 1);
  assert(buf != NULL && "Malloc failure\n");
  file.read(buf, len);
  file.close();
  return _seq<char>(buf, len);
}
// parallel code for converting a string to words
// Parallel tokenizer: zero-terminates every whitespace character in Str
// in place, then returns a words record pointing at the start of each
// token.  Ownership of Str transfers to the returned words (released by
// words::del()).
words stringToWords(char *Str, long n) {
{parallel_for (long i=0; i < n; i++)
if (isSpace(Str[i])) Str[i] = 0; }
// mark start of words
bool *FL = newA(bool,n);
assert(FL != NULL && "Malloc failure\n");
FL[0] = Str[0];
{parallel_for (long i=1; i < n; i++) FL[i] = Str[i] && !Str[i-1];}
// offset for each start of word
_seq<long> Off = sequence::packIndex<long>(FL, n);
free(FL);
long m = Off.n;
long *offsets = Off.A;
// pointer to each start of word
char **SA = newA(char*, m);
assert(SA != NULL && "Malloc failure\n");
{parallel_for (long j=0; j < m; j++) SA[j] = Str+offsets[j];}
free(offsets);
return words(Str,n,SA,m);
}
// Parse an (Weighted)AdjacencyGraph text file into a CSR graph.  Layout:
// header token, n, m, then n out-degree offsets, then m destination ids
// (plus m weights when WEIGHTED).  For asymmetric graphs the transpose
// (in-edges) is built by sorting the edge list by destination.
template <class vertex>
graph<vertex> readGraphFromFile(char* fname, bool isSymmetric, bool mmap) {
Timer t;
t.Start();
words W;
if (mmap) {
_seq<char> S = mmapStringFromFile(fname);
char *bytes = newA(char, S.n);
assert(bytes != NULL && "Malloc failure\n");
// Cannot mutate the graph unless we copy.
parallel_for(size_t i=0; i<S.n; i++) {
bytes[i] = S.A[i];
}
if (munmap(S.A, S.n) == -1) {
perror("munmap");
exit(-1);
}
S.A = bytes;
W = stringToWords(S.A, S.n);
} else {
_seq<char> S = readStringFromFile(fname);
W = stringToWords(S.A, S.n);
}
// Validate the header token and the token count against n and m.
#ifndef WEIGHTED
if (W.Strings[0] != (string) "AdjacencyGraph") {
#else
if (W.Strings[0] != (string) "WeightedAdjacencyGraph") {
#endif
cout << "Bad input file" << endl;
abort();
}
long len = W.m -1;
long n = atol(W.Strings[1]);
long m = atol(W.Strings[2]);
#ifndef WEIGHTED
if (len != n + m + 2) {
#else
if (len != n + 2*m + 2) {
#endif
cout << "Bad input file" << endl;
abort();
}
uintT* offsets = newA(uintT,n);
assert(offsets != NULL && "Malloc failure\n");
#ifndef WEIGHTED
uintE* edges = newA(uintE,m);
#else
intE* edges = newA(intE,2*m);
#endif
assert(edges != NULL && "Malloc failure\n");
// Convert the offset and edge tokens to integers in parallel.
{parallel_for(long i=0; i < n; i++) offsets[i] = atol(W.Strings[i + 3]);}
{parallel_for(long i=0; i<m; i++) {
#ifndef WEIGHTED
edges[i] = atol(W.Strings[i+n+3]);
#else
edges[2*i] = atol(W.Strings[i+n+3]);
edges[2*i+1] = atol(W.Strings[i+n+m+3]);
#endif
}}
//W.del(); // to deal with performance bug in malloc
W.del(); //The original code ^ commented this out
// Wire up out-degrees and out-neighbor slices from the offsets.
vertex* v = newA(vertex,n);
assert(v != NULL && "Malloc failure\n");
{parallel_for (uintT i=0; i < n; i++) {
uintT o = offsets[i];
uintT l = ((i == n-1) ? m : offsets[i+1])-offsets[i];
v[i].setOutDegree(l);
#ifndef WEIGHTED
v[i].setOutNeighbors(edges+o);
#else
v[i].setOutNeighbors(edges+2*o);
#endif
}}
if(!isSymmetric) {
// Build the transpose: emit (dst,src[,w]) records, sort by dst, then
// derive in-offsets and in-edge arrays.
uintT* tOffsets = newA(uintT,n);
assert(tOffsets != NULL && "Malloc failure\n");
{parallel_for(long i=0;i<n;i++) tOffsets[i] = INT_T_MAX;}
#ifndef WEIGHTED
intPair* temp = newA(intPair,m);
#else
intTriple* temp = newA(intTriple,m);
#endif
assert(temp != NULL && "Malloc failure\n");
{parallel_for(long i=0;i<n;i++){
uintT o = offsets[i];
for(uintT j=0;j<v[i].getOutDegree();j++){
#ifndef WEIGHTED
temp[o+j] = make_pair(v[i].getOutNeighbor(j),i);
#else
temp[o+j] = make_pair(v[i].getOutNeighbor(j),make_pair(i,v[i].getOutWeight(j)));
#endif
}
}}
free(offsets);
#ifndef WEIGHTED
#ifndef LOWMEM
intSort::iSort(temp,m,n+1,getFirst<uintE>());
#else
quickSort(temp,m,pairFirstCmp<uintE>());
#endif
#else
#ifndef LOWMEM
intSort::iSort(temp,m,n+1,getFirst<intPair>());
#else
quickSort(temp,m,pairFirstCmp<intPair>());
#endif
#endif
tOffsets[temp[0].first] = 0;
#ifndef WEIGHTED
uintE* inEdges = newA(uintE,m);
inEdges[0] = temp[0].second;
#else
intE* inEdges = newA(intE,2*m);
inEdges[0] = temp[0].second.first;
inEdges[1] = temp[0].second.second;
#endif
assert(inEdges != NULL && "Malloc failure\n");
// Each change of destination id marks the start of that vertex's
// in-edge range.
{parallel_for(long i=1;i<m;i++) {
#ifndef WEIGHTED
inEdges[i] = temp[i].second;
#else
inEdges[2*i] = temp[i].second.first;
inEdges[2*i+1] = temp[i].second.second;
#endif
if(temp[i].first != temp[i-1].first) {
tOffsets[temp[i].first] = i;
}
}}
free(temp);
//fill in offsets of degree 0 vertices by taking closest non-zero
//offset to the right
sequence::scanIBack(tOffsets,tOffsets,n,minF<uintT>(),(uintT)m);
{parallel_for(long i=0;i<n;i++){
uintT o = tOffsets[i];
uintT l = ((i == n-1) ? m : tOffsets[i+1])-tOffsets[i];
v[i].setInDegree(l);
#ifndef WEIGHTED
v[i].setInNeighbors(inEdges+o);
#else
v[i].setInNeighbors(inEdges+2*o);
#endif
}}
free(tOffsets);
Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edges,inEdges);
t.Stop();
t.PrintTime("Graph reading time(s)", t.Seconds());
std::cout << "Read directed graph. Num Nodes = " << n << " and Num Edges = " << m << "\n";
return graph<vertex>(v,n,m,mem);
}
else {
// Symmetric graph: in-edges are the out-edges; no transpose needed.
free(offsets);
Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edges);
t.Stop();
t.PrintTime("Graph reading time(s)", t.Seconds());
std::cout << "Read undirected graph. Num Nodes = " << n << " and Num Edges = " << m << "\n";
return graph<vertex>(v,n,m,mem);
}
}
// Load a graph stored in the three-file binary format: <base>.config
// (vertex count as text), <base>.adj (edge targets as uints; weights
// appended when WEIGHTED), and <base>.idx (per-vertex offsets as longs).
// For asymmetric graphs the transpose is built exactly as in
// readGraphFromFile.
template <class vertex>
graph<vertex> readGraphFromBinary(char* iFile, bool isSymmetric) {
char* config = (char*) ".config";
char* adj = (char*) ".adj";
char* idx = (char*) ".idx";
// Assemble the three companion file names from the base path.
char configFile[strlen(iFile)+strlen(config)+1];
char adjFile[strlen(iFile)+strlen(adj)+1];
char idxFile[strlen(iFile)+strlen(idx)+1];
*configFile = *adjFile = *idxFile = '\0';
strcat(configFile,iFile);
strcat(adjFile,iFile);
strcat(idxFile,iFile);
strcat(configFile,config);
strcat(adjFile,adj);
strcat(idxFile,idx);
ifstream in(configFile, ifstream::in);
long n;
in >> n;
in.close();
ifstream in2(adjFile,ifstream::in | ios::binary); //stored as uints
in2.seekg(0, ios::end);
long size = in2.tellg();
in2.seekg(0);
// Edge count is inferred from the .adj file size.
#ifdef WEIGHTED
long m = size/(2*sizeof(uint));
#else
long m = size/sizeof(uint);
#endif
char* s = (char *) malloc(size);
in2.read(s,size);
in2.close();
uintE* edges = (uintE*) s;
ifstream in3(idxFile,ifstream::in | ios::binary); //stored as longs
in3.seekg(0, ios::end);
size = in3.tellg();
in3.seekg(0);
if(n != size/sizeof(intT)) { cout << "File size wrong\n"; abort(); }
char* t = (char *) malloc(size);
in3.read(t,size);
in3.close();
uintT* offsets = (uintT*) t;
vertex* v = newA(vertex,n);
#ifdef WEIGHTED
// Interleave targets and weights: .adj stores all m targets followed
// by all m weights.
intE* edgesAndWeights = newA(intE,2*m);
{parallel_for(long i=0;i<m;i++) {
edgesAndWeights[2*i] = edges[i];
edgesAndWeights[2*i+1] = edges[i+m];
}}
//free(edges);
#endif
{parallel_for(long i=0;i<n;i++) {
uintT o = offsets[i];
uintT l = ((i==n-1) ? m : offsets[i+1])-offsets[i];
v[i].setOutDegree(l);
#ifndef WEIGHTED
v[i].setOutNeighbors((uintE*)edges+o);
#else
v[i].setOutNeighbors(edgesAndWeights+2*o);
#endif
}}
if(!isSymmetric) {
// Build the transpose by sorting (dst,src[,w]) records on dst.
uintT* tOffsets = newA(uintT,n);
{parallel_for(long i=0;i<n;i++) tOffsets[i] = INT_T_MAX;}
#ifndef WEIGHTED
intPair* temp = newA(intPair,m);
#else
intTriple* temp = newA(intTriple,m);
#endif
{parallel_for(intT i=0;i<n;i++){
uintT o = offsets[i];
for(uintT j=0;j<v[i].getOutDegree();j++){
#ifndef WEIGHTED
temp[o+j] = make_pair(v[i].getOutNeighbor(j),i);
#else
temp[o+j] = make_pair(v[i].getOutNeighbor(j),make_pair(i,v[i].getOutWeight(j)));
#endif
}
}}
free(offsets);
#ifndef WEIGHTED
#ifndef LOWMEM
intSort::iSort(temp,m,n+1,getFirst<uintE>());
#else
quickSort(temp,m,pairFirstCmp<uintE>());
#endif
#else
#ifndef LOWMEM
intSort::iSort(temp,m,n+1,getFirst<intPair>());
#else
quickSort(temp,m,pairFirstCmp<intPair>());
#endif
#endif
tOffsets[temp[0].first] = 0;
#ifndef WEIGHTED
uintE* inEdges = newA(uintE,m);
inEdges[0] = temp[0].second;
#else
intE* inEdges = newA(intE,2*m);
inEdges[0] = temp[0].second.first;
inEdges[1] = temp[0].second.second;
#endif
// Each change of destination id starts that vertex's in-edge range.
{parallel_for(long i=1;i<m;i++) {
#ifndef WEIGHTED
inEdges[i] = temp[i].second;
#else
inEdges[2*i] = temp[i].second.first;
inEdges[2*i+1] = temp[i].second.second;
#endif
if(temp[i].first != temp[i-1].first) {
tOffsets[temp[i].first] = i;
}
}}
free(temp);
//fill in offsets of degree 0 vertices by taking closest non-zero
//offset to the right
sequence::scanIBack(tOffsets,tOffsets,n,minF<uintT>(),(uintT)m);
{parallel_for(long i=0;i<n;i++){
uintT o = tOffsets[i];
uintT l = ((i == n-1) ? m : tOffsets[i+1])-tOffsets[i];
v[i].setInDegree(l);
#ifndef WEIGHTED
v[i].setInNeighbors((uintE*)inEdges+o);
#else
v[i].setInNeighbors((intE*)(inEdges+2*o));
#endif
}}
free(tOffsets);
#ifndef WEIGHTED
Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edges,inEdges);
return graph<vertex>(v,n,m,mem);
#else
Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edgesAndWeights,inEdges);
return graph<vertex>(v,n,m,mem);
#endif
}
free(offsets);
#ifndef WEIGHTED
Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edges);
return graph<vertex>(v,n,m,mem);
#else
Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edgesAndWeights);
return graph<vertex>(v,n,m,mem);
#endif
}
// Dispatch to the binary or the adjacency-text reader.  The
// `compressed` flag is accepted for interface compatibility but is not
// consulted here.
template <class vertex>
graph<vertex> readGraph(char* iFile, bool compressed, bool symmetric, bool binary, bool mmap) {
  if (binary) {
    return readGraphFromBinary<vertex>(iFile, symmetric);
  }
  return readGraphFromFile<vertex>(iFile, symmetric, mmap);
}
// Load a compressed (byte-coded) graph image.  The file is read (or
// mmapped and copied) into one buffer `s`, then interpreted in place:
// three longs (n, m, totalSpace), n+1 out-offsets, n out-degrees, the
// compressed out-edge bytes, and -- for asymmetric graphs -- a second
// section with the same layout for in-edges.  The buffer is owned by
// the returned graph via Compressed_Mem.
template <class vertex>
graph<vertex> readCompressedGraph(char* fname, bool isSymmetric, bool mmap) {
char* s;
if (mmap) {
_seq<char> S = mmapStringFromFile(fname);
// Cannot mutate graph unless we copy.
char *bytes = newA(char, S.n);
parallel_for(size_t i=0; i<S.n; i++) {
bytes[i] = S.A[i];
}
if (munmap(S.A, S.n) == -1) {
perror("munmap");
exit(-1);
}
s = bytes;
} else {
ifstream in(fname,ifstream::in |ios::binary);
in.seekg(0,ios::end);
long size = in.tellg();
in.seekg(0);
cout << "size = " << size << endl;
s = (char*) malloc(size);
in.read(s,size);
in.close();
}
long* sizes = (long*) s;
long n = sizes[0], m = sizes[1], totalSpace = sizes[2];
cout << "n = "<<n<<" m = "<<m<<" totalSpace = "<<totalSpace<<endl;
cout << "reading file..."<<endl;
// Carve the out-edge section out of the flat buffer.
uintT* offsets = (uintT*) (s+3*sizeof(long));
long skip = 3*sizeof(long) + (n+1)*sizeof(intT);
uintE* Degrees = (uintE*) (s+skip);
skip+= n*sizeof(intE);
uchar* edges = (uchar*)(s+skip);
uintT* inOffsets;
uchar* inEdges;
uintE* inDegrees;
if(!isSymmetric){
// Asymmetric: a second section after totalSpace holds the in-edges.
skip += totalSpace;
uchar* inData = (uchar*)(s + skip);
sizes = (long*) inData;
long inTotalSpace = sizes[0];
cout << "inTotalSpace = "<<inTotalSpace<<endl;
skip += sizeof(long);
inOffsets = (uintT*) (s + skip);
skip += (n+1)*sizeof(uintT);
inDegrees = (uintE*)(s+skip);
skip += n*sizeof(uintE);
inEdges = (uchar*)(s + skip);
} else {
// Symmetric: in- and out-structures alias the same data.
inOffsets = offsets;
inEdges = edges;
inDegrees = Degrees;
}
vertex *V = newA(vertex,n);
parallel_for(long i=0;i<n;i++) {
long o = offsets[i];
uintT d = Degrees[i];
V[i].setOutDegree(d);
V[i].setOutNeighbors(edges+o);
}
if(sizeof(vertex) == sizeof(compressedAsymmetricVertex)){
parallel_for(long i=0;i<n;i++) {
long o = inOffsets[i];
uintT d = inDegrees[i];
V[i].setInDegree(d);
V[i].setInNeighbors(inEdges+o);
}
}
cout << "creating graph..."<<endl;
Compressed_Mem<vertex>* mem = new Compressed_Mem<vertex>(V, s);
graph<vertex> G(V,n,m,mem);
return G;
}
/* prefix sum used by the preprocess function defined below */
/* Blocked two-level parallel exclusive prefix sum over the degree array:
 * (1) each block computes its partial sum in parallel, (2) a serial pass
 * turns the block sums into block offsets, and (3) each block re-scans
 * its range in parallel starting from its offset.  Returns a vector of
 * size degrees.size()+1 whose final entry is the total of all degrees.
 * Fix: the parameter declaration was garbled by a bad encoding
 * ("°rees"); restored to `&degrees`, the name the body actually uses. */
static pvector<uintT> ParallelPrefixSum (const pvector<uintT> &degrees) {
  const size_t block_size = 1<<20;
  const size_t num_blocks = (degrees.size() + block_size - 1) / block_size;
  pvector<uintT> local_sums(num_blocks);
  #pragma omp parallel for
  for (size_t block=0; block < num_blocks; block++) {
    uintT lsum = 0;
    size_t block_end = std::min((block + 1) * block_size, degrees.size());
    for (size_t i=block * block_size; i < block_end; i++)
      lsum += degrees[i];
    local_sums[block] = lsum;
  }
  /* Serial scan over the (few) block totals. */
  pvector<uintT> bulk_prefix(num_blocks+1);
  uintT total = 0;
  for (size_t block=0; block < num_blocks; block++) {
    bulk_prefix[block] = total;
    total += local_sums[block];
  }
  bulk_prefix[num_blocks] = total;
  pvector<uintT> prefix(degrees.size() + 1);
  #pragma omp parallel for
  for (size_t block=0; block < num_blocks; block++) {
    uintT local_total = bulk_prefix[block];
    size_t block_end = std::min((block + 1) * block_size, degrees.size());
    for (size_t i=block * block_size; i < block_end; i++) {
      prefix[i] = local_total;
      local_total += degrees[i];
    }
  }
  prefix[degrees.size()] = bulk_prefix[num_blocks];
  return prefix;
}
/*
Preprocess a graph based on outdegrees or indegrees
PageRank Optimizations for directed graphs -
1) We do not create a new outNeighbors list (because it pull-only)
2) We only create new out-degrees because PR uses it during computation
*/
// Relabel all vertices by descending degree (out- or in-degree, selected by
// useOutdeg) and rebuild the adjacency arrays under the new vertex IDs.
// new_ids is filled with the old-ID -> new-ID mapping as a side effect.
// NOTE(review): when isPageRank is true, outEdges is allocated but the
// out-neighbor lists are never populated (PR is pull-only); confirm callers
// never read out-neighbors of the returned graph in that mode.
template <class vertex>
graph<vertex> preprocessGraph(graph<vertex> GA, bool isSym, bool useOutdeg,
                              pvector<uintE>& new_ids, bool isPageRank = false)
{
    Timer t;
    t.Start();
    auto numVertices = GA.n;
    auto numEdges = GA.m;
    vertex *origG = GA.V;
    // (degree, original vertex ID) pairs; sorting these ranks the vertices
    typedef std::pair<uintT, uintE> degree_nodeid_t;
    pvector<degree_nodeid_t> degree_id_pairs(numVertices);
    if (!isSym) {
        /* directed graph */
        /* STEP I - collect degrees of all vertices */
        #pragma omp parallel for
        for (uintE v = 0; v < numVertices; ++v) {
            vertex vtx = origG[v];
            if (useOutdeg) {
                degree_id_pairs[v] = std::make_pair(vtx.getOutDegree(), v);
            }
            else {
                degree_id_pairs[v] = std::make_pair(vtx.getInDegree(), v);
            }
        }
        /* Step II - sort the degrees in parallel */
        __gnu_parallel::sort(degree_id_pairs.begin(), degree_id_pairs.end(),
                             std::greater<degree_nodeid_t>());
        /* Step III - make a remap based on the sorted degree list */
        // degrees[v]     : sorted degree of NEW vertex v (the sort key)
        // inv_degrees[v] : the opposite-direction degree of NEW vertex v,
        //                  needed to size/offset the second edge array
        pvector<uintT> degrees(numVertices);
        pvector<uintT> inv_degrees(numVertices);
        #pragma omp parallel for
        for (uintE v = 0; v < numVertices; ++v) {
            degrees[v] = degree_id_pairs[v].first;
            auto origID = degree_id_pairs[v].second;
            new_ids[origID] = v;
            vertex vtx = origG[origID];
            if (useOutdeg) {
                inv_degrees[v] = vtx.getInDegree();
            }
            else {
                inv_degrees[v] = vtx.getOutDegree();
            }
        }
        //clearing space from degree pairs
        pvector<degree_nodeid_t>().swap(degree_id_pairs);
        /* Step IV - make a new vertex list for the new graph */
        // CSR-style start offsets into the new edge arrays, indexed by new IDs
        pvector<uintT> offsets = ParallelPrefixSum(degrees);
        pvector<uintT> inv_offsets = ParallelPrefixSum(inv_degrees);
        //clearing space from degree lists
        pvector<uintT>().swap(degrees);
        pvector<uintT>().swap(inv_degrees);
#ifndef WEIGHTED
        uintE* outEdges = newA(uintE, numEdges);
        uintE* inEdges = newA(uintE, numEdges);
#else
        // weighted graphs interleave (neighbor, weight) pairs, hence 2x storage
        intE* outEdges = newA(intE, 2 * numEdges);
        intE* inEdges = newA(intE, 2 * numEdges);
#endif
        vertex* newV = newA(vertex, numVertices);
        #pragma omp parallel for schedule (dynamic, 1024)
        for (uintE v = 0; v < numVertices; ++v) {
            /* note that vertex IDs u and v belong to the space of original vertex IDs */
            if (!isPageRank) {
                //copy out-neighbors
                auto newID = new_ids[v];
                newV[newID].setOutDegree(origG[v].getOutDegree());
                // offsets was built from the sort-key degrees, inv_offsets from
                // the opposite direction, so the array picked depends on useOutdeg
#ifndef WEIGHTED
                if (useOutdeg)
                    newV[newID].setOutNeighbors(outEdges + offsets[newID]);
                else
                    newV[newID].setOutNeighbors(outEdges + inv_offsets[newID]);
#else
                if (useOutdeg)
                    newV[newID].setOutNeighbors(outEdges + 2 * offsets[newID]);
                else
                    newV[newID].setOutNeighbors(outEdges + 2 * inv_offsets[newID]);
#endif
                for (uintE u = 0; u < origG[v].getOutDegree(); ++u) {
                    auto origNgh = origG[v].getOutNeighbor(u);
                    newV[newID].setOutNeighbor(u, new_ids[origNgh]);
#ifdef WEIGHTED
                    newV[newID].setOutWeight(u, origG[v].getOutWeight(u));
#endif
                }
                //copy in-neighbors
                newV[newID].setInDegree(origG[v].getInDegree());
#ifndef WEIGHTED
                if (useOutdeg)
                    newV[newID].setInNeighbors(inEdges + inv_offsets[newID]);
                else
                    newV[newID].setInNeighbors(inEdges + offsets[newID]);
#else
                if (useOutdeg)
                    newV[newID].setInNeighbors(inEdges + 2 * inv_offsets[newID]);
                else
                    newV[newID].setInNeighbors(inEdges + 2 * offsets[newID]);
#endif
                for (uintE u = 0; u < origG[v].getInDegree(); ++u) {
                    auto origNgh = origG[v].getInNeighbor(u);
                    newV[newID].setInNeighbor(u, new_ids[origNgh]);
#ifdef WEIGHTED
                    newV[newID].setInWeight(u, origG[v].getInWeight(u));
#endif
                }
            }
            else {
                /* PageRank - no need to apply weighted conditionals */
                //copy in-neighbors
                auto newID = new_ids[v];
                newV[newID].setInDegree(origG[v].getInDegree());
                if (useOutdeg)
                    newV[newID].setInNeighbors(inEdges + inv_offsets[newID]);
                else
                    newV[newID].setInNeighbors(inEdges + offsets[newID]);
                for (uintE u = 0; u < origG[v].getInDegree(); ++u) {
                    auto origNgh = origG[v].getInNeighbor(u);
                    newV[newID].setInNeighbor(u, new_ids[origNgh]);
                }
                //only set out-degrees
                newV[newID].setOutDegree(origG[v].getOutDegree());
            }
        }
        /* Step V - make the new graph */
        Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(newV,numVertices,numEdges,outEdges,inEdges);
        t.Stop();
        t.PrintTime("DegSort Time", t.Seconds());
        return graph<vertex>(newV,numVertices,numEdges,mem);
    }
    else {
        /* undirected graph */
        /* STEP I - collect degrees of all vertices */
        #pragma omp parallel for
        for (uintE v = 0; v < numVertices; ++v) {
            vertex vtx = origG[v];
            degree_id_pairs[v] = std::make_pair(vtx.getOutDegree(), v);
        }
        /* Step II - sort the degrees in parallel */
        __gnu_parallel::sort(degree_id_pairs.begin(), degree_id_pairs.end(),
                             std::greater<degree_nodeid_t>());
        /* Step III - make a remap based on the sorted degree list */
        pvector<uintT> degrees(numVertices);
        #pragma omp parallel for
        for (uintE v = 0; v < numVertices; ++v) {
            degrees[v] = degree_id_pairs[v].first;
            auto origID = degree_id_pairs[v].second;
            new_ids[origID] = v;
        }
        //clearing space from degree pairs
        pvector<degree_nodeid_t>().swap(degree_id_pairs);
        /* Step IV - make a new vertex list for the new graph */
        pvector<uintT> offsets = ParallelPrefixSum(degrees);
        //clearing space from degrees
        pvector<uintT>().swap(degrees);
#ifndef WEIGHTED
        uintE* outEdges = newA(uintE, numEdges);
#else
        intE* outEdges = newA(intE, 2 * numEdges);
#endif
        vertex* newV = newA(vertex, numVertices);
        #pragma omp parallel for schedule (dynamic, 1024)
        for (uintE v = 0; v < numVertices; ++v) {
            /* note that vertex IDs u and v belong to the space of original vertex IDs */
            //copy neighbors (undirected: out-list doubles as the only list)
            auto newID = new_ids[v];
            newV[newID].setOutDegree(origG[v].getOutDegree());
#ifndef WEIGHTED
            newV[newID].setOutNeighbors(outEdges + offsets[newID]);
#else
            newV[newID].setOutNeighbors(outEdges + 2 * offsets[newID]);
#endif
            for (uintE u = 0; u < origG[v].getOutDegree(); ++u) {
                auto origNgh = origG[v].getOutNeighbor(u);
                newV[newID].setOutNeighbor(u, new_ids[origNgh]);
#ifdef WEIGHTED
                newV[newID].setOutWeight(u, origG[v].getOutWeight(u));
#endif
            }
        }
        /* Step V - make the new graph */
        Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(newV,numVertices,numEdges,outEdges);
        t.Stop();
        t.PrintTime("DegSort Time", t.Seconds());
        return graph<vertex>(newV,numVertices,numEdges,mem);
    }
}
/*
Determine if the structure of the graph is amenable to benefit from
lightweight reordering techniques.
The implementation is a simple scan of the entire vertex space
to find the cache lines that contain atleast one hub
NOTE: we found that reordering is most effective for pull-based apps.
Hence, the following function assumes out-degree sorting by default
*/
// Scan the vertex-data address space at cache-block granularity and estimate
// how much the "hot set" (cache blocks holding at least one hub vertex) would
// shrink if hubs were packed contiguously by a degree-sorting reorder.
// Hubs are identified by out-degree (reordering is most effective for
// pull-based apps); isSym and useOutdeg are kept for interface compatibility.
// Returns true when the estimated packing factor exceeds THRESHOLD.
template <class vertex>
bool computePackingFactor(graph<vertex> GA, bool isSym, bool useOutdeg, size_t elemSz) {
    Timer t;
    t.Start();
    auto numVertices = GA.n;
    auto numEdges = GA.m;
    vertex *origG = GA.V;
    uintT avgDegree = numEdges / numVertices;
    size_t cacheBlkSz {64};                 // cache-line size in bytes
    size_t vDataSz = numVertices * elemSz;  // total size of vData array in bytes
    size_t numCacheBlocks = (vDataSz + (cacheBlkSz-1)) / cacheBlkSz; // blocks covering all of vData
    size_t vtxPerBlk {0};
    size_t hubCacheBlocks {0};
    size_t numHubs {0};
    double hotSetSize_before {0};
    double hotSetSize_after {0};
    double packingFactor {0};
    if (elemSz < cacheBlkSz) {
        vtxPerBlk = cacheBlkSz / elemSz;
        #pragma omp parallel for reduction (+ : hubCacheBlocks, numHubs)
        for (uintE b = 0; b < numCacheBlocks; ++b) {
            bool hasHubs {false};
            // BUGFIX: clamp the last (partial) cache block so we never read
            // past the end of the vertex array.
            size_t vEnd = (size_t)(b + 1) * vtxPerBlk;
            if (vEnd > (size_t) numVertices) vEnd = numVertices;
            for (size_t v = (size_t) b * vtxPerBlk; v < vEnd; ++v) {
                if (origG[v].getOutDegree() > avgDegree) {
                    hasHubs = true;
                    ++numHubs;
                }
            }
            if (hasHubs) {
                ++hubCacheBlocks;
            }
        }
        hotSetSize_before = hubCacheBlocks * cacheBlkSz;
        // After reordering, the hubs occupy ceil(numHubs*elemSz / cacheBlkSz)
        // cache blocks.  (A duplicate dead-store of this value was removed.)
        hotSetSize_after = (((numHubs * elemSz) + (cacheBlkSz-1)) / (cacheBlkSz)) * cacheBlkSz;
        // guard 0/0 when the graph has no hubs; packingFactor stays 0
        if (hotSetSize_after > 0)
            packingFactor = hotSetSize_before / hotSetSize_after;
    }
    t.Stop();
    t.PrintTime("Packing Factor Time(in s)", t.Seconds());
    std::cout << "Number of hubs = " << numHubs << std::endl;
    std::cout << "HotSet size in MB (before reordering) = " << static_cast<double>(hotSetSize_before) / (1024 * 1024) << std::endl;
    std::cout << "HotSet size in MB (after reordering) = " << static_cast<double>(hotSetSize_after) / (1024 * 1024) << std::endl;
    std::cout << "Packing Factor = " << packingFactor << std::endl;
    bool result = packingFactor > THRESHOLD;
    return result;
}
|
app.c | /**
* Christina Giannoula
* cgiannoula: christina.giann@gmail.com
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <dpu.h>
#include <dpu_log.h>
#include <unistd.h>
#include <getopt.h>
#include <assert.h>
#include <math.h>
#include <omp.h>
#include "../support/common.h"
#include "../support/matrix.h"
#include "../support/params.h"
#include "../support/partition.h"
#include "../support/timer.h"
#include "../support/utils.h"
// Define the DPU Binary path as DPU_BINARY here.
#ifndef DPU_BINARY
#define DPU_BINARY "./bin/spmv_dpu"
#endif
#define DPU_CAPACITY (64 << 20) // A DPU's capacity is 64 MB
/*
* Main Structures:
* 1. Matrices
* 2. Input vector
* 3. Output vector
* 4. Help structures for data partitioning
*/
static struct BDBCSRMatrix* A;              // input matrix in blocked BDBCSR form (sent to DPUs)
static struct BDCSRMatrix* B;               // intermediate BDCSR form (freed after blocking)
static struct COOMatrix* C;                 // matrix as read from file (COO, freed after conversion)
static val_dt* x;                           // input vector
static val_dt* y;                           // per-DPU partial results retrieved from DPUs
static val_dt* z;                           // merged output vector
static struct partition_info_t *part_info;  // data-partitioning bookkeeping across DPUs/tasklets
/**
* @brief Specific information for each DPU
*/
struct dpu_info_t {
    uint32_t block_rows_per_dpu;    // block rows assigned to this DPU
    uint32_t prev_block_rows_dpu;   // block rows assigned to preceding DPUs of the same vertical partition
    uint32_t cols_per_dpu;          // width (columns) of this DPU's vertical tile
    uint32_t block_start;           // NOTE(review): never written in this file -- confirm intended use
    uint32_t blocks;                // non-zero blocks assigned to this DPU
    uint32_t blocks_pad;            // blocks padded to an even count for 8-byte-aligned transfers
    uint32_t prev_blocks_dpu;       // blocks assigned to all preceding DPUs
    uint32_t ptr_offset;            // offset into A->browptr for this DPU's row-pointer slice
    uint32_t merge;                 // NOTE(review): never written in this file -- confirm intended use
};
struct dpu_info_t *dpu_info;
/**
 * @brief Compute the horizontal dimension of the 2D DPU partitioning.
 * @param n total number of allocated DPUs
 * @param horz_partitions output: number of DPUs per vertical partition
 * @param vert_partitions number of vertical (column) partitions
 */
/* DPUs are divided evenly across the vertical partitions. */
void find_partitions(uint32_t n, uint32_t *horz_partitions, uint32_t vert_partitions) {
    *horz_partitions = n / vert_partitions;
}
/**
* @brief initialize input vector
* @param pointer to input vector and vector size
*/
/**
 * @brief Fill a vector with the repeating pattern 1,2,3,4,1,2,...
 * @param vec vector to initialize, size its element count
 */
void init_vector(val_dt* vec, uint32_t size) {
    uint32_t idx = 0;
    while (idx < size) {
        vec[idx] = (val_dt) ((idx % 4) + 1);
        ++idx;
    }
}
/**
* @brief compute output in the host CPU
*/
/* Reference SpMV on the host CPU: y += A*x for a BDBCSR matrix, walking the
 * vertical partitions and their block rows.  Used to validate DPU output. */
static void spmv_host(val_dt* y, struct BDBCSRMatrix *A, val_dt* x) {
    uint64_t total_blocks = 0;  // blocks consumed by earlier vertical partitions
    for (uint32_t c = 0; c < A->vert_partitions; c++) {
        // each vertical partition stores its own (num_block_rows+1)-long row pointer
        uint32_t ptr_offset = c * (A->num_block_rows + 1);
        for(uint64_t n=0; n < A->num_block_rows; n++) {
            for(uint64_t i=A->browptr[ptr_offset + n]; i<A->browptr[ptr_offset + n+1]; i++){
                uint64_t j = A->bcolind[total_blocks + i];  // block-column index
                // dense (row_block_size x col_block_size) block times x sub-vector;
                // vert_tile_widths[c] is the x offset of this vertical tile
                for(uint64_t blr=0; blr < A->row_block_size; blr++){
                    val_dt acc = 0;
                    for(uint64_t blc=0; blc < A->col_block_size; blc++) {
                        acc += A->bval[(total_blocks + i) * A->col_block_size * A->row_block_size + blr * A->col_block_size + blc] * x[A->vert_tile_widths[c] + j * A->col_block_size + blc];
                    }
                    y[n * A->row_block_size + blr] += acc;
                }
            }
        }
        total_blocks += A->blocks_per_vert_partition[c];
    }
}
/**
* @brief main of the host application
*/
/* Host entry point: read the matrix, 2D-partition it across DPUs, transfer
 * the data, launch the SpMV kernel, retrieve and merge the partial results,
 * and (optionally) verify against the host reference. */
int main(int argc, char **argv) {
    struct Params p = input_params(argc, argv);
    struct dpu_set_t dpu_set, dpu;
    uint32_t nr_of_dpus;
    uint32_t nr_of_ranks;
    // Allocate DPUs and load binary
    DPU_ASSERT(dpu_alloc(NR_DPUS, NULL, &dpu_set));
    DPU_ASSERT(dpu_load(dpu_set, DPU_BINARY, NULL));
    DPU_ASSERT(dpu_get_nr_dpus(dpu_set, &nr_of_dpus));
    DPU_ASSERT(dpu_get_nr_ranks(dpu_set, &nr_of_ranks));
    printf("[INFO] Allocated %d DPU(s)\n", nr_of_dpus);
    printf("[INFO] Allocated %d Rank(s)\n", nr_of_ranks);
    printf("[INFO] Allocated %d TASKLET(s) per DPU\n", NR_TASKLETS);
    unsigned int i;
    // Initialize input data
    C = readCOOMatrix(p.fileName);
    sortCOOMatrix(C);
    uint32_t horz_partitions = 0;
    uint32_t vert_partitions = p.vert_partitions;
    find_partitions(nr_of_dpus, &horz_partitions, p.vert_partitions);
    printf("[INFO] %dx%d Matrix Partitioning\n\n", horz_partitions, vert_partitions);
    // Convert COO -> BDCSR -> blocked BDBCSR with the requested block sizes
    B = coo2bdcsr(C, horz_partitions, vert_partitions);
    freeCOOMatrix(C);
    A = bdcsr2bdbcsr(B, p.row_blsize, p.col_blsize);
    countNNZperBlockBDBCSRMatrix(A);
    freeBDCSRMatrix(B);
    // Initialize partition data
    part_info = partition_init(A, nr_of_dpus, p.max_nranks, NR_TASKLETS);
#if FG_TRANS
    struct dpu_set_t rank;
    uint32_t each_rank;
    DPU_RANK_FOREACH(dpu_set, rank, each_rank){
        uint32_t nr_dpus_in_rank;
        DPU_ASSERT(dpu_get_nr_dpus(rank, &nr_dpus_in_rank));
        part_info->active_dpus_per_rank[each_rank+1] = nr_dpus_in_rank;
    }
    int sum = 0;
    // prefix sum: accum_dpus_ranks[r] = global index of the first DPU of rank r
    // (local int i intentionally shadows the outer unsigned counter)
    for(int i=0; i < p.max_nranks+1; i++) {
        part_info->accum_dpus_ranks[i] = part_info->active_dpus_per_rank[i] + sum;
        sum += part_info->active_dpus_per_rank[i];
    }
#endif
    // Initialize help data - Padding needed
    // (sizes are rounded up to 8-byte multiples for MRAM transfer alignment)
    uint32_t ncols_pad = A->ncols + A->max_tile_width + A->col_block_size;
    uint32_t tile_width_pad = A->num_block_cols * A->col_block_size;
    uint32_t nrows_pad = A->nrows + A->row_block_size;
    if (ncols_pad % (8 / byte_dt) != 0)
        ncols_pad = ncols_pad + ((8 / byte_dt) - (ncols_pad % (8 / byte_dt)));
    if (tile_width_pad % (8 / byte_dt) != 0)
        tile_width_pad = tile_width_pad + ((8 / byte_dt) - (tile_width_pad % (8 / byte_dt)));
#if INT8
    if (tile_width_pad % 2 != 0)
        tile_width_pad++;
#endif
    if (nrows_pad % (8 / byte_dt) != 0)
        nrows_pad = nrows_pad + ((8 / byte_dt) - (nrows_pad % (8 / byte_dt)));
    // Allocate input vector
    x = (val_dt *) malloc(ncols_pad * sizeof(val_dt));
    // Allocate output vector
    z = (val_dt *) calloc(nrows_pad, sizeof(val_dt));
    // Initialize input vector with arbitrary data
    init_vector(x, ncols_pad);
    // Load-balance nnzs (block-row granularity) across DPUs of the same vertical partition
    partition_by_nnz(A, part_info);
    // Initialize help data
    dpu_info = (struct dpu_info_t *) malloc(nr_of_dpus * sizeof(struct dpu_info_t));
    dpu_arguments_t *input_args = (dpu_arguments_t *) malloc(nr_of_dpus * sizeof(dpu_arguments_t));
    // Max limits for parallel transfers
    uint64_t max_block_rows_per_dpu = 0;
    uint64_t max_blocks_per_dpu = 0;
    // Timer for measurements
    Timer timer;
    i = 0;
    uint32_t acc_blocks = 0;
    uint32_t total_blocks = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        // Find padding for block rows and non-zero elements needed for CPU-DPU transfers
        // DPU i handles tile (tile_vert_indx, tile_horz_indx) of the 2D grid
        uint32_t tile_horz_indx = i % A->horz_partitions;
        uint32_t tile_vert_indx = i / A->horz_partitions;
        uint32_t block_rows_per_dpu = part_info->brow_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx + 1] - part_info->brow_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx];
        uint32_t block_rows_per_dpu_pad = part_info->brow_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx + 1] - part_info->brow_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx] + 1;
        uint32_t prev_block_rows_dpu = part_info->brow_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx];
        if (block_rows_per_dpu_pad > max_block_rows_per_dpu)
            max_block_rows_per_dpu = block_rows_per_dpu_pad;
        unsigned int blocks, blocks_pad;
        blocks = A->browptr[tile_vert_indx * (A->num_block_rows + 1) + prev_block_rows_dpu + block_rows_per_dpu] - A->browptr[tile_vert_indx * (A->num_block_rows + 1) + prev_block_rows_dpu];
        assert(blocks == part_info->blocks_dpu[i]);
        if (blocks % 2 != 0) // bcolind
            blocks_pad = blocks + 1;
        else
            blocks_pad = blocks;
        if (blocks_pad > max_blocks_per_dpu)
            max_blocks_per_dpu = blocks_pad;
        // Keep information per DPU
        dpu_info[i].block_rows_per_dpu = block_rows_per_dpu;
        dpu_info[i].prev_block_rows_dpu = prev_block_rows_dpu;
        dpu_info[i].cols_per_dpu = A->vert_tile_widths[tile_vert_indx+1] - A->vert_tile_widths[tile_vert_indx];
        dpu_info[i].blocks = blocks;
        dpu_info[i].blocks_pad = blocks_pad;
        dpu_info[i].prev_blocks_dpu = total_blocks;
        dpu_info[i].ptr_offset = tile_vert_indx * (A->num_block_rows + 1) + prev_block_rows_dpu;
        // Find input arguments per DPU
        input_args[i].block_rows = block_rows_per_dpu;
        input_args[i].tcols = tile_width_pad;
        input_args[i].row_block_size = A->row_block_size;
        input_args[i].col_block_size = A->col_block_size;
        //input_args[i].blocks = blocks;
#if BLNC_TSKLT_BLOCK
        // Load-balance blocks across tasklets
        partition_tsklt_by_block(A, part_info, i, NR_TASKLETS, nr_of_dpus, acc_blocks, prev_block_rows_dpu, block_rows_per_dpu, tile_vert_indx);
#else
        // Load-balance nnzs across tasklets
        partition_tsklt_by_nnz(A, part_info, i, NR_TASKLETS, nr_of_dpus, acc_blocks, prev_block_rows_dpu, block_rows_per_dpu, tile_vert_indx);
#endif
        uint32_t t;
        for (t = 0; t < NR_TASKLETS; t++) {
            // Find input arguments per tasklet
            input_args[i].start_block_row[t] = part_info->brow_split_tasklet[i * (NR_TASKLETS+2) + t];
            input_args[i].end_block_row[t] = part_info->brow_split_tasklet[i * (NR_TASKLETS+2) + (t+1)];
        }
        if (tile_horz_indx == (A->horz_partitions - 1))
            acc_blocks += A->blocks_per_vert_partition[tile_vert_indx];
        total_blocks += part_info->blocks_dpu[i];
    }
#if FG_TRANS
    // Find max number of block rows (subset of elements of the output vector) among DPUs of each rank
    DPU_RANK_FOREACH(dpu_set, rank, each_rank){
        uint32_t max_block_rows_cur_rank = 0;
        uint32_t max_cols_cur_rank = 0;
        uint32_t nr_dpus_in_rank;
        DPU_ASSERT(dpu_get_nr_dpus(rank, &nr_dpus_in_rank));
        uint32_t start_dpu = part_info->accum_dpus_ranks[each_rank];
        for (int k = 0; k < nr_dpus_in_rank; k++) {
            if (start_dpu + k >= nr_of_dpus)
                break;
            if (dpu_info[start_dpu + k].block_rows_per_dpu > max_block_rows_cur_rank)
                max_block_rows_cur_rank = dpu_info[start_dpu + k].block_rows_per_dpu;
            if (dpu_info[start_dpu + k].cols_per_dpu > max_cols_cur_rank)
                max_cols_cur_rank = dpu_info[start_dpu + k].cols_per_dpu;
        }
        // Padding
        max_cols_cur_rank = ((max_cols_cur_rank + A->col_block_size - 1) / A->col_block_size) * A->col_block_size;
        if (max_block_rows_cur_rank % 2 != 0)
            max_block_rows_cur_rank++;
        if (max_cols_cur_rank % (8 / byte_dt) != 0)
            max_cols_cur_rank = max_cols_cur_rank + ((8 / byte_dt) - (max_cols_cur_rank % (8 / byte_dt)));
        part_info->max_block_rows_per_rank[each_rank] = (uint32_t) max_block_rows_cur_rank;
        part_info->max_cols_per_rank[each_rank] = (uint32_t) max_cols_cur_rank;
    }
#endif
    // Initializations for parallel transfers with padding needed
    if (max_block_rows_per_dpu % 2 != 0)
        max_block_rows_per_dpu++;
    if (max_blocks_per_dpu % 2 != 0)
        max_blocks_per_dpu++;
    // Re-allocations for padding needed
    // (parallel transfers copy max_* bytes per DPU, so the arrays must be
    //  large enough for nr_of_dpus * max_* elements)
    A->browptr = (uint32_t *) realloc(A->browptr, (max_block_rows_per_dpu * nr_of_dpus * sizeof(uint32_t)));
    A->bcolind = (uint32_t *) realloc(A->bcolind, (max_blocks_per_dpu * nr_of_dpus * sizeof(uint32_t)));
    A->bval = (val_dt *) realloc(A->bval, (max_blocks_per_dpu * A->row_block_size * A->col_block_size * nr_of_dpus * sizeof(val_dt)));
    y = (val_dt *) calloc((uint64_t) ((uint64_t) nr_of_dpus * (uint64_t) max_block_rows_per_dpu * A->row_block_size), sizeof(val_dt));
    // Count total number of bytes to be transfered in MRAM of DPU
    unsigned long int total_bytes;
    total_bytes = ((max_block_rows_per_dpu) * sizeof(uint32_t)) + (max_blocks_per_dpu * sizeof(uint32_t)) + (max_blocks_per_dpu * A->row_block_size * A->col_block_size * sizeof(val_dt)) + (tile_width_pad * sizeof(val_dt)) + (max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt));
    assert(total_bytes <= DPU_CAPACITY && "Bytes needed exceeded MRAM size");
    // Copy input arguments to DPUs
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        input_args[i].max_block_rows = max_block_rows_per_dpu;
        input_args[i].max_blocks = max_blocks_per_dpu;
        DPU_ASSERT(dpu_prepare_xfer(dpu, input_args + i));
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, "DPU_INPUT_ARGUMENTS", 0, sizeof(dpu_arguments_t), DPU_XFER_DEFAULT));
    // Copy input matrix to DPUs
    // MRAM layout per DPU: [output y][input x tile][browptr][bcolind][bval]
    startTimer(&timer, 0);
    // Copy Browptr
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, A->browptr + dpu_info[i].ptr_offset));
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, (max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt)), max_block_rows_per_dpu * sizeof(uint32_t), DPU_XFER_DEFAULT));
    // Copy Bcolind
    i = 0;
    total_blocks = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, A->bcolind + total_blocks));
        total_blocks += part_info->blocks_dpu[i];
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt) + max_block_rows_per_dpu * sizeof(uint32_t), max_blocks_per_dpu * sizeof(uint32_t), DPU_XFER_DEFAULT));
    // Copy Bvalues
    i = 0;
    total_blocks = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, A->bval + ((uint64_t) total_blocks * A->row_block_size * A->col_block_size)));
        total_blocks += part_info->blocks_dpu[i];
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt) + max_block_rows_per_dpu * sizeof(uint32_t) + max_blocks_per_dpu * sizeof(uint32_t), max_blocks_per_dpu * A->row_block_size * A->col_block_size * sizeof(val_dt), DPU_XFER_DEFAULT));
    stopTimer(&timer, 0);
    // Copy input vector to DPUs
    startTimer(&timer, 1);
#if CG_TRANS
    // Coarse-grained data transfers in the input vector
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        uint32_t tile_vert_indx = i / A->horz_partitions;
        DPU_ASSERT(dpu_prepare_xfer(dpu, x + A->vert_tile_widths[tile_vert_indx]));
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), tile_width_pad * sizeof(val_dt), DPU_XFER_DEFAULT));
#endif
#if FG_TRANS
    // Fine-grained data transfers in the input vector at rank granularity
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        uint32_t tile_vert_indx = i / A->horz_partitions;
        DPU_ASSERT(dpu_prepare_xfer(dpu, x + A->vert_tile_widths[tile_vert_indx]));
    }
    i = 0;
    //struct dpu_set_t rank;
    DPU_RANK_FOREACH(dpu_set, rank) {
        DPU_ASSERT(dpu_push_xfer(rank, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), part_info->max_cols_per_rank[i] * sizeof(val_dt), DPU_XFER_ASYNC));
        i++;
    }
    DPU_ASSERT(dpu_sync(dpu_set));
#endif
    stopTimer(&timer, 1);
    // Run kernel on DPUs
    startTimer(&timer, 2);
    DPU_ASSERT(dpu_launch(dpu_set, DPU_SYNCHRONOUS));
    stopTimer(&timer, 2);
#if LOG
    // Display DPU Log (default: disabled)
    DPU_FOREACH(dpu_set, dpu) {
        DPU_ASSERT(dpulog_read_for_dpu(dpu.dpu, stdout));
    }
#endif
    // Retrieve results for output vector from DPUs
    startTimer(&timer, 3);
#if CG_TRANS
    // Coarse-grained data transfers in the output vector
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, y + (i * max_block_rows_per_dpu * A->row_block_size)));
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), DPU_XFER_DEFAULT));
#endif
#if FG_TRANS
    // Fine-grained data transfers in the output vector at rank granularity
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, y + (i * max_block_rows_per_dpu * A->row_block_size)));
    }
    i = 0;
    DPU_RANK_FOREACH(dpu_set, rank) {
        DPU_ASSERT(dpu_push_xfer(rank, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, part_info->max_block_rows_per_rank[i] * A->row_block_size * sizeof(val_dt), DPU_XFER_ASYNC));
        i++;
    }
    DPU_ASSERT(dpu_sync(dpu_set));
#endif
    stopTimer(&timer, 3);
    // Merge partial results to the host CPU
    // z accumulates, per block row, the partial result of the DPU that owns
    // tile (c, r); block rows within a tile are merged in parallel
    startTimer(&timer, 4);
    uint32_t r, c, t, b;
    for (c = 0; c < A->vert_partitions; c++) {
        for (r = 0; r < A->horz_partitions; r++) {
#pragma omp parallel for num_threads(p.nthreads) shared(A, z, y, max_block_rows_per_dpu, r, c) private(t, b)
            for (t = 0; t < part_info->brow_split[c * (A->horz_partitions + 1) + r+1] - part_info->brow_split[c * (A->horz_partitions + 1) + r]; t++) {
                for (b = 0; b < A->row_block_size; b++) {
                    z[(part_info->brow_split[c * (A->horz_partitions + 1) + r] + t) * A->row_block_size + b] += y[(c * A->horz_partitions + r) * max_block_rows_per_dpu * A->row_block_size + t * A->row_block_size + b];
                }
            }
        }
    }
    stopTimer(&timer, 4);
    // Print timing results
    printf("\n");
    printf("Load Matrix ");
    printTimer(&timer, 0);
    printf("Load Input Vector ");
    printTimer(&timer, 1);
    printf("Kernel ");
    printTimer(&timer, 2);
    printf("Retrieve Output Vector ");
    printTimer(&timer, 3);
    printf("Merge Partial Results ");
    printTimer(&timer, 4);
    printf("\n\n");
#if CHECK_CORR
    // Check output
    // NOTE(review): timer 4 is restarted here after its value was printed
    // above; the verification time is never reported -- confirm intended
    startTimer(&timer, 4);
    val_dt *y_host = (val_dt *) calloc(nrows_pad, sizeof(val_dt));
    spmv_host(y_host, A, x);
    bool status = true;
    i = 0;
    for (i = 0; i < A->nrows; i++) {
        if(y_host[i] != z[i]) {
            status = false;
        }
    }
    if (status) {
        printf("[" ANSI_COLOR_GREEN "OK" ANSI_COLOR_RESET "] Outputs are equal\n");
    } else {
        printf("[" ANSI_COLOR_RED "ERROR" ANSI_COLOR_RESET "] Outputs differ!\n");
    }
    free(y_host);
#endif
    // Deallocation
    freeBDBCSRMatrix(A);
    free(x);
    free(z);
    free(y);
    partition_free(part_info);
    DPU_ASSERT(dpu_free(dpu_set));
    return 0;
}
|
GB_dense_subassign_25_template.c | //------------------------------------------------------------------------------
// GB_dense_subassign_25_template: C<M> = A where C is empty and A is dense
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C<M> = A where C starts as empty, M is structural, and A is dense. The
// pattern of C is an exact copy of M. A is full, dense, or bitmap.
// M is sparse or hypersparse, and C is constructed with the same pattern as M.
{

    //--------------------------------------------------------------------------
    // get C, M, and A
    //--------------------------------------------------------------------------

    ASSERT (GB_sparsity (M) == GB_sparsity (C)) ;
    GB_CTYPE *GB_RESTRICT Cx = (GB_CTYPE *) C->x ;
    int64_t *GB_RESTRICT Ci = C->i ;

    ASSERT (GB_IS_SPARSE (M) || GB_IS_HYPERSPARSE (M)) ;
    ASSERT (GB_JUMBLED_OK (M)) ;
    const int64_t *GB_RESTRICT Mp = M->p ;
    const int64_t *GB_RESTRICT Mh = M->h ;
    const int64_t *GB_RESTRICT Mi = M->i ;
    const int64_t mvlen = M->vlen ;

    const bool A_is_bitmap = GB_IS_BITMAP (A) ;
    const GB_ATYPE *GB_RESTRICT Ax = (GB_ATYPE *) A->x ;
    const int8_t *GB_RESTRICT Ab = A->b ;
    const int64_t avlen = A->vlen ;

    //--------------------------------------------------------------------------
    // C<M> = A
    //--------------------------------------------------------------------------

    if (A_is_bitmap)
    {

        //----------------------------------------------------------------------
        // A is bitmap, so zombies can be created in C
        //----------------------------------------------------------------------

        // zombies are counted per task and summed via the OpenMP reduction
        int64_t nzombies = 0 ;
        int tid ;
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:nzombies)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            // if kfirst > klast then task tid does no work at all
            int64_t kfirst = kfirst_slice [tid] ;
            int64_t klast  = klast_slice  [tid] ;
            int64_t task_nzombies = 0 ;

            //------------------------------------------------------------------
            // C<M(:,kfirst:klast)> = A(:,kfirst:klast)
            //------------------------------------------------------------------

            for (int64_t k = kfirst ; k <= klast ; k++)
            {

                //--------------------------------------------------------------
                // find the part of M(:,k) to be operated on by this task
                //--------------------------------------------------------------

                int64_t j = GBH (Mh, k) ;
                int64_t pM_start, pM_end ;
                GB_get_pA (&pM_start, &pM_end, tid, k,
                    kfirst, klast, pstart_slice, Mp, mvlen) ;

                //--------------------------------------------------------------
                // C<M(:,j)> = A(:,j)
                //--------------------------------------------------------------

                // M is hypersparse or sparse.  C is the same as M.
                // pA points to the start of A(:,j) since A is dense
                int64_t pA = j * avlen ;
                for (int64_t pM = pM_start ; pM < pM_end ; pM++)
                {
                    int64_t i = Mi [pM] ;
                    int64_t p = pA + i ;
                    if (Ab [p])
                    {
                        // C(i,j) = A(i,j)
                        GB_COPY_A_TO_C (Cx, pM, Ax, p) ;    // Cx [pM] = Ax [p]
                    }
                    else
                    {
                        // C(i,j) becomes a zombie
                        task_nzombies++ ;
                        Ci [pM] = GB_FLIP (i) ;
                    }
                }
            }
            nzombies += task_nzombies ;
        }
        // record the total zombie count in C
        C->nzombies = nzombies ;
    }
    else
    {

        //----------------------------------------------------------------------
        // A is full, so no zombies will appear in C
        //----------------------------------------------------------------------

        int tid ;
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            // if kfirst > klast then task tid does no work at all
            int64_t kfirst = kfirst_slice [tid] ;
            int64_t klast  = klast_slice  [tid] ;

            //------------------------------------------------------------------
            // C<M(:,kfirst:klast)> = A(:,kfirst:klast)
            //------------------------------------------------------------------

            for (int64_t k = kfirst ; k <= klast ; k++)
            {

                //--------------------------------------------------------------
                // find the part of M(:,k) to be operated on by this task
                //--------------------------------------------------------------

                int64_t j = GBH (Mh, k) ;
                int64_t pM_start, pM_end ;
                GB_get_pA (&pM_start, &pM_end, tid, k,
                    kfirst, klast, pstart_slice, Mp, mvlen) ;

                //--------------------------------------------------------------
                // C<M(:,j)> = A(:,j)
                //--------------------------------------------------------------

                // M is hypersparse or sparse.  C is the same as M.
                // pA points to the start of A(:,j) since A is dense
                int64_t pA = j * avlen ;
                GB_PRAGMA_SIMD_VECTORIZE
                for (int64_t pM = pM_start ; pM < pM_end ; pM++)
                {
                    int64_t p = pA + GBI (Mi, pM, mvlen) ;
                    GB_COPY_A_TO_C (Cx, pM, Ax, p) ;        // Cx [pM] = Ax [p]
                }
            }
        }
    }
}
|
GB_unop__log1p_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__log1p_fp32_fp32)
// op(A') function: GB (_unop_tran__log1p_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = log1pf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = log1pf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = log1pf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG1P || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = log1pf (Ax [p]) for all entries; skips entries absent from the
// bitmap when Ab is non-NULL.  (Auto-generated; kept byte-identical.)
GrB_Info GB (_unop_apply__log1p_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is full or dense: apply the operator to every entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = log1pf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = log1pf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = log1pf (A'): the shared transpose kernel in GB_unop_transpose.c uses
// the GB_CAST_OP macro defined above to apply the operator during transpose.
GrB_Info GB (_unop_tran__log1p_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
trsm_x_dia_u_lo_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/* Triangular solve y = alpha * inv(L) * x for a unit lower-triangular DIA
 * matrix, row-major multi-RHS (columns of x/y are independent systems). */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT m = A->rows;
    ALPHA_INT main_diag_pos = 0;
    int num_thread = alpha_get_thread_num();

    /* Locate the main diagonal in the DIA distance array.  This O(ndiag)
     * scan was previously run under "omp parallel for", which made every
     * matching thread write the shared main_diag_pos (a data race); do it
     * sequentially and stop at the first match instead (distances in a DIA
     * matrix are unique, so the result is identical). */
    for (ALPHA_INT i = 0; i < A->ndiag; i++)
    {
        if (A->distance[i] == 0)
        {
            main_diag_pos = i;
            break;
        }
    }

    /* Forward substitution: RHS columns are independent, so parallelize
     * across them; rows must stay sequential within a column because
     * y[r] depends on y[r'] for r' < r. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
    {
        for (ALPHA_INT r = 0; r < m; r++)
        {
            ALPHA_Number temp;
            alpha_setzero(temp);
            /* accumulate contributions of the strictly-lower diagonals
             * (those stored before the main diagonal) that reach row r */
            for (ALPHA_INT ndiag = 0; ndiag < main_diag_pos; ndiag++)
            {
                if (-A->distance[ndiag] <= r)
                {
                    ALPHA_INT ac = r + A->distance[ndiag];
                    alpha_madde(temp, A->values[ndiag * A->lval + r], y[ac * ldy + out_y_col]);
                }
            }
            ALPHA_Number t;
            alpha_setzero(t);
            alpha_mul(t, alpha, x[r * ldx + out_y_col]);
            /* unit diagonal: y[r] = alpha*x[r] - sum, no division needed */
            alpha_sub(y[r * ldy + out_y_col], t, temp);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
mixed_tentusscher_myo_epi_2004_S3_13.c | // Scenario 3 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt + Rc)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S3_13.h"
GET_CELL_MODEL_DATA(init_cell_model_data)
{
    // Fill in only the fields the caller asked for.
    if (get_neq) {
        cell_model->number_of_ode_equations = NEQ;
    }
    if (get_initial_v) {
        cell_model->initial_v = INITIAL_V;
    }
}
// Initialize the state vector sv for one cell.  The extra_data mask selects
// the parameter set: mapping[sv_id] == 0 -> myocardium, otherwise epicardium.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
    static bool first_call = true;

    if(first_call)
    {
        print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
        first_call = false;
    }

    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    // Initial conditions for TenTusscher myocardium
    if (mapping[sv_id] == 0)
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;           //M
        sv[2] = 0.75;          //H
        sv[3] = 0.75f;         //J
        sv[4] = 0.f;           //Xr1
        sv[5] = 1.f;           //Xr2
        sv[6] = 0.f;           //Xs
        sv[7] = 1.f;           //S
        sv[8] = 0.f;           //R
        sv[9] = 0.f;           //D
        sv[10] = 1.f;          //F
        sv[11] = 1.f;          //FCa
        sv[12] = 1.f;          //G
        sv[13] = 0.0002;       //Cai
        sv[14] = 0.2f;         //CaSR
        sv[15] = 11.6f;        //Nai
        sv[16] = 138.3f;       //Ki
        */

        // Elnaz's steady-state initial conditions
        // (state ordering matches the commented default block above)
        real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
    // Initial conditions for TenTusscher epicardium
    else
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;           //M
        sv[2] = 0.75;          //H
        sv[3] = 0.75f;         //J
        sv[4] = 0.f;           //Xr1
        sv[5] = 1.f;           //Xr2
        sv[6] = 0.f;           //Xs
        sv[7] = 1.f;           //S
        sv[8] = 0.f;           //R
        sv[9] = 0.f;           //D
        sv[10] = 1.f;          //F
        sv[11] = 1.f;          //FCa
        sv[12] = 1.f;          //G
        sv[13] = 0.0002;       //Cai
        sv[14] = 0.2f;         //CaSR
        sv[15] = 11.6f;        //Nai
        sv[16] = 138.3f;       //Ki
        */

        // Elnaz's steady-state initial conditions
        // (state ordering matches the commented default block above)
        real sv_sst[]={-86.6249845555192,0.00127452925982209,0.781098854878912,0.780945505139612,0.000173127258213963,0.485680542843999,0.00292844596868805,0.999998366997264,1.91530092199862e-08,1.87681747950326e-05,0.999774940058991,1.00702552778216,0.999994275830871,4.68103990785171e-05,0.397558769683448,10.1166549211387,139.567494437918};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
}
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
    // Advance every requested cell num_steps Euler/Rush-Larsen steps,
    // dispatching to the myocardium or epicardium RHS according to the
    // cell-type mask passed in through extra_data.
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    uint32_t sv_id;
    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++)
    {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;

        // BUG FIX: the cell-type mask is indexed by the state-vector id, not
        // by the loop counter. The two differ whenever cells_to_solve remaps
        // the work list, and set_model_initial_conditions_cpu indexes the
        // mask with sv_id as well; previously this read mapping[i].
        for (int j = 0; j < num_steps; ++j)
        {
            if (mapping[sv_id] == 0)
                solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
            else
                solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
    // One time step of the myocardium model: snapshot the state, evaluate
    // the update, and write the new state back into sv.
    real state_old[NEQ], state_new[NEQ];
    int idx;

    for (idx = 0; idx < NEQ; idx++)
        state_old[idx] = sv[idx];

    RHS_cpu_myo(state_old, state_new, stim_current, dt);

    for (idx = 0; idx < NEQ; idx++)
        sv[idx] = state_new[idx];
}
// Evaluate one integration step of the TenTusscher 2004 *myocardium* cell
// model. Despite the rDY_ name, this routine writes the UPDATED state (not
// time derivatives): the voltage via forward Euler, the gates via
// Rush-Larsen exponential integration, and the concentrations via in-place
// analytic buffering updates. `dt` is therefore a genuine input here.
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Membrane currents and intermediate quantities used below.
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
// Precomputed exponential decay factors for the FCa and G gates
// (Rush-Larsen update with fixed time constants taufca/taug).
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents: Nernst/reversal potentials and rectifier terms.
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations (Cai/CaSR solved with the analytic rapid-buffering
//quadratic; Nai/Ki advanced with forward Euler)
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates with the Rush-Larsen exponential scheme:
//  x(t+dt) = x_inf - (x_inf - x) * exp(-dt/tau_x)
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// FCa and G may only decrease while the cell is depolarized (> -37 mV).
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage (forward Euler; note rDY_ receives the new state values)
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
    // One time step of the epicardium model: snapshot the state, evaluate
    // the update, and write the new state back into sv.
    real state_old[NEQ], state_new[NEQ];
    int idx;

    for (idx = 0; idx < NEQ; idx++)
        state_old[idx] = sv[idx];

    RHS_cpu_epi(state_old, state_new, stim_current, dt);

    for (idx = 0; idx < NEQ; idx++)
        sv[idx] = state_new[idx];
}
// Evaluate one integration step of the TenTusscher 2004 *epicardium* cell
// model, with several conductances and calcium-handling parameters
// overridden by a scenario-fitted parameter table (see `parameters[]`
// below). Like RHS_cpu_myo, rDY_ receives UPDATED state values, not
// derivatives: voltage via forward Euler, gates via Rush-Larsen, and
// concentrations via in-place analytic buffering updates.
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Scenario-specific fitted parameter set: overrides the default conductances
// above and supplies arel/crel/Vleak for the calcium release/leak fluxes.
// (Presumably produced by an external fitting run — values are opaque here.)
real parameters []={14.2937045632517,0.000331567271096668,0.000125123240971326,0.000319780240937142,0.230930142679641,0.142554278260413,0.156333434028122,4.66122435867929,0.0134411682726080,1.80597248717533,1099.57883152268,0.000468845160350493,0.251300465852520,0.0155860481845978,0.00288945677711972,6.05964606931935e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
// Membrane currents and intermediate quantities used below.
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
// Precomputed exponential decay factors for the FCa and G gates
// (Rush-Larsen update with fixed time constants taufca/taug).
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents: Nernst/reversal potentials and rectifier terms.
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations (Cai/CaSR solved with the analytic rapid-buffering
//quadratic; Nai/Ki advanced with forward Euler). Note arel/crel/Vleak come
//from the fitted parameter table above.
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates with the Rush-Larsen exponential scheme:
//  x(t+dt) = x_inf - (x_inf - x) * exp(-dt/tau_x)
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// FCa and G may only decrease while the cell is depolarized (> -37 mV).
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage (forward Euler; note rDY_ receives the new state values)
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
acado_solver.c | /*
* This file was auto-generated using the ACADO Toolkit.
*
* While ACADO Toolkit is free software released under the terms of
* the GNU Lesser General Public License (LGPL), the generated code
* as such remains the property of the user who used ACADO Toolkit
* to generate this code. In particular, user dependent data of the code
* do not inherit the GNU LGPL license. On the other hand, parts of the
* generated code that are a direct copy of source code from the
* ACADO Toolkit or the software tools it is based on, remain, as derived
* work, automatically covered by the LGPL license.
*
* ACADO Toolkit is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
*/
#include "acado_common.h"
/******************************************************************************/
/* */
/* ACADO code generation */
/* */
/******************************************************************************/
/** Row vector of size: 30 */
/* Scratch buffer for one integrator call. Layout (from its use in
 * acado_modelSimulation): [0..3] differential states, [4..19] state
 * sensitivities Gx (4x4), [20..27] control sensitivities Gu (4x2),
 * [28..29] controls. Privatized per thread in the OpenMP loop below. */
real_t state[ 30 ];
/* Simulate all 16 shooting intervals: integrate the model over each
 * interval, store the continuity residual d = x_sim - x_next, and unpack
 * the state/control sensitivities into evGx/evGu.
 * Returns the status of acado_integrate.
 * NOTE(review): `ret` is assigned by every iteration of the parallel loop
 * without synchronization, so only one iteration's status (unspecified
 * which) survives — this matches typical ACADO-generated code, but confirm
 * against upstream before relying on the return value.
 * The global `state` buffer is privatized via private(state), giving each
 * thread its own copy. */
int acado_modelSimulation(  )
{
int ret;
int lRun1;
ret = 0;
#pragma omp parallel for private(lRun1, state) shared(acadoWorkspace, acadoVariables)
for (lRun1 = 0; lRun1 < 16; ++lRun1)
{
/* Seed the integrator with the interval's initial state and controls. */
state[0] = acadoVariables.x[lRun1 * 4];
state[1] = acadoVariables.x[lRun1 * 4 + 1];
state[2] = acadoVariables.x[lRun1 * 4 + 2];
state[3] = acadoVariables.x[lRun1 * 4 + 3];
state[28] = acadoVariables.u[lRun1 * 2];
state[29] = acadoVariables.u[lRun1 * 2 + 1];
ret = acado_integrate(state, 1);
/* Continuity residual against the next node's state. */
acadoWorkspace.d[lRun1 * 4] = state[0] - acadoVariables.x[lRun1 * 4 + 4];
acadoWorkspace.d[lRun1 * 4 + 1] = state[1] - acadoVariables.x[lRun1 * 4 + 5];
acadoWorkspace.d[lRun1 * 4 + 2] = state[2] - acadoVariables.x[lRun1 * 4 + 6];
acadoWorkspace.d[lRun1 * 4 + 3] = state[3] - acadoVariables.x[lRun1 * 4 + 7];
/* 4x4 state sensitivity block. */
acadoWorkspace.evGx[lRun1 * 16] = state[4];
acadoWorkspace.evGx[lRun1 * 16 + 1] = state[5];
acadoWorkspace.evGx[lRun1 * 16 + 2] = state[6];
acadoWorkspace.evGx[lRun1 * 16 + 3] = state[7];
acadoWorkspace.evGx[lRun1 * 16 + 4] = state[8];
acadoWorkspace.evGx[lRun1 * 16 + 5] = state[9];
acadoWorkspace.evGx[lRun1 * 16 + 6] = state[10];
acadoWorkspace.evGx[lRun1 * 16 + 7] = state[11];
acadoWorkspace.evGx[lRun1 * 16 + 8] = state[12];
acadoWorkspace.evGx[lRun1 * 16 + 9] = state[13];
acadoWorkspace.evGx[lRun1 * 16 + 10] = state[14];
acadoWorkspace.evGx[lRun1 * 16 + 11] = state[15];
acadoWorkspace.evGx[lRun1 * 16 + 12] = state[16];
acadoWorkspace.evGx[lRun1 * 16 + 13] = state[17];
acadoWorkspace.evGx[lRun1 * 16 + 14] = state[18];
acadoWorkspace.evGx[lRun1 * 16 + 15] = state[19];
/* 4x2 control sensitivity block. */
acadoWorkspace.evGu[lRun1 * 8] = state[20];
acadoWorkspace.evGu[lRun1 * 8 + 1] = state[21];
acadoWorkspace.evGu[lRun1 * 8 + 2] = state[22];
acadoWorkspace.evGu[lRun1 * 8 + 3] = state[23];
acadoWorkspace.evGu[lRun1 * 8 + 4] = state[24];
acadoWorkspace.evGu[lRun1 * 8 + 5] = state[25];
acadoWorkspace.evGu[lRun1 * 8 + 6] = state[26];
acadoWorkspace.evGu[lRun1 * 8 + 7] = state[27];
}
return ret;
}
void acado_evaluateLSQ(const real_t* in, real_t* out)
{
    /* Stage reference function: stack the 4 differential states followed
     * by the 2 controls into the 6-element output. */
    const real_t* xd = in;
    const real_t* u = in + 4;
    int k;
    for (k = 0; k < 4; ++k)
        out[k] = xd[k];
    out[4] = u[0];
    out[5] = u[1];
}
void acado_evaluateLSQEndTerm(const real_t* in, real_t* out)
{
    /* Terminal reference function: the 4 differential states. */
    const real_t* xd = in;
    int k;
    for (k = 0; k < 4; ++k)
        out[k] = xd[k];
}
void acado_setObjQ1Q2( real_t* const tmpObjS, real_t* const tmpQ1, real_t* const tmpQ2 )
{
    /* Q2 is the first 4 rows (4x6) of the stage weighting matrix W;
     * Q1 is the leading 4x4 sub-block of Q2 (state columns only). */
    int row, col;
    for (row = 0; row < 24; ++row)
        tmpQ2[row] = tmpObjS[row];
    for (row = 0; row < 4; ++row)
        for (col = 0; col < 4; ++col)
            tmpQ1[row * 4 + col] = tmpQ2[row * 6 + col];
}
void acado_setObjR1R2( real_t* const tmpObjS, real_t* const tmpR1, real_t* const tmpR2 )
{
    /* R2 is the last 2 rows (2x6) of the stage weighting matrix W;
     * R1 is its trailing 2x2 sub-block (control columns only). */
    int row, col;
    for (row = 0; row < 12; ++row)
        tmpR2[row] = tmpObjS[24 + row];
    for (row = 0; row < 2; ++row)
        for (col = 0; col < 2; ++col)
            tmpR1[row * 2 + col] = tmpR2[row * 6 + 4 + col];
}
void acado_setObjQN1QN2( real_t* const tmpObjSEndTerm, real_t* const tmpQN1, real_t* const tmpQN2 )
{
    /* Terminal weighting: both QN2 and QN1 are straight 4x4 copies of the
     * end-term weight matrix. */
    int k;
    for (k = 0; k < 16; ++k)
        tmpQN2[k] = tmpObjSEndTerm[k];
    for (k = 0; k < 16; ++k)
        tmpQN1[k] = tmpQN2[k];
}
void acado_evaluateObjective(  )
{
    /* Evaluate the LSQ reference function on every stage plus the terminal
     * node, storing the residual vectors Dy/DyN and refreshing the Hessian
     * blocks Q1/Q2, R1/R2 and QN1/QN2 from the weight matrices. */
    int runObj;
    int k;
    for (runObj = 0; runObj < 16; ++runObj)
    {
        for (k = 0; k < 4; ++k)
            acadoWorkspace.objValueIn[k] = acadoVariables.x[runObj * 4 + k];
        acadoWorkspace.objValueIn[4] = acadoVariables.u[runObj * 2];
        acadoWorkspace.objValueIn[5] = acadoVariables.u[runObj * 2 + 1];
        acado_evaluateLSQ( acadoWorkspace.objValueIn, acadoWorkspace.objValueOut );
        for (k = 0; k < 6; ++k)
            acadoWorkspace.Dy[runObj * 6 + k] = acadoWorkspace.objValueOut[k];
        acado_setObjQ1Q2( &(acadoVariables.W[ runObj * 36 ]), &(acadoWorkspace.Q1[ runObj * 16 ]), &(acadoWorkspace.Q2[ runObj * 24 ]) );
        acado_setObjR1R2( &(acadoVariables.W[ runObj * 36 ]), &(acadoWorkspace.R1[ runObj * 4 ]), &(acadoWorkspace.R2[ runObj * 12 ]) );
    }
    /* Terminal node. */
    for (k = 0; k < 4; ++k)
        acadoWorkspace.objValueIn[k] = acadoVariables.x[64 + k];
    acado_evaluateLSQEndTerm( acadoWorkspace.objValueIn, acadoWorkspace.objValueOut );
    for (k = 0; k < 4; ++k)
        acadoWorkspace.DyN[k] = acadoWorkspace.objValueOut[k];
    acado_setObjQN1QN2( acadoVariables.WN, acadoWorkspace.QN1, acadoWorkspace.QN2 );
}
void acado_multGxd( real_t* const dOld, real_t* const Gx1, real_t* const dNew )
{
    /* dNew += Gx1 * dOld (4x4 matrix times 4-vector, accumulated). */
    int row, col;
    for (row = 0; row < 4; ++row)
    {
        real_t acc = 0.0;
        for (col = 0; col < 4; ++col)
            acc += Gx1[row * 4 + col] * dOld[col];
        dNew[row] += acc;
    }
}
void acado_moveGxT( real_t* const Gx1, real_t* const Gx2 )
{
    /* Copy a 4x4 sensitivity block element-wise. */
    int k;
    for (k = 0; k < 16; ++k)
        Gx2[k] = Gx1[k];
}
void acado_multGxGx( real_t* const Gx1, real_t* const Gx2, real_t* const Gx3 )
{
    /* Gx3 = Gx1 * Gx2 (4x4 matrix product, row-major). */
    int row, col, k;
    for (row = 0; row < 4; ++row)
        for (col = 0; col < 4; ++col)
        {
            real_t acc = 0.0;
            for (k = 0; k < 4; ++k)
                acc += Gx1[row * 4 + k] * Gx2[k * 4 + col];
            Gx3[row * 4 + col] = acc;
        }
}
void acado_multGxGu( real_t* const Gx1, real_t* const Gu1, real_t* const Gu2 )
{
    /* Gu2 = Gx1 * Gu1 (4x4 times 4x2, row-major). */
    int row, col, k;
    for (row = 0; row < 4; ++row)
        for (col = 0; col < 2; ++col)
        {
            real_t acc = 0.0;
            for (k = 0; k < 4; ++k)
                acc += Gx1[row * 4 + k] * Gu1[k * 2 + col];
            Gu2[row * 2 + col] = acc;
        }
}
void acado_moveGuE( real_t* const Gu1, real_t* const Gu2 )
{
    /* Copy a 4x2 control-sensitivity block element-wise. */
    int k;
    for (k = 0; k < 8; ++k)
        Gu2[k] = Gu1[k];
}
void acado_setBlockH11( int iRow, int iCol, real_t* const Gu1, real_t* const Gu2 )
{
    /* Accumulate the 2x2 Hessian block H(iRow,iCol) += Gu1^T * Gu2.
     * H is a 32x32 row-major array tiled into 2x2 control blocks. */
    int r, c, k;
    for (r = 0; r < 2; ++r)
        for (c = 0; c < 2; ++c)
        {
            real_t acc = 0.0;
            for (k = 0; k < 4; ++k)
                acc += Gu1[k * 2 + r] * Gu2[k * 2 + c];
            acadoWorkspace.H[(iRow * 64 + r * 32) + (iCol * 2 + c)] += acc;
        }
}
void acado_setBlockH11_R1( int iRow, int iCol, real_t* const R11 )
{
    /* Initialize the 2x2 Hessian block H(iRow,iCol) with the control
     * weight block R11. */
    int r, c;
    for (r = 0; r < 2; ++r)
        for (c = 0; c < 2; ++c)
            acadoWorkspace.H[(iRow * 64 + r * 32) + (iCol * 2 + c)] = R11[r * 2 + c];
}
void acado_zeroBlockH11( int iRow, int iCol )
{
    /* Clear the 2x2 Hessian block H(iRow,iCol). */
    int r, c;
    for (r = 0; r < 2; ++r)
        for (c = 0; c < 2; ++c)
            acadoWorkspace.H[(iRow * 64 + r * 32) + (iCol * 2 + c)] = 0.0000000000000000e+00;
}
void acado_copyHTH( int iRow, int iCol )
{
    /* Mirror the transposed 2x2 block: H(iRow,iCol) = H(iCol,iRow)^T,
     * exploiting the symmetry of the condensed Hessian. */
    int r, c;
    for (r = 0; r < 2; ++r)
        for (c = 0; c < 2; ++c)
            acadoWorkspace.H[(iRow * 64 + r * 32) + (iCol * 2 + c)] =
                acadoWorkspace.H[(iCol * 64 + c * 32) + (iRow * 2 + r)];
}
void acado_multQ1d( real_t* const Gx1, real_t* const dOld, real_t* const dNew )
{
    /* dNew = Gx1 * dOld (4x4 matrix times 4-vector). */
    int row, col;
    for (row = 0; row < 4; ++row)
    {
        real_t acc = 0.0;
        for (col = 0; col < 4; ++col)
            acc += Gx1[row * 4 + col] * dOld[col];
        dNew[row] = acc;
    }
}
void acado_multQN1d( real_t* const QN1, real_t* const dOld, real_t* const dNew )
{
/* dNew = QN1 * dOld, with QN1 a row-major 4x4 terminal-cost matrix and
 * dOld/dNew 4-vectors.
 *
 * Fix: the original body ignored the QN1 argument and read the global
 * acadoWorkspace.QN1 directly, unlike the sibling acado_multQ1d which uses
 * its Gx1 argument. The parameter is now honored, consistent with the other
 * mult* helpers. NOTE(review): callers are expected to pass
 * acadoWorkspace.QN1, in which case behavior is unchanged — verify at the
 * call sites. */
int r, k;
for (r = 0; r < 4; ++r)
{
real_t acc = QN1[r * 4] * dOld[0];
for (k = 1; k < 4; ++k)
acc += QN1[r * 4 + k] * dOld[k];
dNew[r] = acc;
}
}
void acado_multRDy( real_t* const R2, real_t* const Dy1, real_t* const RDy1 )
{
/* RDy1 = R2 * Dy1, with R2 a row-major 2x6 matrix and Dy1 a 6-vector. */
int r, k;
for (r = 0; r < 2; ++r)
{
real_t acc = R2[r * 6] * Dy1[0];
for (k = 1; k < 6; ++k)
acc += R2[r * 6 + k] * Dy1[k];
RDy1[r] = acc;
}
}
void acado_multQDy( real_t* const Q2, real_t* const Dy1, real_t* const QDy1 )
{
/* QDy1 = Q2 * Dy1, with Q2 a row-major 4x6 matrix and Dy1 a 6-vector. */
int r, k;
for (r = 0; r < 4; ++r)
{
real_t acc = Q2[r * 6] * Dy1[0];
for (k = 1; k < 6; ++k)
acc += Q2[r * 6 + k] * Dy1[k];
QDy1[r] = acc;
}
}
void acado_multEQDy( real_t* const E1, real_t* const QDy1, real_t* const U1 )
{
/* U1 += E1^T * QDy1, with E1 a row-major 4x2 block and QDy1 a 4-vector.
 * Column c of E1 is strided: E1[k*2 + c] for k = 0..3. */
int c, k;
for (c = 0; c < 2; ++c)
{
real_t acc = E1[c] * QDy1[0]; /* k = 0 term */
for (k = 1; k < 4; ++k)
acc += E1[k * 2 + c] * QDy1[k];
U1[c] += acc;
}
}
void acado_multQETGx( real_t* const E1, real_t* const Gx1, real_t* const H101 )
{
/* H101 += E1^T * Gx1: E1 is a row-major 4x2 block, Gx1 a row-major 4x4
 * matrix, and H101 a row-major 2x4 result block. */
int r, c, k;
for (r = 0; r < 2; ++r)
{
for (c = 0; c < 4; ++c)
{
real_t acc = E1[r] * Gx1[c]; /* k = 0 term: E1[0*2+r] * Gx1[0*4+c] */
for (k = 1; k < 4; ++k)
acc += E1[k * 2 + r] * Gx1[k * 4 + c];
H101[r * 4 + c] += acc;
}
}
}
void acado_zeroBlockH10( real_t* const H101 )
{
/* Clear the 2x4 H10 block (8 entries). */
int i = 0;
while (i < 8)
{
H101[i] = 0.0;
++i;
}
}
void acado_multEDu( real_t* const E1, real_t* const U1, real_t* const dNew )
{
/* dNew += E1 * U1, with E1 a row-major 4x2 block and U1 a 2-vector. */
int r;
for (r = 0; r < 4; ++r)
dNew[r] += E1[r * 2] * U1[0] + E1[r * 2 + 1] * U1[1];
}
void acado_macETSlu( real_t* const E0, real_t* const g1 )
{
/* Generated no-op accumulation: the E0^T * slu contribution is identically
 * zero for this problem instance, so only "+= 0.0" remains. The additions
 * are kept (rather than removed) to preserve exact floating-point behavior,
 * e.g. -0.0 + 0.0 normalizes to +0.0. E0 is intentionally unused. */
int i;
for (i = 0; i < 2; ++i)
g1[i] += 0.0;
}
void acado_condensePrep( )
{
acado_moveGuE( acadoWorkspace.evGu, acadoWorkspace.E );
acado_moveGxT( &(acadoWorkspace.evGx[ 16 ]), acadoWorkspace.T );
acado_multGxd( acadoWorkspace.d, &(acadoWorkspace.evGx[ 16 ]), &(acadoWorkspace.d[ 4 ]) );
acado_multGxGx( acadoWorkspace.T, acadoWorkspace.evGx, &(acadoWorkspace.evGx[ 16 ]) );
acado_multGxGu( acadoWorkspace.T, acadoWorkspace.E, &(acadoWorkspace.E[ 8 ]) );
acado_moveGuE( &(acadoWorkspace.evGu[ 8 ]), &(acadoWorkspace.E[ 16 ]) );
acado_moveGxT( &(acadoWorkspace.evGx[ 32 ]), acadoWorkspace.T );
acado_multGxd( &(acadoWorkspace.d[ 4 ]), &(acadoWorkspace.evGx[ 32 ]), &(acadoWorkspace.d[ 8 ]) );
acado_multGxGx( acadoWorkspace.T, &(acadoWorkspace.evGx[ 16 ]), &(acadoWorkspace.evGx[ 32 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 8 ]), &(acadoWorkspace.E[ 24 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 16 ]), &(acadoWorkspace.E[ 32 ]) );
acado_moveGuE( &(acadoWorkspace.evGu[ 16 ]), &(acadoWorkspace.E[ 40 ]) );
acado_moveGxT( &(acadoWorkspace.evGx[ 48 ]), acadoWorkspace.T );
acado_multGxd( &(acadoWorkspace.d[ 8 ]), &(acadoWorkspace.evGx[ 48 ]), &(acadoWorkspace.d[ 12 ]) );
acado_multGxGx( acadoWorkspace.T, &(acadoWorkspace.evGx[ 32 ]), &(acadoWorkspace.evGx[ 48 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 24 ]), &(acadoWorkspace.E[ 48 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 32 ]), &(acadoWorkspace.E[ 56 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 40 ]), &(acadoWorkspace.E[ 64 ]) );
acado_moveGuE( &(acadoWorkspace.evGu[ 24 ]), &(acadoWorkspace.E[ 72 ]) );
acado_moveGxT( &(acadoWorkspace.evGx[ 64 ]), acadoWorkspace.T );
acado_multGxd( &(acadoWorkspace.d[ 12 ]), &(acadoWorkspace.evGx[ 64 ]), &(acadoWorkspace.d[ 16 ]) );
acado_multGxGx( acadoWorkspace.T, &(acadoWorkspace.evGx[ 48 ]), &(acadoWorkspace.evGx[ 64 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 48 ]), &(acadoWorkspace.E[ 80 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 56 ]), &(acadoWorkspace.E[ 88 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 64 ]), &(acadoWorkspace.E[ 96 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 72 ]), &(acadoWorkspace.E[ 104 ]) );
acado_moveGuE( &(acadoWorkspace.evGu[ 32 ]), &(acadoWorkspace.E[ 112 ]) );
acado_moveGxT( &(acadoWorkspace.evGx[ 80 ]), acadoWorkspace.T );
acado_multGxd( &(acadoWorkspace.d[ 16 ]), &(acadoWorkspace.evGx[ 80 ]), &(acadoWorkspace.d[ 20 ]) );
acado_multGxGx( acadoWorkspace.T, &(acadoWorkspace.evGx[ 64 ]), &(acadoWorkspace.evGx[ 80 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 80 ]), &(acadoWorkspace.E[ 120 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 88 ]), &(acadoWorkspace.E[ 128 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 96 ]), &(acadoWorkspace.E[ 136 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 104 ]), &(acadoWorkspace.E[ 144 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 112 ]), &(acadoWorkspace.E[ 152 ]) );
acado_moveGuE( &(acadoWorkspace.evGu[ 40 ]), &(acadoWorkspace.E[ 160 ]) );
acado_moveGxT( &(acadoWorkspace.evGx[ 96 ]), acadoWorkspace.T );
acado_multGxd( &(acadoWorkspace.d[ 20 ]), &(acadoWorkspace.evGx[ 96 ]), &(acadoWorkspace.d[ 24 ]) );
acado_multGxGx( acadoWorkspace.T, &(acadoWorkspace.evGx[ 80 ]), &(acadoWorkspace.evGx[ 96 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 120 ]), &(acadoWorkspace.E[ 168 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 128 ]), &(acadoWorkspace.E[ 176 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 136 ]), &(acadoWorkspace.E[ 184 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 144 ]), &(acadoWorkspace.E[ 192 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 152 ]), &(acadoWorkspace.E[ 200 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 160 ]), &(acadoWorkspace.E[ 208 ]) );
acado_moveGuE( &(acadoWorkspace.evGu[ 48 ]), &(acadoWorkspace.E[ 216 ]) );
acado_moveGxT( &(acadoWorkspace.evGx[ 112 ]), acadoWorkspace.T );
acado_multGxd( &(acadoWorkspace.d[ 24 ]), &(acadoWorkspace.evGx[ 112 ]), &(acadoWorkspace.d[ 28 ]) );
acado_multGxGx( acadoWorkspace.T, &(acadoWorkspace.evGx[ 96 ]), &(acadoWorkspace.evGx[ 112 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 168 ]), &(acadoWorkspace.E[ 224 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 176 ]), &(acadoWorkspace.E[ 232 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 184 ]), &(acadoWorkspace.E[ 240 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 192 ]), &(acadoWorkspace.E[ 248 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 200 ]), &(acadoWorkspace.E[ 256 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 208 ]), &(acadoWorkspace.E[ 264 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 216 ]), &(acadoWorkspace.E[ 272 ]) );
acado_moveGuE( &(acadoWorkspace.evGu[ 56 ]), &(acadoWorkspace.E[ 280 ]) );
acado_moveGxT( &(acadoWorkspace.evGx[ 128 ]), acadoWorkspace.T );
acado_multGxd( &(acadoWorkspace.d[ 28 ]), &(acadoWorkspace.evGx[ 128 ]), &(acadoWorkspace.d[ 32 ]) );
acado_multGxGx( acadoWorkspace.T, &(acadoWorkspace.evGx[ 112 ]), &(acadoWorkspace.evGx[ 128 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 224 ]), &(acadoWorkspace.E[ 288 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 232 ]), &(acadoWorkspace.E[ 296 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 240 ]), &(acadoWorkspace.E[ 304 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 248 ]), &(acadoWorkspace.E[ 312 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 256 ]), &(acadoWorkspace.E[ 320 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 264 ]), &(acadoWorkspace.E[ 328 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 272 ]), &(acadoWorkspace.E[ 336 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 280 ]), &(acadoWorkspace.E[ 344 ]) );
acado_moveGuE( &(acadoWorkspace.evGu[ 64 ]), &(acadoWorkspace.E[ 352 ]) );
acado_moveGxT( &(acadoWorkspace.evGx[ 144 ]), acadoWorkspace.T );
acado_multGxd( &(acadoWorkspace.d[ 32 ]), &(acadoWorkspace.evGx[ 144 ]), &(acadoWorkspace.d[ 36 ]) );
acado_multGxGx( acadoWorkspace.T, &(acadoWorkspace.evGx[ 128 ]), &(acadoWorkspace.evGx[ 144 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 288 ]), &(acadoWorkspace.E[ 360 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 296 ]), &(acadoWorkspace.E[ 368 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 304 ]), &(acadoWorkspace.E[ 376 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 312 ]), &(acadoWorkspace.E[ 384 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 320 ]), &(acadoWorkspace.E[ 392 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 328 ]), &(acadoWorkspace.E[ 400 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 336 ]), &(acadoWorkspace.E[ 408 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 344 ]), &(acadoWorkspace.E[ 416 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 352 ]), &(acadoWorkspace.E[ 424 ]) );
acado_moveGuE( &(acadoWorkspace.evGu[ 72 ]), &(acadoWorkspace.E[ 432 ]) );
acado_moveGxT( &(acadoWorkspace.evGx[ 160 ]), acadoWorkspace.T );
acado_multGxd( &(acadoWorkspace.d[ 36 ]), &(acadoWorkspace.evGx[ 160 ]), &(acadoWorkspace.d[ 40 ]) );
acado_multGxGx( acadoWorkspace.T, &(acadoWorkspace.evGx[ 144 ]), &(acadoWorkspace.evGx[ 160 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 360 ]), &(acadoWorkspace.E[ 440 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 368 ]), &(acadoWorkspace.E[ 448 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 376 ]), &(acadoWorkspace.E[ 456 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 384 ]), &(acadoWorkspace.E[ 464 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 392 ]), &(acadoWorkspace.E[ 472 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 400 ]), &(acadoWorkspace.E[ 480 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 408 ]), &(acadoWorkspace.E[ 488 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 416 ]), &(acadoWorkspace.E[ 496 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 424 ]), &(acadoWorkspace.E[ 504 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 432 ]), &(acadoWorkspace.E[ 512 ]) );
acado_moveGuE( &(acadoWorkspace.evGu[ 80 ]), &(acadoWorkspace.E[ 520 ]) );
acado_moveGxT( &(acadoWorkspace.evGx[ 176 ]), acadoWorkspace.T );
acado_multGxd( &(acadoWorkspace.d[ 40 ]), &(acadoWorkspace.evGx[ 176 ]), &(acadoWorkspace.d[ 44 ]) );
acado_multGxGx( acadoWorkspace.T, &(acadoWorkspace.evGx[ 160 ]), &(acadoWorkspace.evGx[ 176 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 440 ]), &(acadoWorkspace.E[ 528 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 448 ]), &(acadoWorkspace.E[ 536 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 456 ]), &(acadoWorkspace.E[ 544 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 464 ]), &(acadoWorkspace.E[ 552 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 472 ]), &(acadoWorkspace.E[ 560 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 480 ]), &(acadoWorkspace.E[ 568 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 488 ]), &(acadoWorkspace.E[ 576 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 496 ]), &(acadoWorkspace.E[ 584 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 504 ]), &(acadoWorkspace.E[ 592 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 512 ]), &(acadoWorkspace.E[ 600 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 520 ]), &(acadoWorkspace.E[ 608 ]) );
acado_moveGuE( &(acadoWorkspace.evGu[ 88 ]), &(acadoWorkspace.E[ 616 ]) );
acado_moveGxT( &(acadoWorkspace.evGx[ 192 ]), acadoWorkspace.T );
acado_multGxd( &(acadoWorkspace.d[ 44 ]), &(acadoWorkspace.evGx[ 192 ]), &(acadoWorkspace.d[ 48 ]) );
acado_multGxGx( acadoWorkspace.T, &(acadoWorkspace.evGx[ 176 ]), &(acadoWorkspace.evGx[ 192 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 528 ]), &(acadoWorkspace.E[ 624 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 536 ]), &(acadoWorkspace.E[ 632 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 544 ]), &(acadoWorkspace.E[ 640 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 552 ]), &(acadoWorkspace.E[ 648 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 560 ]), &(acadoWorkspace.E[ 656 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 568 ]), &(acadoWorkspace.E[ 664 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 576 ]), &(acadoWorkspace.E[ 672 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 584 ]), &(acadoWorkspace.E[ 680 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 592 ]), &(acadoWorkspace.E[ 688 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 600 ]), &(acadoWorkspace.E[ 696 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 608 ]), &(acadoWorkspace.E[ 704 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 616 ]), &(acadoWorkspace.E[ 712 ]) );
acado_moveGuE( &(acadoWorkspace.evGu[ 96 ]), &(acadoWorkspace.E[ 720 ]) );
acado_moveGxT( &(acadoWorkspace.evGx[ 208 ]), acadoWorkspace.T );
acado_multGxd( &(acadoWorkspace.d[ 48 ]), &(acadoWorkspace.evGx[ 208 ]), &(acadoWorkspace.d[ 52 ]) );
acado_multGxGx( acadoWorkspace.T, &(acadoWorkspace.evGx[ 192 ]), &(acadoWorkspace.evGx[ 208 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 624 ]), &(acadoWorkspace.E[ 728 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 632 ]), &(acadoWorkspace.E[ 736 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 640 ]), &(acadoWorkspace.E[ 744 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 648 ]), &(acadoWorkspace.E[ 752 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 656 ]), &(acadoWorkspace.E[ 760 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 664 ]), &(acadoWorkspace.E[ 768 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 672 ]), &(acadoWorkspace.E[ 776 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 680 ]), &(acadoWorkspace.E[ 784 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 688 ]), &(acadoWorkspace.E[ 792 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 696 ]), &(acadoWorkspace.E[ 800 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 704 ]), &(acadoWorkspace.E[ 808 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 712 ]), &(acadoWorkspace.E[ 816 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 720 ]), &(acadoWorkspace.E[ 824 ]) );
acado_moveGuE( &(acadoWorkspace.evGu[ 104 ]), &(acadoWorkspace.E[ 832 ]) );
acado_moveGxT( &(acadoWorkspace.evGx[ 224 ]), acadoWorkspace.T );
acado_multGxd( &(acadoWorkspace.d[ 52 ]), &(acadoWorkspace.evGx[ 224 ]), &(acadoWorkspace.d[ 56 ]) );
acado_multGxGx( acadoWorkspace.T, &(acadoWorkspace.evGx[ 208 ]), &(acadoWorkspace.evGx[ 224 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 728 ]), &(acadoWorkspace.E[ 840 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 736 ]), &(acadoWorkspace.E[ 848 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 744 ]), &(acadoWorkspace.E[ 856 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 752 ]), &(acadoWorkspace.E[ 864 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 760 ]), &(acadoWorkspace.E[ 872 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 768 ]), &(acadoWorkspace.E[ 880 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 776 ]), &(acadoWorkspace.E[ 888 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 784 ]), &(acadoWorkspace.E[ 896 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 792 ]), &(acadoWorkspace.E[ 904 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 800 ]), &(acadoWorkspace.E[ 912 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 808 ]), &(acadoWorkspace.E[ 920 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 816 ]), &(acadoWorkspace.E[ 928 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 824 ]), &(acadoWorkspace.E[ 936 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 832 ]), &(acadoWorkspace.E[ 944 ]) );
acado_moveGuE( &(acadoWorkspace.evGu[ 112 ]), &(acadoWorkspace.E[ 952 ]) );
acado_moveGxT( &(acadoWorkspace.evGx[ 240 ]), acadoWorkspace.T );
acado_multGxd( &(acadoWorkspace.d[ 56 ]), &(acadoWorkspace.evGx[ 240 ]), &(acadoWorkspace.d[ 60 ]) );
acado_multGxGx( acadoWorkspace.T, &(acadoWorkspace.evGx[ 224 ]), &(acadoWorkspace.evGx[ 240 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 840 ]), &(acadoWorkspace.E[ 960 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 848 ]), &(acadoWorkspace.E[ 968 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 856 ]), &(acadoWorkspace.E[ 976 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 864 ]), &(acadoWorkspace.E[ 984 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 872 ]), &(acadoWorkspace.E[ 992 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 880 ]), &(acadoWorkspace.E[ 1000 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 888 ]), &(acadoWorkspace.E[ 1008 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 896 ]), &(acadoWorkspace.E[ 1016 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 904 ]), &(acadoWorkspace.E[ 1024 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 912 ]), &(acadoWorkspace.E[ 1032 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 920 ]), &(acadoWorkspace.E[ 1040 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 928 ]), &(acadoWorkspace.E[ 1048 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 936 ]), &(acadoWorkspace.E[ 1056 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 944 ]), &(acadoWorkspace.E[ 1064 ]) );
acado_multGxGu( acadoWorkspace.T, &(acadoWorkspace.E[ 952 ]), &(acadoWorkspace.E[ 1072 ]) );
acado_moveGuE( &(acadoWorkspace.evGu[ 120 ]), &(acadoWorkspace.E[ 1080 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 16 ]), acadoWorkspace.E, acadoWorkspace.QE );
acado_multGxGu( &(acadoWorkspace.Q1[ 32 ]), &(acadoWorkspace.E[ 8 ]), &(acadoWorkspace.QE[ 8 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 32 ]), &(acadoWorkspace.E[ 16 ]), &(acadoWorkspace.QE[ 16 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 48 ]), &(acadoWorkspace.E[ 24 ]), &(acadoWorkspace.QE[ 24 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 48 ]), &(acadoWorkspace.E[ 32 ]), &(acadoWorkspace.QE[ 32 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 48 ]), &(acadoWorkspace.E[ 40 ]), &(acadoWorkspace.QE[ 40 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 64 ]), &(acadoWorkspace.E[ 48 ]), &(acadoWorkspace.QE[ 48 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 64 ]), &(acadoWorkspace.E[ 56 ]), &(acadoWorkspace.QE[ 56 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 64 ]), &(acadoWorkspace.E[ 64 ]), &(acadoWorkspace.QE[ 64 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 64 ]), &(acadoWorkspace.E[ 72 ]), &(acadoWorkspace.QE[ 72 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 80 ]), &(acadoWorkspace.E[ 80 ]), &(acadoWorkspace.QE[ 80 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 80 ]), &(acadoWorkspace.E[ 88 ]), &(acadoWorkspace.QE[ 88 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 80 ]), &(acadoWorkspace.E[ 96 ]), &(acadoWorkspace.QE[ 96 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 80 ]), &(acadoWorkspace.E[ 104 ]), &(acadoWorkspace.QE[ 104 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 80 ]), &(acadoWorkspace.E[ 112 ]), &(acadoWorkspace.QE[ 112 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 96 ]), &(acadoWorkspace.E[ 120 ]), &(acadoWorkspace.QE[ 120 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 96 ]), &(acadoWorkspace.E[ 128 ]), &(acadoWorkspace.QE[ 128 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 96 ]), &(acadoWorkspace.E[ 136 ]), &(acadoWorkspace.QE[ 136 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 96 ]), &(acadoWorkspace.E[ 144 ]), &(acadoWorkspace.QE[ 144 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 96 ]), &(acadoWorkspace.E[ 152 ]), &(acadoWorkspace.QE[ 152 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 96 ]), &(acadoWorkspace.E[ 160 ]), &(acadoWorkspace.QE[ 160 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 112 ]), &(acadoWorkspace.E[ 168 ]), &(acadoWorkspace.QE[ 168 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 112 ]), &(acadoWorkspace.E[ 176 ]), &(acadoWorkspace.QE[ 176 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 112 ]), &(acadoWorkspace.E[ 184 ]), &(acadoWorkspace.QE[ 184 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 112 ]), &(acadoWorkspace.E[ 192 ]), &(acadoWorkspace.QE[ 192 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 112 ]), &(acadoWorkspace.E[ 200 ]), &(acadoWorkspace.QE[ 200 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 112 ]), &(acadoWorkspace.E[ 208 ]), &(acadoWorkspace.QE[ 208 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 112 ]), &(acadoWorkspace.E[ 216 ]), &(acadoWorkspace.QE[ 216 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 128 ]), &(acadoWorkspace.E[ 224 ]), &(acadoWorkspace.QE[ 224 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 128 ]), &(acadoWorkspace.E[ 232 ]), &(acadoWorkspace.QE[ 232 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 128 ]), &(acadoWorkspace.E[ 240 ]), &(acadoWorkspace.QE[ 240 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 128 ]), &(acadoWorkspace.E[ 248 ]), &(acadoWorkspace.QE[ 248 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 128 ]), &(acadoWorkspace.E[ 256 ]), &(acadoWorkspace.QE[ 256 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 128 ]), &(acadoWorkspace.E[ 264 ]), &(acadoWorkspace.QE[ 264 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 128 ]), &(acadoWorkspace.E[ 272 ]), &(acadoWorkspace.QE[ 272 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 128 ]), &(acadoWorkspace.E[ 280 ]), &(acadoWorkspace.QE[ 280 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 144 ]), &(acadoWorkspace.E[ 288 ]), &(acadoWorkspace.QE[ 288 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 144 ]), &(acadoWorkspace.E[ 296 ]), &(acadoWorkspace.QE[ 296 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 144 ]), &(acadoWorkspace.E[ 304 ]), &(acadoWorkspace.QE[ 304 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 144 ]), &(acadoWorkspace.E[ 312 ]), &(acadoWorkspace.QE[ 312 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 144 ]), &(acadoWorkspace.E[ 320 ]), &(acadoWorkspace.QE[ 320 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 144 ]), &(acadoWorkspace.E[ 328 ]), &(acadoWorkspace.QE[ 328 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 144 ]), &(acadoWorkspace.E[ 336 ]), &(acadoWorkspace.QE[ 336 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 144 ]), &(acadoWorkspace.E[ 344 ]), &(acadoWorkspace.QE[ 344 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 144 ]), &(acadoWorkspace.E[ 352 ]), &(acadoWorkspace.QE[ 352 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 160 ]), &(acadoWorkspace.E[ 360 ]), &(acadoWorkspace.QE[ 360 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 160 ]), &(acadoWorkspace.E[ 368 ]), &(acadoWorkspace.QE[ 368 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 160 ]), &(acadoWorkspace.E[ 376 ]), &(acadoWorkspace.QE[ 376 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 160 ]), &(acadoWorkspace.E[ 384 ]), &(acadoWorkspace.QE[ 384 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 160 ]), &(acadoWorkspace.E[ 392 ]), &(acadoWorkspace.QE[ 392 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 160 ]), &(acadoWorkspace.E[ 400 ]), &(acadoWorkspace.QE[ 400 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 160 ]), &(acadoWorkspace.E[ 408 ]), &(acadoWorkspace.QE[ 408 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 160 ]), &(acadoWorkspace.E[ 416 ]), &(acadoWorkspace.QE[ 416 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 160 ]), &(acadoWorkspace.E[ 424 ]), &(acadoWorkspace.QE[ 424 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 160 ]), &(acadoWorkspace.E[ 432 ]), &(acadoWorkspace.QE[ 432 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 176 ]), &(acadoWorkspace.E[ 440 ]), &(acadoWorkspace.QE[ 440 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 176 ]), &(acadoWorkspace.E[ 448 ]), &(acadoWorkspace.QE[ 448 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 176 ]), &(acadoWorkspace.E[ 456 ]), &(acadoWorkspace.QE[ 456 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 176 ]), &(acadoWorkspace.E[ 464 ]), &(acadoWorkspace.QE[ 464 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 176 ]), &(acadoWorkspace.E[ 472 ]), &(acadoWorkspace.QE[ 472 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 176 ]), &(acadoWorkspace.E[ 480 ]), &(acadoWorkspace.QE[ 480 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 176 ]), &(acadoWorkspace.E[ 488 ]), &(acadoWorkspace.QE[ 488 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 176 ]), &(acadoWorkspace.E[ 496 ]), &(acadoWorkspace.QE[ 496 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 176 ]), &(acadoWorkspace.E[ 504 ]), &(acadoWorkspace.QE[ 504 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 176 ]), &(acadoWorkspace.E[ 512 ]), &(acadoWorkspace.QE[ 512 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 176 ]), &(acadoWorkspace.E[ 520 ]), &(acadoWorkspace.QE[ 520 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 192 ]), &(acadoWorkspace.E[ 528 ]), &(acadoWorkspace.QE[ 528 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 192 ]), &(acadoWorkspace.E[ 536 ]), &(acadoWorkspace.QE[ 536 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 192 ]), &(acadoWorkspace.E[ 544 ]), &(acadoWorkspace.QE[ 544 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 192 ]), &(acadoWorkspace.E[ 552 ]), &(acadoWorkspace.QE[ 552 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 192 ]), &(acadoWorkspace.E[ 560 ]), &(acadoWorkspace.QE[ 560 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 192 ]), &(acadoWorkspace.E[ 568 ]), &(acadoWorkspace.QE[ 568 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 192 ]), &(acadoWorkspace.E[ 576 ]), &(acadoWorkspace.QE[ 576 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 192 ]), &(acadoWorkspace.E[ 584 ]), &(acadoWorkspace.QE[ 584 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 192 ]), &(acadoWorkspace.E[ 592 ]), &(acadoWorkspace.QE[ 592 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 192 ]), &(acadoWorkspace.E[ 600 ]), &(acadoWorkspace.QE[ 600 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 192 ]), &(acadoWorkspace.E[ 608 ]), &(acadoWorkspace.QE[ 608 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 192 ]), &(acadoWorkspace.E[ 616 ]), &(acadoWorkspace.QE[ 616 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 208 ]), &(acadoWorkspace.E[ 624 ]), &(acadoWorkspace.QE[ 624 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 208 ]), &(acadoWorkspace.E[ 632 ]), &(acadoWorkspace.QE[ 632 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 208 ]), &(acadoWorkspace.E[ 640 ]), &(acadoWorkspace.QE[ 640 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 208 ]), &(acadoWorkspace.E[ 648 ]), &(acadoWorkspace.QE[ 648 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 208 ]), &(acadoWorkspace.E[ 656 ]), &(acadoWorkspace.QE[ 656 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 208 ]), &(acadoWorkspace.E[ 664 ]), &(acadoWorkspace.QE[ 664 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 208 ]), &(acadoWorkspace.E[ 672 ]), &(acadoWorkspace.QE[ 672 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 208 ]), &(acadoWorkspace.E[ 680 ]), &(acadoWorkspace.QE[ 680 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 208 ]), &(acadoWorkspace.E[ 688 ]), &(acadoWorkspace.QE[ 688 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 208 ]), &(acadoWorkspace.E[ 696 ]), &(acadoWorkspace.QE[ 696 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 208 ]), &(acadoWorkspace.E[ 704 ]), &(acadoWorkspace.QE[ 704 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 208 ]), &(acadoWorkspace.E[ 712 ]), &(acadoWorkspace.QE[ 712 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 208 ]), &(acadoWorkspace.E[ 720 ]), &(acadoWorkspace.QE[ 720 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 224 ]), &(acadoWorkspace.E[ 728 ]), &(acadoWorkspace.QE[ 728 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 224 ]), &(acadoWorkspace.E[ 736 ]), &(acadoWorkspace.QE[ 736 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 224 ]), &(acadoWorkspace.E[ 744 ]), &(acadoWorkspace.QE[ 744 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 224 ]), &(acadoWorkspace.E[ 752 ]), &(acadoWorkspace.QE[ 752 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 224 ]), &(acadoWorkspace.E[ 760 ]), &(acadoWorkspace.QE[ 760 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 224 ]), &(acadoWorkspace.E[ 768 ]), &(acadoWorkspace.QE[ 768 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 224 ]), &(acadoWorkspace.E[ 776 ]), &(acadoWorkspace.QE[ 776 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 224 ]), &(acadoWorkspace.E[ 784 ]), &(acadoWorkspace.QE[ 784 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 224 ]), &(acadoWorkspace.E[ 792 ]), &(acadoWorkspace.QE[ 792 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 224 ]), &(acadoWorkspace.E[ 800 ]), &(acadoWorkspace.QE[ 800 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 224 ]), &(acadoWorkspace.E[ 808 ]), &(acadoWorkspace.QE[ 808 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 224 ]), &(acadoWorkspace.E[ 816 ]), &(acadoWorkspace.QE[ 816 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 224 ]), &(acadoWorkspace.E[ 824 ]), &(acadoWorkspace.QE[ 824 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 224 ]), &(acadoWorkspace.E[ 832 ]), &(acadoWorkspace.QE[ 832 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 240 ]), &(acadoWorkspace.E[ 840 ]), &(acadoWorkspace.QE[ 840 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 240 ]), &(acadoWorkspace.E[ 848 ]), &(acadoWorkspace.QE[ 848 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 240 ]), &(acadoWorkspace.E[ 856 ]), &(acadoWorkspace.QE[ 856 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 240 ]), &(acadoWorkspace.E[ 864 ]), &(acadoWorkspace.QE[ 864 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 240 ]), &(acadoWorkspace.E[ 872 ]), &(acadoWorkspace.QE[ 872 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 240 ]), &(acadoWorkspace.E[ 880 ]), &(acadoWorkspace.QE[ 880 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 240 ]), &(acadoWorkspace.E[ 888 ]), &(acadoWorkspace.QE[ 888 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 240 ]), &(acadoWorkspace.E[ 896 ]), &(acadoWorkspace.QE[ 896 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 240 ]), &(acadoWorkspace.E[ 904 ]), &(acadoWorkspace.QE[ 904 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 240 ]), &(acadoWorkspace.E[ 912 ]), &(acadoWorkspace.QE[ 912 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 240 ]), &(acadoWorkspace.E[ 920 ]), &(acadoWorkspace.QE[ 920 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 240 ]), &(acadoWorkspace.E[ 928 ]), &(acadoWorkspace.QE[ 928 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 240 ]), &(acadoWorkspace.E[ 936 ]), &(acadoWorkspace.QE[ 936 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 240 ]), &(acadoWorkspace.E[ 944 ]), &(acadoWorkspace.QE[ 944 ]) );
acado_multGxGu( &(acadoWorkspace.Q1[ 240 ]), &(acadoWorkspace.E[ 952 ]), &(acadoWorkspace.QE[ 952 ]) );
acado_multGxGu( acadoWorkspace.QN1, &(acadoWorkspace.E[ 960 ]), &(acadoWorkspace.QE[ 960 ]) );
acado_multGxGu( acadoWorkspace.QN1, &(acadoWorkspace.E[ 968 ]), &(acadoWorkspace.QE[ 968 ]) );
acado_multGxGu( acadoWorkspace.QN1, &(acadoWorkspace.E[ 976 ]), &(acadoWorkspace.QE[ 976 ]) );
acado_multGxGu( acadoWorkspace.QN1, &(acadoWorkspace.E[ 984 ]), &(acadoWorkspace.QE[ 984 ]) );
acado_multGxGu( acadoWorkspace.QN1, &(acadoWorkspace.E[ 992 ]), &(acadoWorkspace.QE[ 992 ]) );
acado_multGxGu( acadoWorkspace.QN1, &(acadoWorkspace.E[ 1000 ]), &(acadoWorkspace.QE[ 1000 ]) );
acado_multGxGu( acadoWorkspace.QN1, &(acadoWorkspace.E[ 1008 ]), &(acadoWorkspace.QE[ 1008 ]) );
acado_multGxGu( acadoWorkspace.QN1, &(acadoWorkspace.E[ 1016 ]), &(acadoWorkspace.QE[ 1016 ]) );
acado_multGxGu( acadoWorkspace.QN1, &(acadoWorkspace.E[ 1024 ]), &(acadoWorkspace.QE[ 1024 ]) );
acado_multGxGu( acadoWorkspace.QN1, &(acadoWorkspace.E[ 1032 ]), &(acadoWorkspace.QE[ 1032 ]) );
acado_multGxGu( acadoWorkspace.QN1, &(acadoWorkspace.E[ 1040 ]), &(acadoWorkspace.QE[ 1040 ]) );
acado_multGxGu( acadoWorkspace.QN1, &(acadoWorkspace.E[ 1048 ]), &(acadoWorkspace.QE[ 1048 ]) );
acado_multGxGu( acadoWorkspace.QN1, &(acadoWorkspace.E[ 1056 ]), &(acadoWorkspace.QE[ 1056 ]) );
acado_multGxGu( acadoWorkspace.QN1, &(acadoWorkspace.E[ 1064 ]), &(acadoWorkspace.QE[ 1064 ]) );
acado_multGxGu( acadoWorkspace.QN1, &(acadoWorkspace.E[ 1072 ]), &(acadoWorkspace.QE[ 1072 ]) );
acado_multGxGu( acadoWorkspace.QN1, &(acadoWorkspace.E[ 1080 ]), &(acadoWorkspace.QE[ 1080 ]) );
acado_zeroBlockH10( acadoWorkspace.H10 );
acado_multQETGx( acadoWorkspace.QE, acadoWorkspace.evGx, acadoWorkspace.H10 );
acado_multQETGx( &(acadoWorkspace.QE[ 8 ]), &(acadoWorkspace.evGx[ 16 ]), acadoWorkspace.H10 );
acado_multQETGx( &(acadoWorkspace.QE[ 24 ]), &(acadoWorkspace.evGx[ 32 ]), acadoWorkspace.H10 );
acado_multQETGx( &(acadoWorkspace.QE[ 48 ]), &(acadoWorkspace.evGx[ 48 ]), acadoWorkspace.H10 );
acado_multQETGx( &(acadoWorkspace.QE[ 80 ]), &(acadoWorkspace.evGx[ 64 ]), acadoWorkspace.H10 );
acado_multQETGx( &(acadoWorkspace.QE[ 120 ]), &(acadoWorkspace.evGx[ 80 ]), acadoWorkspace.H10 );
acado_multQETGx( &(acadoWorkspace.QE[ 168 ]), &(acadoWorkspace.evGx[ 96 ]), acadoWorkspace.H10 );
acado_multQETGx( &(acadoWorkspace.QE[ 224 ]), &(acadoWorkspace.evGx[ 112 ]), acadoWorkspace.H10 );
acado_multQETGx( &(acadoWorkspace.QE[ 288 ]), &(acadoWorkspace.evGx[ 128 ]), acadoWorkspace.H10 );
acado_multQETGx( &(acadoWorkspace.QE[ 360 ]), &(acadoWorkspace.evGx[ 144 ]), acadoWorkspace.H10 );
acado_multQETGx( &(acadoWorkspace.QE[ 440 ]), &(acadoWorkspace.evGx[ 160 ]), acadoWorkspace.H10 );
acado_multQETGx( &(acadoWorkspace.QE[ 528 ]), &(acadoWorkspace.evGx[ 176 ]), acadoWorkspace.H10 );
acado_multQETGx( &(acadoWorkspace.QE[ 624 ]), &(acadoWorkspace.evGx[ 192 ]), acadoWorkspace.H10 );
acado_multQETGx( &(acadoWorkspace.QE[ 728 ]), &(acadoWorkspace.evGx[ 208 ]), acadoWorkspace.H10 );
acado_multQETGx( &(acadoWorkspace.QE[ 840 ]), &(acadoWorkspace.evGx[ 224 ]), acadoWorkspace.H10 );
acado_multQETGx( &(acadoWorkspace.QE[ 960 ]), &(acadoWorkspace.evGx[ 240 ]), acadoWorkspace.H10 );
acado_zeroBlockH10( &(acadoWorkspace.H10[ 8 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 16 ]), &(acadoWorkspace.evGx[ 16 ]), &(acadoWorkspace.H10[ 8 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 32 ]), &(acadoWorkspace.evGx[ 32 ]), &(acadoWorkspace.H10[ 8 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 56 ]), &(acadoWorkspace.evGx[ 48 ]), &(acadoWorkspace.H10[ 8 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 88 ]), &(acadoWorkspace.evGx[ 64 ]), &(acadoWorkspace.H10[ 8 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 128 ]), &(acadoWorkspace.evGx[ 80 ]), &(acadoWorkspace.H10[ 8 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 176 ]), &(acadoWorkspace.evGx[ 96 ]), &(acadoWorkspace.H10[ 8 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 232 ]), &(acadoWorkspace.evGx[ 112 ]), &(acadoWorkspace.H10[ 8 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 296 ]), &(acadoWorkspace.evGx[ 128 ]), &(acadoWorkspace.H10[ 8 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 368 ]), &(acadoWorkspace.evGx[ 144 ]), &(acadoWorkspace.H10[ 8 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 448 ]), &(acadoWorkspace.evGx[ 160 ]), &(acadoWorkspace.H10[ 8 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 536 ]), &(acadoWorkspace.evGx[ 176 ]), &(acadoWorkspace.H10[ 8 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 632 ]), &(acadoWorkspace.evGx[ 192 ]), &(acadoWorkspace.H10[ 8 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 736 ]), &(acadoWorkspace.evGx[ 208 ]), &(acadoWorkspace.H10[ 8 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 848 ]), &(acadoWorkspace.evGx[ 224 ]), &(acadoWorkspace.H10[ 8 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 968 ]), &(acadoWorkspace.evGx[ 240 ]), &(acadoWorkspace.H10[ 8 ]) );
acado_zeroBlockH10( &(acadoWorkspace.H10[ 16 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 40 ]), &(acadoWorkspace.evGx[ 32 ]), &(acadoWorkspace.H10[ 16 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 64 ]), &(acadoWorkspace.evGx[ 48 ]), &(acadoWorkspace.H10[ 16 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 96 ]), &(acadoWorkspace.evGx[ 64 ]), &(acadoWorkspace.H10[ 16 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 136 ]), &(acadoWorkspace.evGx[ 80 ]), &(acadoWorkspace.H10[ 16 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 184 ]), &(acadoWorkspace.evGx[ 96 ]), &(acadoWorkspace.H10[ 16 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 240 ]), &(acadoWorkspace.evGx[ 112 ]), &(acadoWorkspace.H10[ 16 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 304 ]), &(acadoWorkspace.evGx[ 128 ]), &(acadoWorkspace.H10[ 16 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 376 ]), &(acadoWorkspace.evGx[ 144 ]), &(acadoWorkspace.H10[ 16 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 456 ]), &(acadoWorkspace.evGx[ 160 ]), &(acadoWorkspace.H10[ 16 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 544 ]), &(acadoWorkspace.evGx[ 176 ]), &(acadoWorkspace.H10[ 16 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 640 ]), &(acadoWorkspace.evGx[ 192 ]), &(acadoWorkspace.H10[ 16 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 744 ]), &(acadoWorkspace.evGx[ 208 ]), &(acadoWorkspace.H10[ 16 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 856 ]), &(acadoWorkspace.evGx[ 224 ]), &(acadoWorkspace.H10[ 16 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 976 ]), &(acadoWorkspace.evGx[ 240 ]), &(acadoWorkspace.H10[ 16 ]) );
acado_zeroBlockH10( &(acadoWorkspace.H10[ 24 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 72 ]), &(acadoWorkspace.evGx[ 48 ]), &(acadoWorkspace.H10[ 24 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 104 ]), &(acadoWorkspace.evGx[ 64 ]), &(acadoWorkspace.H10[ 24 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 144 ]), &(acadoWorkspace.evGx[ 80 ]), &(acadoWorkspace.H10[ 24 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 192 ]), &(acadoWorkspace.evGx[ 96 ]), &(acadoWorkspace.H10[ 24 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 248 ]), &(acadoWorkspace.evGx[ 112 ]), &(acadoWorkspace.H10[ 24 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 312 ]), &(acadoWorkspace.evGx[ 128 ]), &(acadoWorkspace.H10[ 24 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 384 ]), &(acadoWorkspace.evGx[ 144 ]), &(acadoWorkspace.H10[ 24 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 464 ]), &(acadoWorkspace.evGx[ 160 ]), &(acadoWorkspace.H10[ 24 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 552 ]), &(acadoWorkspace.evGx[ 176 ]), &(acadoWorkspace.H10[ 24 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 648 ]), &(acadoWorkspace.evGx[ 192 ]), &(acadoWorkspace.H10[ 24 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 752 ]), &(acadoWorkspace.evGx[ 208 ]), &(acadoWorkspace.H10[ 24 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 864 ]), &(acadoWorkspace.evGx[ 224 ]), &(acadoWorkspace.H10[ 24 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 984 ]), &(acadoWorkspace.evGx[ 240 ]), &(acadoWorkspace.H10[ 24 ]) );
acado_zeroBlockH10( &(acadoWorkspace.H10[ 32 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 112 ]), &(acadoWorkspace.evGx[ 64 ]), &(acadoWorkspace.H10[ 32 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 152 ]), &(acadoWorkspace.evGx[ 80 ]), &(acadoWorkspace.H10[ 32 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 200 ]), &(acadoWorkspace.evGx[ 96 ]), &(acadoWorkspace.H10[ 32 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 256 ]), &(acadoWorkspace.evGx[ 112 ]), &(acadoWorkspace.H10[ 32 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 320 ]), &(acadoWorkspace.evGx[ 128 ]), &(acadoWorkspace.H10[ 32 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 392 ]), &(acadoWorkspace.evGx[ 144 ]), &(acadoWorkspace.H10[ 32 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 472 ]), &(acadoWorkspace.evGx[ 160 ]), &(acadoWorkspace.H10[ 32 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 560 ]), &(acadoWorkspace.evGx[ 176 ]), &(acadoWorkspace.H10[ 32 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 656 ]), &(acadoWorkspace.evGx[ 192 ]), &(acadoWorkspace.H10[ 32 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 760 ]), &(acadoWorkspace.evGx[ 208 ]), &(acadoWorkspace.H10[ 32 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 872 ]), &(acadoWorkspace.evGx[ 224 ]), &(acadoWorkspace.H10[ 32 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 992 ]), &(acadoWorkspace.evGx[ 240 ]), &(acadoWorkspace.H10[ 32 ]) );
acado_zeroBlockH10( &(acadoWorkspace.H10[ 40 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 160 ]), &(acadoWorkspace.evGx[ 80 ]), &(acadoWorkspace.H10[ 40 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 208 ]), &(acadoWorkspace.evGx[ 96 ]), &(acadoWorkspace.H10[ 40 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 264 ]), &(acadoWorkspace.evGx[ 112 ]), &(acadoWorkspace.H10[ 40 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 328 ]), &(acadoWorkspace.evGx[ 128 ]), &(acadoWorkspace.H10[ 40 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 400 ]), &(acadoWorkspace.evGx[ 144 ]), &(acadoWorkspace.H10[ 40 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 480 ]), &(acadoWorkspace.evGx[ 160 ]), &(acadoWorkspace.H10[ 40 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 568 ]), &(acadoWorkspace.evGx[ 176 ]), &(acadoWorkspace.H10[ 40 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 664 ]), &(acadoWorkspace.evGx[ 192 ]), &(acadoWorkspace.H10[ 40 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 768 ]), &(acadoWorkspace.evGx[ 208 ]), &(acadoWorkspace.H10[ 40 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 880 ]), &(acadoWorkspace.evGx[ 224 ]), &(acadoWorkspace.H10[ 40 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 1000 ]), &(acadoWorkspace.evGx[ 240 ]), &(acadoWorkspace.H10[ 40 ]) );
acado_zeroBlockH10( &(acadoWorkspace.H10[ 48 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 216 ]), &(acadoWorkspace.evGx[ 96 ]), &(acadoWorkspace.H10[ 48 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 272 ]), &(acadoWorkspace.evGx[ 112 ]), &(acadoWorkspace.H10[ 48 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 336 ]), &(acadoWorkspace.evGx[ 128 ]), &(acadoWorkspace.H10[ 48 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 408 ]), &(acadoWorkspace.evGx[ 144 ]), &(acadoWorkspace.H10[ 48 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 488 ]), &(acadoWorkspace.evGx[ 160 ]), &(acadoWorkspace.H10[ 48 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 576 ]), &(acadoWorkspace.evGx[ 176 ]), &(acadoWorkspace.H10[ 48 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 672 ]), &(acadoWorkspace.evGx[ 192 ]), &(acadoWorkspace.H10[ 48 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 776 ]), &(acadoWorkspace.evGx[ 208 ]), &(acadoWorkspace.H10[ 48 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 888 ]), &(acadoWorkspace.evGx[ 224 ]), &(acadoWorkspace.H10[ 48 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 1008 ]), &(acadoWorkspace.evGx[ 240 ]), &(acadoWorkspace.H10[ 48 ]) );
acado_zeroBlockH10( &(acadoWorkspace.H10[ 56 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 280 ]), &(acadoWorkspace.evGx[ 112 ]), &(acadoWorkspace.H10[ 56 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 344 ]), &(acadoWorkspace.evGx[ 128 ]), &(acadoWorkspace.H10[ 56 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 416 ]), &(acadoWorkspace.evGx[ 144 ]), &(acadoWorkspace.H10[ 56 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 496 ]), &(acadoWorkspace.evGx[ 160 ]), &(acadoWorkspace.H10[ 56 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 584 ]), &(acadoWorkspace.evGx[ 176 ]), &(acadoWorkspace.H10[ 56 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 680 ]), &(acadoWorkspace.evGx[ 192 ]), &(acadoWorkspace.H10[ 56 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 784 ]), &(acadoWorkspace.evGx[ 208 ]), &(acadoWorkspace.H10[ 56 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 896 ]), &(acadoWorkspace.evGx[ 224 ]), &(acadoWorkspace.H10[ 56 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 1016 ]), &(acadoWorkspace.evGx[ 240 ]), &(acadoWorkspace.H10[ 56 ]) );
acado_zeroBlockH10( &(acadoWorkspace.H10[ 64 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 352 ]), &(acadoWorkspace.evGx[ 128 ]), &(acadoWorkspace.H10[ 64 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 424 ]), &(acadoWorkspace.evGx[ 144 ]), &(acadoWorkspace.H10[ 64 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 504 ]), &(acadoWorkspace.evGx[ 160 ]), &(acadoWorkspace.H10[ 64 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 592 ]), &(acadoWorkspace.evGx[ 176 ]), &(acadoWorkspace.H10[ 64 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 688 ]), &(acadoWorkspace.evGx[ 192 ]), &(acadoWorkspace.H10[ 64 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 792 ]), &(acadoWorkspace.evGx[ 208 ]), &(acadoWorkspace.H10[ 64 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 904 ]), &(acadoWorkspace.evGx[ 224 ]), &(acadoWorkspace.H10[ 64 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 1024 ]), &(acadoWorkspace.evGx[ 240 ]), &(acadoWorkspace.H10[ 64 ]) );
acado_zeroBlockH10( &(acadoWorkspace.H10[ 72 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 432 ]), &(acadoWorkspace.evGx[ 144 ]), &(acadoWorkspace.H10[ 72 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 512 ]), &(acadoWorkspace.evGx[ 160 ]), &(acadoWorkspace.H10[ 72 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 600 ]), &(acadoWorkspace.evGx[ 176 ]), &(acadoWorkspace.H10[ 72 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 696 ]), &(acadoWorkspace.evGx[ 192 ]), &(acadoWorkspace.H10[ 72 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 800 ]), &(acadoWorkspace.evGx[ 208 ]), &(acadoWorkspace.H10[ 72 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 912 ]), &(acadoWorkspace.evGx[ 224 ]), &(acadoWorkspace.H10[ 72 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 1032 ]), &(acadoWorkspace.evGx[ 240 ]), &(acadoWorkspace.H10[ 72 ]) );
acado_zeroBlockH10( &(acadoWorkspace.H10[ 80 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 520 ]), &(acadoWorkspace.evGx[ 160 ]), &(acadoWorkspace.H10[ 80 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 608 ]), &(acadoWorkspace.evGx[ 176 ]), &(acadoWorkspace.H10[ 80 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 704 ]), &(acadoWorkspace.evGx[ 192 ]), &(acadoWorkspace.H10[ 80 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 808 ]), &(acadoWorkspace.evGx[ 208 ]), &(acadoWorkspace.H10[ 80 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 920 ]), &(acadoWorkspace.evGx[ 224 ]), &(acadoWorkspace.H10[ 80 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 1040 ]), &(acadoWorkspace.evGx[ 240 ]), &(acadoWorkspace.H10[ 80 ]) );
acado_zeroBlockH10( &(acadoWorkspace.H10[ 88 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 616 ]), &(acadoWorkspace.evGx[ 176 ]), &(acadoWorkspace.H10[ 88 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 712 ]), &(acadoWorkspace.evGx[ 192 ]), &(acadoWorkspace.H10[ 88 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 816 ]), &(acadoWorkspace.evGx[ 208 ]), &(acadoWorkspace.H10[ 88 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 928 ]), &(acadoWorkspace.evGx[ 224 ]), &(acadoWorkspace.H10[ 88 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 1048 ]), &(acadoWorkspace.evGx[ 240 ]), &(acadoWorkspace.H10[ 88 ]) );
acado_zeroBlockH10( &(acadoWorkspace.H10[ 96 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 720 ]), &(acadoWorkspace.evGx[ 192 ]), &(acadoWorkspace.H10[ 96 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 824 ]), &(acadoWorkspace.evGx[ 208 ]), &(acadoWorkspace.H10[ 96 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 936 ]), &(acadoWorkspace.evGx[ 224 ]), &(acadoWorkspace.H10[ 96 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 1056 ]), &(acadoWorkspace.evGx[ 240 ]), &(acadoWorkspace.H10[ 96 ]) );
acado_zeroBlockH10( &(acadoWorkspace.H10[ 104 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 832 ]), &(acadoWorkspace.evGx[ 208 ]), &(acadoWorkspace.H10[ 104 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 944 ]), &(acadoWorkspace.evGx[ 224 ]), &(acadoWorkspace.H10[ 104 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 1064 ]), &(acadoWorkspace.evGx[ 240 ]), &(acadoWorkspace.H10[ 104 ]) );
acado_zeroBlockH10( &(acadoWorkspace.H10[ 112 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 952 ]), &(acadoWorkspace.evGx[ 224 ]), &(acadoWorkspace.H10[ 112 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 1072 ]), &(acadoWorkspace.evGx[ 240 ]), &(acadoWorkspace.H10[ 112 ]) );
acado_zeroBlockH10( &(acadoWorkspace.H10[ 120 ]) );
acado_multQETGx( &(acadoWorkspace.QE[ 1080 ]), &(acadoWorkspace.evGx[ 240 ]), &(acadoWorkspace.H10[ 120 ]) );
acado_setBlockH11_R1( 0, 0, acadoWorkspace.R1 );
acado_setBlockH11( 0, 0, acadoWorkspace.E, acadoWorkspace.QE );
acado_setBlockH11( 0, 0, &(acadoWorkspace.E[ 8 ]), &(acadoWorkspace.QE[ 8 ]) );
acado_setBlockH11( 0, 0, &(acadoWorkspace.E[ 24 ]), &(acadoWorkspace.QE[ 24 ]) );
acado_setBlockH11( 0, 0, &(acadoWorkspace.E[ 48 ]), &(acadoWorkspace.QE[ 48 ]) );
acado_setBlockH11( 0, 0, &(acadoWorkspace.E[ 80 ]), &(acadoWorkspace.QE[ 80 ]) );
acado_setBlockH11( 0, 0, &(acadoWorkspace.E[ 120 ]), &(acadoWorkspace.QE[ 120 ]) );
acado_setBlockH11( 0, 0, &(acadoWorkspace.E[ 168 ]), &(acadoWorkspace.QE[ 168 ]) );
acado_setBlockH11( 0, 0, &(acadoWorkspace.E[ 224 ]), &(acadoWorkspace.QE[ 224 ]) );
acado_setBlockH11( 0, 0, &(acadoWorkspace.E[ 288 ]), &(acadoWorkspace.QE[ 288 ]) );
acado_setBlockH11( 0, 0, &(acadoWorkspace.E[ 360 ]), &(acadoWorkspace.QE[ 360 ]) );
acado_setBlockH11( 0, 0, &(acadoWorkspace.E[ 440 ]), &(acadoWorkspace.QE[ 440 ]) );
acado_setBlockH11( 0, 0, &(acadoWorkspace.E[ 528 ]), &(acadoWorkspace.QE[ 528 ]) );
acado_setBlockH11( 0, 0, &(acadoWorkspace.E[ 624 ]), &(acadoWorkspace.QE[ 624 ]) );
acado_setBlockH11( 0, 0, &(acadoWorkspace.E[ 728 ]), &(acadoWorkspace.QE[ 728 ]) );
acado_setBlockH11( 0, 0, &(acadoWorkspace.E[ 840 ]), &(acadoWorkspace.QE[ 840 ]) );
acado_setBlockH11( 0, 0, &(acadoWorkspace.E[ 960 ]), &(acadoWorkspace.QE[ 960 ]) );
acado_zeroBlockH11( 0, 1 );
acado_setBlockH11( 0, 1, &(acadoWorkspace.E[ 8 ]), &(acadoWorkspace.QE[ 16 ]) );
acado_setBlockH11( 0, 1, &(acadoWorkspace.E[ 24 ]), &(acadoWorkspace.QE[ 32 ]) );
acado_setBlockH11( 0, 1, &(acadoWorkspace.E[ 48 ]), &(acadoWorkspace.QE[ 56 ]) );
acado_setBlockH11( 0, 1, &(acadoWorkspace.E[ 80 ]), &(acadoWorkspace.QE[ 88 ]) );
acado_setBlockH11( 0, 1, &(acadoWorkspace.E[ 120 ]), &(acadoWorkspace.QE[ 128 ]) );
acado_setBlockH11( 0, 1, &(acadoWorkspace.E[ 168 ]), &(acadoWorkspace.QE[ 176 ]) );
acado_setBlockH11( 0, 1, &(acadoWorkspace.E[ 224 ]), &(acadoWorkspace.QE[ 232 ]) );
acado_setBlockH11( 0, 1, &(acadoWorkspace.E[ 288 ]), &(acadoWorkspace.QE[ 296 ]) );
acado_setBlockH11( 0, 1, &(acadoWorkspace.E[ 360 ]), &(acadoWorkspace.QE[ 368 ]) );
acado_setBlockH11( 0, 1, &(acadoWorkspace.E[ 440 ]), &(acadoWorkspace.QE[ 448 ]) );
acado_setBlockH11( 0, 1, &(acadoWorkspace.E[ 528 ]), &(acadoWorkspace.QE[ 536 ]) );
acado_setBlockH11( 0, 1, &(acadoWorkspace.E[ 624 ]), &(acadoWorkspace.QE[ 632 ]) );
acado_setBlockH11( 0, 1, &(acadoWorkspace.E[ 728 ]), &(acadoWorkspace.QE[ 736 ]) );
acado_setBlockH11( 0, 1, &(acadoWorkspace.E[ 840 ]), &(acadoWorkspace.QE[ 848 ]) );
acado_setBlockH11( 0, 1, &(acadoWorkspace.E[ 960 ]), &(acadoWorkspace.QE[ 968 ]) );
acado_zeroBlockH11( 0, 2 );
acado_setBlockH11( 0, 2, &(acadoWorkspace.E[ 24 ]), &(acadoWorkspace.QE[ 40 ]) );
acado_setBlockH11( 0, 2, &(acadoWorkspace.E[ 48 ]), &(acadoWorkspace.QE[ 64 ]) );
acado_setBlockH11( 0, 2, &(acadoWorkspace.E[ 80 ]), &(acadoWorkspace.QE[ 96 ]) );
acado_setBlockH11( 0, 2, &(acadoWorkspace.E[ 120 ]), &(acadoWorkspace.QE[ 136 ]) );
acado_setBlockH11( 0, 2, &(acadoWorkspace.E[ 168 ]), &(acadoWorkspace.QE[ 184 ]) );
acado_setBlockH11( 0, 2, &(acadoWorkspace.E[ 224 ]), &(acadoWorkspace.QE[ 240 ]) );
acado_setBlockH11( 0, 2, &(acadoWorkspace.E[ 288 ]), &(acadoWorkspace.QE[ 304 ]) );
acado_setBlockH11( 0, 2, &(acadoWorkspace.E[ 360 ]), &(acadoWorkspace.QE[ 376 ]) );
acado_setBlockH11( 0, 2, &(acadoWorkspace.E[ 440 ]), &(acadoWorkspace.QE[ 456 ]) );
acado_setBlockH11( 0, 2, &(acadoWorkspace.E[ 528 ]), &(acadoWorkspace.QE[ 544 ]) );
acado_setBlockH11( 0, 2, &(acadoWorkspace.E[ 624 ]), &(acadoWorkspace.QE[ 640 ]) );
acado_setBlockH11( 0, 2, &(acadoWorkspace.E[ 728 ]), &(acadoWorkspace.QE[ 744 ]) );
acado_setBlockH11( 0, 2, &(acadoWorkspace.E[ 840 ]), &(acadoWorkspace.QE[ 856 ]) );
acado_setBlockH11( 0, 2, &(acadoWorkspace.E[ 960 ]), &(acadoWorkspace.QE[ 976 ]) );
acado_zeroBlockH11( 0, 3 );
acado_setBlockH11( 0, 3, &(acadoWorkspace.E[ 48 ]), &(acadoWorkspace.QE[ 72 ]) );
acado_setBlockH11( 0, 3, &(acadoWorkspace.E[ 80 ]), &(acadoWorkspace.QE[ 104 ]) );
acado_setBlockH11( 0, 3, &(acadoWorkspace.E[ 120 ]), &(acadoWorkspace.QE[ 144 ]) );
acado_setBlockH11( 0, 3, &(acadoWorkspace.E[ 168 ]), &(acadoWorkspace.QE[ 192 ]) );
acado_setBlockH11( 0, 3, &(acadoWorkspace.E[ 224 ]), &(acadoWorkspace.QE[ 248 ]) );
acado_setBlockH11( 0, 3, &(acadoWorkspace.E[ 288 ]), &(acadoWorkspace.QE[ 312 ]) );
acado_setBlockH11( 0, 3, &(acadoWorkspace.E[ 360 ]), &(acadoWorkspace.QE[ 384 ]) );
acado_setBlockH11( 0, 3, &(acadoWorkspace.E[ 440 ]), &(acadoWorkspace.QE[ 464 ]) );
acado_setBlockH11( 0, 3, &(acadoWorkspace.E[ 528 ]), &(acadoWorkspace.QE[ 552 ]) );
acado_setBlockH11( 0, 3, &(acadoWorkspace.E[ 624 ]), &(acadoWorkspace.QE[ 648 ]) );
acado_setBlockH11( 0, 3, &(acadoWorkspace.E[ 728 ]), &(acadoWorkspace.QE[ 752 ]) );
acado_setBlockH11( 0, 3, &(acadoWorkspace.E[ 840 ]), &(acadoWorkspace.QE[ 864 ]) );
acado_setBlockH11( 0, 3, &(acadoWorkspace.E[ 960 ]), &(acadoWorkspace.QE[ 984 ]) );
acado_zeroBlockH11( 0, 4 );
acado_setBlockH11( 0, 4, &(acadoWorkspace.E[ 80 ]), &(acadoWorkspace.QE[ 112 ]) );
acado_setBlockH11( 0, 4, &(acadoWorkspace.E[ 120 ]), &(acadoWorkspace.QE[ 152 ]) );
acado_setBlockH11( 0, 4, &(acadoWorkspace.E[ 168 ]), &(acadoWorkspace.QE[ 200 ]) );
acado_setBlockH11( 0, 4, &(acadoWorkspace.E[ 224 ]), &(acadoWorkspace.QE[ 256 ]) );
acado_setBlockH11( 0, 4, &(acadoWorkspace.E[ 288 ]), &(acadoWorkspace.QE[ 320 ]) );
acado_setBlockH11( 0, 4, &(acadoWorkspace.E[ 360 ]), &(acadoWorkspace.QE[ 392 ]) );
acado_setBlockH11( 0, 4, &(acadoWorkspace.E[ 440 ]), &(acadoWorkspace.QE[ 472 ]) );
acado_setBlockH11( 0, 4, &(acadoWorkspace.E[ 528 ]), &(acadoWorkspace.QE[ 560 ]) );
acado_setBlockH11( 0, 4, &(acadoWorkspace.E[ 624 ]), &(acadoWorkspace.QE[ 656 ]) );
acado_setBlockH11( 0, 4, &(acadoWorkspace.E[ 728 ]), &(acadoWorkspace.QE[ 760 ]) );
acado_setBlockH11( 0, 4, &(acadoWorkspace.E[ 840 ]), &(acadoWorkspace.QE[ 872 ]) );
acado_setBlockH11( 0, 4, &(acadoWorkspace.E[ 960 ]), &(acadoWorkspace.QE[ 992 ]) );
acado_zeroBlockH11( 0, 5 );
acado_setBlockH11( 0, 5, &(acadoWorkspace.E[ 120 ]), &(acadoWorkspace.QE[ 160 ]) );
acado_setBlockH11( 0, 5, &(acadoWorkspace.E[ 168 ]), &(acadoWorkspace.QE[ 208 ]) );
acado_setBlockH11( 0, 5, &(acadoWorkspace.E[ 224 ]), &(acadoWorkspace.QE[ 264 ]) );
acado_setBlockH11( 0, 5, &(acadoWorkspace.E[ 288 ]), &(acadoWorkspace.QE[ 328 ]) );
acado_setBlockH11( 0, 5, &(acadoWorkspace.E[ 360 ]), &(acadoWorkspace.QE[ 400 ]) );
acado_setBlockH11( 0, 5, &(acadoWorkspace.E[ 440 ]), &(acadoWorkspace.QE[ 480 ]) );
acado_setBlockH11( 0, 5, &(acadoWorkspace.E[ 528 ]), &(acadoWorkspace.QE[ 568 ]) );
acado_setBlockH11( 0, 5, &(acadoWorkspace.E[ 624 ]), &(acadoWorkspace.QE[ 664 ]) );
acado_setBlockH11( 0, 5, &(acadoWorkspace.E[ 728 ]), &(acadoWorkspace.QE[ 768 ]) );
acado_setBlockH11( 0, 5, &(acadoWorkspace.E[ 840 ]), &(acadoWorkspace.QE[ 880 ]) );
acado_setBlockH11( 0, 5, &(acadoWorkspace.E[ 960 ]), &(acadoWorkspace.QE[ 1000 ]) );
acado_zeroBlockH11( 0, 6 );
acado_setBlockH11( 0, 6, &(acadoWorkspace.E[ 168 ]), &(acadoWorkspace.QE[ 216 ]) );
acado_setBlockH11( 0, 6, &(acadoWorkspace.E[ 224 ]), &(acadoWorkspace.QE[ 272 ]) );
acado_setBlockH11( 0, 6, &(acadoWorkspace.E[ 288 ]), &(acadoWorkspace.QE[ 336 ]) );
acado_setBlockH11( 0, 6, &(acadoWorkspace.E[ 360 ]), &(acadoWorkspace.QE[ 408 ]) );
acado_setBlockH11( 0, 6, &(acadoWorkspace.E[ 440 ]), &(acadoWorkspace.QE[ 488 ]) );
acado_setBlockH11( 0, 6, &(acadoWorkspace.E[ 528 ]), &(acadoWorkspace.QE[ 576 ]) );
acado_setBlockH11( 0, 6, &(acadoWorkspace.E[ 624 ]), &(acadoWorkspace.QE[ 672 ]) );
acado_setBlockH11( 0, 6, &(acadoWorkspace.E[ 728 ]), &(acadoWorkspace.QE[ 776 ]) );
acado_setBlockH11( 0, 6, &(acadoWorkspace.E[ 840 ]), &(acadoWorkspace.QE[ 888 ]) );
acado_setBlockH11( 0, 6, &(acadoWorkspace.E[ 960 ]), &(acadoWorkspace.QE[ 1008 ]) );
acado_zeroBlockH11( 0, 7 );
acado_setBlockH11( 0, 7, &(acadoWorkspace.E[ 224 ]), &(acadoWorkspace.QE[ 280 ]) );
acado_setBlockH11( 0, 7, &(acadoWorkspace.E[ 288 ]), &(acadoWorkspace.QE[ 344 ]) );
acado_setBlockH11( 0, 7, &(acadoWorkspace.E[ 360 ]), &(acadoWorkspace.QE[ 416 ]) );
acado_setBlockH11( 0, 7, &(acadoWorkspace.E[ 440 ]), &(acadoWorkspace.QE[ 496 ]) );
acado_setBlockH11( 0, 7, &(acadoWorkspace.E[ 528 ]), &(acadoWorkspace.QE[ 584 ]) );
acado_setBlockH11( 0, 7, &(acadoWorkspace.E[ 624 ]), &(acadoWorkspace.QE[ 680 ]) );
acado_setBlockH11( 0, 7, &(acadoWorkspace.E[ 728 ]), &(acadoWorkspace.QE[ 784 ]) );
acado_setBlockH11( 0, 7, &(acadoWorkspace.E[ 840 ]), &(acadoWorkspace.QE[ 896 ]) );
acado_setBlockH11( 0, 7, &(acadoWorkspace.E[ 960 ]), &(acadoWorkspace.QE[ 1016 ]) );
acado_zeroBlockH11( 0, 8 );
acado_setBlockH11( 0, 8, &(acadoWorkspace.E[ 288 ]), &(acadoWorkspace.QE[ 352 ]) );
acado_setBlockH11( 0, 8, &(acadoWorkspace.E[ 360 ]), &(acadoWorkspace.QE[ 424 ]) );
acado_setBlockH11( 0, 8, &(acadoWorkspace.E[ 440 ]), &(acadoWorkspace.QE[ 504 ]) );
acado_setBlockH11( 0, 8, &(acadoWorkspace.E[ 528 ]), &(acadoWorkspace.QE[ 592 ]) );
acado_setBlockH11( 0, 8, &(acadoWorkspace.E[ 624 ]), &(acadoWorkspace.QE[ 688 ]) );
acado_setBlockH11( 0, 8, &(acadoWorkspace.E[ 728 ]), &(acadoWorkspace.QE[ 792 ]) );
acado_setBlockH11( 0, 8, &(acadoWorkspace.E[ 840 ]), &(acadoWorkspace.QE[ 904 ]) );
acado_setBlockH11( 0, 8, &(acadoWorkspace.E[ 960 ]), &(acadoWorkspace.QE[ 1024 ]) );
acado_zeroBlockH11( 0, 9 );
acado_setBlockH11( 0, 9, &(acadoWorkspace.E[ 360 ]), &(acadoWorkspace.QE[ 432 ]) );
acado_setBlockH11( 0, 9, &(acadoWorkspace.E[ 440 ]), &(acadoWorkspace.QE[ 512 ]) );
acado_setBlockH11( 0, 9, &(acadoWorkspace.E[ 528 ]), &(acadoWorkspace.QE[ 600 ]) );
acado_setBlockH11( 0, 9, &(acadoWorkspace.E[ 624 ]), &(acadoWorkspace.QE[ 696 ]) );
acado_setBlockH11( 0, 9, &(acadoWorkspace.E[ 728 ]), &(acadoWorkspace.QE[ 800 ]) );
acado_setBlockH11( 0, 9, &(acadoWorkspace.E[ 840 ]), &(acadoWorkspace.QE[ 912 ]) );
acado_setBlockH11( 0, 9, &(acadoWorkspace.E[ 960 ]), &(acadoWorkspace.QE[ 1032 ]) );
acado_zeroBlockH11( 0, 10 );
acado_setBlockH11( 0, 10, &(acadoWorkspace.E[ 440 ]), &(acadoWorkspace.QE[ 520 ]) );
acado_setBlockH11( 0, 10, &(acadoWorkspace.E[ 528 ]), &(acadoWorkspace.QE[ 608 ]) );
acado_setBlockH11( 0, 10, &(acadoWorkspace.E[ 624 ]), &(acadoWorkspace.QE[ 704 ]) );
acado_setBlockH11( 0, 10, &(acadoWorkspace.E[ 728 ]), &(acadoWorkspace.QE[ 808 ]) );
acado_setBlockH11( 0, 10, &(acadoWorkspace.E[ 840 ]), &(acadoWorkspace.QE[ 920 ]) );
acado_setBlockH11( 0, 10, &(acadoWorkspace.E[ 960 ]), &(acadoWorkspace.QE[ 1040 ]) );
acado_zeroBlockH11( 0, 11 );
acado_setBlockH11( 0, 11, &(acadoWorkspace.E[ 528 ]), &(acadoWorkspace.QE[ 616 ]) );
acado_setBlockH11( 0, 11, &(acadoWorkspace.E[ 624 ]), &(acadoWorkspace.QE[ 712 ]) );
acado_setBlockH11( 0, 11, &(acadoWorkspace.E[ 728 ]), &(acadoWorkspace.QE[ 816 ]) );
acado_setBlockH11( 0, 11, &(acadoWorkspace.E[ 840 ]), &(acadoWorkspace.QE[ 928 ]) );
acado_setBlockH11( 0, 11, &(acadoWorkspace.E[ 960 ]), &(acadoWorkspace.QE[ 1048 ]) );
acado_zeroBlockH11( 0, 12 );
acado_setBlockH11( 0, 12, &(acadoWorkspace.E[ 624 ]), &(acadoWorkspace.QE[ 720 ]) );
acado_setBlockH11( 0, 12, &(acadoWorkspace.E[ 728 ]), &(acadoWorkspace.QE[ 824 ]) );
acado_setBlockH11( 0, 12, &(acadoWorkspace.E[ 840 ]), &(acadoWorkspace.QE[ 936 ]) );
acado_setBlockH11( 0, 12, &(acadoWorkspace.E[ 960 ]), &(acadoWorkspace.QE[ 1056 ]) );
acado_zeroBlockH11( 0, 13 );
acado_setBlockH11( 0, 13, &(acadoWorkspace.E[ 728 ]), &(acadoWorkspace.QE[ 832 ]) );
acado_setBlockH11( 0, 13, &(acadoWorkspace.E[ 840 ]), &(acadoWorkspace.QE[ 944 ]) );
acado_setBlockH11( 0, 13, &(acadoWorkspace.E[ 960 ]), &(acadoWorkspace.QE[ 1064 ]) );
acado_zeroBlockH11( 0, 14 );
acado_setBlockH11( 0, 14, &(acadoWorkspace.E[ 840 ]), &(acadoWorkspace.QE[ 952 ]) );
acado_setBlockH11( 0, 14, &(acadoWorkspace.E[ 960 ]), &(acadoWorkspace.QE[ 1072 ]) );
acado_zeroBlockH11( 0, 15 );
acado_setBlockH11( 0, 15, &(acadoWorkspace.E[ 960 ]), &(acadoWorkspace.QE[ 1080 ]) );
acado_setBlockH11_R1( 1, 1, &(acadoWorkspace.R1[ 4 ]) );
acado_setBlockH11( 1, 1, &(acadoWorkspace.E[ 16 ]), &(acadoWorkspace.QE[ 16 ]) );
acado_setBlockH11( 1, 1, &(acadoWorkspace.E[ 32 ]), &(acadoWorkspace.QE[ 32 ]) );
acado_setBlockH11( 1, 1, &(acadoWorkspace.E[ 56 ]), &(acadoWorkspace.QE[ 56 ]) );
acado_setBlockH11( 1, 1, &(acadoWorkspace.E[ 88 ]), &(acadoWorkspace.QE[ 88 ]) );
acado_setBlockH11( 1, 1, &(acadoWorkspace.E[ 128 ]), &(acadoWorkspace.QE[ 128 ]) );
acado_setBlockH11( 1, 1, &(acadoWorkspace.E[ 176 ]), &(acadoWorkspace.QE[ 176 ]) );
acado_setBlockH11( 1, 1, &(acadoWorkspace.E[ 232 ]), &(acadoWorkspace.QE[ 232 ]) );
acado_setBlockH11( 1, 1, &(acadoWorkspace.E[ 296 ]), &(acadoWorkspace.QE[ 296 ]) );
acado_setBlockH11( 1, 1, &(acadoWorkspace.E[ 368 ]), &(acadoWorkspace.QE[ 368 ]) );
acado_setBlockH11( 1, 1, &(acadoWorkspace.E[ 448 ]), &(acadoWorkspace.QE[ 448 ]) );
acado_setBlockH11( 1, 1, &(acadoWorkspace.E[ 536 ]), &(acadoWorkspace.QE[ 536 ]) );
acado_setBlockH11( 1, 1, &(acadoWorkspace.E[ 632 ]), &(acadoWorkspace.QE[ 632 ]) );
acado_setBlockH11( 1, 1, &(acadoWorkspace.E[ 736 ]), &(acadoWorkspace.QE[ 736 ]) );
acado_setBlockH11( 1, 1, &(acadoWorkspace.E[ 848 ]), &(acadoWorkspace.QE[ 848 ]) );
acado_setBlockH11( 1, 1, &(acadoWorkspace.E[ 968 ]), &(acadoWorkspace.QE[ 968 ]) );
acado_zeroBlockH11( 1, 2 );
acado_setBlockH11( 1, 2, &(acadoWorkspace.E[ 32 ]), &(acadoWorkspace.QE[ 40 ]) );
acado_setBlockH11( 1, 2, &(acadoWorkspace.E[ 56 ]), &(acadoWorkspace.QE[ 64 ]) );
acado_setBlockH11( 1, 2, &(acadoWorkspace.E[ 88 ]), &(acadoWorkspace.QE[ 96 ]) );
acado_setBlockH11( 1, 2, &(acadoWorkspace.E[ 128 ]), &(acadoWorkspace.QE[ 136 ]) );
acado_setBlockH11( 1, 2, &(acadoWorkspace.E[ 176 ]), &(acadoWorkspace.QE[ 184 ]) );
acado_setBlockH11( 1, 2, &(acadoWorkspace.E[ 232 ]), &(acadoWorkspace.QE[ 240 ]) );
acado_setBlockH11( 1, 2, &(acadoWorkspace.E[ 296 ]), &(acadoWorkspace.QE[ 304 ]) );
acado_setBlockH11( 1, 2, &(acadoWorkspace.E[ 368 ]), &(acadoWorkspace.QE[ 376 ]) );
acado_setBlockH11( 1, 2, &(acadoWorkspace.E[ 448 ]), &(acadoWorkspace.QE[ 456 ]) );
acado_setBlockH11( 1, 2, &(acadoWorkspace.E[ 536 ]), &(acadoWorkspace.QE[ 544 ]) );
acado_setBlockH11( 1, 2, &(acadoWorkspace.E[ 632 ]), &(acadoWorkspace.QE[ 640 ]) );
acado_setBlockH11( 1, 2, &(acadoWorkspace.E[ 736 ]), &(acadoWorkspace.QE[ 744 ]) );
acado_setBlockH11( 1, 2, &(acadoWorkspace.E[ 848 ]), &(acadoWorkspace.QE[ 856 ]) );
acado_setBlockH11( 1, 2, &(acadoWorkspace.E[ 968 ]), &(acadoWorkspace.QE[ 976 ]) );
acado_zeroBlockH11( 1, 3 );
acado_setBlockH11( 1, 3, &(acadoWorkspace.E[ 56 ]), &(acadoWorkspace.QE[ 72 ]) );
acado_setBlockH11( 1, 3, &(acadoWorkspace.E[ 88 ]), &(acadoWorkspace.QE[ 104 ]) );
acado_setBlockH11( 1, 3, &(acadoWorkspace.E[ 128 ]), &(acadoWorkspace.QE[ 144 ]) );
acado_setBlockH11( 1, 3, &(acadoWorkspace.E[ 176 ]), &(acadoWorkspace.QE[ 192 ]) );
acado_setBlockH11( 1, 3, &(acadoWorkspace.E[ 232 ]), &(acadoWorkspace.QE[ 248 ]) );
acado_setBlockH11( 1, 3, &(acadoWorkspace.E[ 296 ]), &(acadoWorkspace.QE[ 312 ]) );
acado_setBlockH11( 1, 3, &(acadoWorkspace.E[ 368 ]), &(acadoWorkspace.QE[ 384 ]) );
acado_setBlockH11( 1, 3, &(acadoWorkspace.E[ 448 ]), &(acadoWorkspace.QE[ 464 ]) );
acado_setBlockH11( 1, 3, &(acadoWorkspace.E[ 536 ]), &(acadoWorkspace.QE[ 552 ]) );
acado_setBlockH11( 1, 3, &(acadoWorkspace.E[ 632 ]), &(acadoWorkspace.QE[ 648 ]) );
acado_setBlockH11( 1, 3, &(acadoWorkspace.E[ 736 ]), &(acadoWorkspace.QE[ 752 ]) );
acado_setBlockH11( 1, 3, &(acadoWorkspace.E[ 848 ]), &(acadoWorkspace.QE[ 864 ]) );
acado_setBlockH11( 1, 3, &(acadoWorkspace.E[ 968 ]), &(acadoWorkspace.QE[ 984 ]) );
acado_zeroBlockH11( 1, 4 );
acado_setBlockH11( 1, 4, &(acadoWorkspace.E[ 88 ]), &(acadoWorkspace.QE[ 112 ]) );
acado_setBlockH11( 1, 4, &(acadoWorkspace.E[ 128 ]), &(acadoWorkspace.QE[ 152 ]) );
acado_setBlockH11( 1, 4, &(acadoWorkspace.E[ 176 ]), &(acadoWorkspace.QE[ 200 ]) );
acado_setBlockH11( 1, 4, &(acadoWorkspace.E[ 232 ]), &(acadoWorkspace.QE[ 256 ]) );
acado_setBlockH11( 1, 4, &(acadoWorkspace.E[ 296 ]), &(acadoWorkspace.QE[ 320 ]) );
acado_setBlockH11( 1, 4, &(acadoWorkspace.E[ 368 ]), &(acadoWorkspace.QE[ 392 ]) );
acado_setBlockH11( 1, 4, &(acadoWorkspace.E[ 448 ]), &(acadoWorkspace.QE[ 472 ]) );
acado_setBlockH11( 1, 4, &(acadoWorkspace.E[ 536 ]), &(acadoWorkspace.QE[ 560 ]) );
acado_setBlockH11( 1, 4, &(acadoWorkspace.E[ 632 ]), &(acadoWorkspace.QE[ 656 ]) );
acado_setBlockH11( 1, 4, &(acadoWorkspace.E[ 736 ]), &(acadoWorkspace.QE[ 760 ]) );
acado_setBlockH11( 1, 4, &(acadoWorkspace.E[ 848 ]), &(acadoWorkspace.QE[ 872 ]) );
acado_setBlockH11( 1, 4, &(acadoWorkspace.E[ 968 ]), &(acadoWorkspace.QE[ 992 ]) );
acado_zeroBlockH11( 1, 5 );
acado_setBlockH11( 1, 5, &(acadoWorkspace.E[ 128 ]), &(acadoWorkspace.QE[ 160 ]) );
acado_setBlockH11( 1, 5, &(acadoWorkspace.E[ 176 ]), &(acadoWorkspace.QE[ 208 ]) );
acado_setBlockH11( 1, 5, &(acadoWorkspace.E[ 232 ]), &(acadoWorkspace.QE[ 264 ]) );
acado_setBlockH11( 1, 5, &(acadoWorkspace.E[ 296 ]), &(acadoWorkspace.QE[ 328 ]) );
acado_setBlockH11( 1, 5, &(acadoWorkspace.E[ 368 ]), &(acadoWorkspace.QE[ 400 ]) );
acado_setBlockH11( 1, 5, &(acadoWorkspace.E[ 448 ]), &(acadoWorkspace.QE[ 480 ]) );
acado_setBlockH11( 1, 5, &(acadoWorkspace.E[ 536 ]), &(acadoWorkspace.QE[ 568 ]) );
acado_setBlockH11( 1, 5, &(acadoWorkspace.E[ 632 ]), &(acadoWorkspace.QE[ 664 ]) );
acado_setBlockH11( 1, 5, &(acadoWorkspace.E[ 736 ]), &(acadoWorkspace.QE[ 768 ]) );
acado_setBlockH11( 1, 5, &(acadoWorkspace.E[ 848 ]), &(acadoWorkspace.QE[ 880 ]) );
acado_setBlockH11( 1, 5, &(acadoWorkspace.E[ 968 ]), &(acadoWorkspace.QE[ 1000 ]) );
acado_zeroBlockH11( 1, 6 );
acado_setBlockH11( 1, 6, &(acadoWorkspace.E[ 176 ]), &(acadoWorkspace.QE[ 216 ]) );
acado_setBlockH11( 1, 6, &(acadoWorkspace.E[ 232 ]), &(acadoWorkspace.QE[ 272 ]) );
acado_setBlockH11( 1, 6, &(acadoWorkspace.E[ 296 ]), &(acadoWorkspace.QE[ 336 ]) );
acado_setBlockH11( 1, 6, &(acadoWorkspace.E[ 368 ]), &(acadoWorkspace.QE[ 408 ]) );
acado_setBlockH11( 1, 6, &(acadoWorkspace.E[ 448 ]), &(acadoWorkspace.QE[ 488 ]) );
acado_setBlockH11( 1, 6, &(acadoWorkspace.E[ 536 ]), &(acadoWorkspace.QE[ 576 ]) );
acado_setBlockH11( 1, 6, &(acadoWorkspace.E[ 632 ]), &(acadoWorkspace.QE[ 672 ]) );
acado_setBlockH11( 1, 6, &(acadoWorkspace.E[ 736 ]), &(acadoWorkspace.QE[ 776 ]) );
acado_setBlockH11( 1, 6, &(acadoWorkspace.E[ 848 ]), &(acadoWorkspace.QE[ 888 ]) );
acado_setBlockH11( 1, 6, &(acadoWorkspace.E[ 968 ]), &(acadoWorkspace.QE[ 1008 ]) );
acado_zeroBlockH11( 1, 7 );
acado_setBlockH11( 1, 7, &(acadoWorkspace.E[ 232 ]), &(acadoWorkspace.QE[ 280 ]) );
acado_setBlockH11( 1, 7, &(acadoWorkspace.E[ 296 ]), &(acadoWorkspace.QE[ 344 ]) );
acado_setBlockH11( 1, 7, &(acadoWorkspace.E[ 368 ]), &(acadoWorkspace.QE[ 416 ]) );
acado_setBlockH11( 1, 7, &(acadoWorkspace.E[ 448 ]), &(acadoWorkspace.QE[ 496 ]) );
acado_setBlockH11( 1, 7, &(acadoWorkspace.E[ 536 ]), &(acadoWorkspace.QE[ 584 ]) );
acado_setBlockH11( 1, 7, &(acadoWorkspace.E[ 632 ]), &(acadoWorkspace.QE[ 680 ]) );
acado_setBlockH11( 1, 7, &(acadoWorkspace.E[ 736 ]), &(acadoWorkspace.QE[ 784 ]) );
acado_setBlockH11( 1, 7, &(acadoWorkspace.E[ 848 ]), &(acadoWorkspace.QE[ 896 ]) );
acado_setBlockH11( 1, 7, &(acadoWorkspace.E[ 968 ]), &(acadoWorkspace.QE[ 1016 ]) );
acado_zeroBlockH11( 1, 8 );
acado_setBlockH11( 1, 8, &(acadoWorkspace.E[ 296 ]), &(acadoWorkspace.QE[ 352 ]) );
acado_setBlockH11( 1, 8, &(acadoWorkspace.E[ 368 ]), &(acadoWorkspace.QE[ 424 ]) );
acado_setBlockH11( 1, 8, &(acadoWorkspace.E[ 448 ]), &(acadoWorkspace.QE[ 504 ]) );
acado_setBlockH11( 1, 8, &(acadoWorkspace.E[ 536 ]), &(acadoWorkspace.QE[ 592 ]) );
acado_setBlockH11( 1, 8, &(acadoWorkspace.E[ 632 ]), &(acadoWorkspace.QE[ 688 ]) );
acado_setBlockH11( 1, 8, &(acadoWorkspace.E[ 736 ]), &(acadoWorkspace.QE[ 792 ]) );
acado_setBlockH11( 1, 8, &(acadoWorkspace.E[ 848 ]), &(acadoWorkspace.QE[ 904 ]) );
acado_setBlockH11( 1, 8, &(acadoWorkspace.E[ 968 ]), &(acadoWorkspace.QE[ 1024 ]) );
acado_zeroBlockH11( 1, 9 );
acado_setBlockH11( 1, 9, &(acadoWorkspace.E[ 368 ]), &(acadoWorkspace.QE[ 432 ]) );
acado_setBlockH11( 1, 9, &(acadoWorkspace.E[ 448 ]), &(acadoWorkspace.QE[ 512 ]) );
acado_setBlockH11( 1, 9, &(acadoWorkspace.E[ 536 ]), &(acadoWorkspace.QE[ 600 ]) );
acado_setBlockH11( 1, 9, &(acadoWorkspace.E[ 632 ]), &(acadoWorkspace.QE[ 696 ]) );
acado_setBlockH11( 1, 9, &(acadoWorkspace.E[ 736 ]), &(acadoWorkspace.QE[ 800 ]) );
acado_setBlockH11( 1, 9, &(acadoWorkspace.E[ 848 ]), &(acadoWorkspace.QE[ 912 ]) );
acado_setBlockH11( 1, 9, &(acadoWorkspace.E[ 968 ]), &(acadoWorkspace.QE[ 1032 ]) );
acado_zeroBlockH11( 1, 10 );
acado_setBlockH11( 1, 10, &(acadoWorkspace.E[ 448 ]), &(acadoWorkspace.QE[ 520 ]) );
acado_setBlockH11( 1, 10, &(acadoWorkspace.E[ 536 ]), &(acadoWorkspace.QE[ 608 ]) );
acado_setBlockH11( 1, 10, &(acadoWorkspace.E[ 632 ]), &(acadoWorkspace.QE[ 704 ]) );
acado_setBlockH11( 1, 10, &(acadoWorkspace.E[ 736 ]), &(acadoWorkspace.QE[ 808 ]) );
acado_setBlockH11( 1, 10, &(acadoWorkspace.E[ 848 ]), &(acadoWorkspace.QE[ 920 ]) );
acado_setBlockH11( 1, 10, &(acadoWorkspace.E[ 968 ]), &(acadoWorkspace.QE[ 1040 ]) );
acado_zeroBlockH11( 1, 11 );
acado_setBlockH11( 1, 11, &(acadoWorkspace.E[ 536 ]), &(acadoWorkspace.QE[ 616 ]) );
acado_setBlockH11( 1, 11, &(acadoWorkspace.E[ 632 ]), &(acadoWorkspace.QE[ 712 ]) );
acado_setBlockH11( 1, 11, &(acadoWorkspace.E[ 736 ]), &(acadoWorkspace.QE[ 816 ]) );
acado_setBlockH11( 1, 11, &(acadoWorkspace.E[ 848 ]), &(acadoWorkspace.QE[ 928 ]) );
acado_setBlockH11( 1, 11, &(acadoWorkspace.E[ 968 ]), &(acadoWorkspace.QE[ 1048 ]) );
acado_zeroBlockH11( 1, 12 );
acado_setBlockH11( 1, 12, &(acadoWorkspace.E[ 632 ]), &(acadoWorkspace.QE[ 720 ]) );
acado_setBlockH11( 1, 12, &(acadoWorkspace.E[ 736 ]), &(acadoWorkspace.QE[ 824 ]) );
acado_setBlockH11( 1, 12, &(acadoWorkspace.E[ 848 ]), &(acadoWorkspace.QE[ 936 ]) );
acado_setBlockH11( 1, 12, &(acadoWorkspace.E[ 968 ]), &(acadoWorkspace.QE[ 1056 ]) );
acado_zeroBlockH11( 1, 13 );
acado_setBlockH11( 1, 13, &(acadoWorkspace.E[ 736 ]), &(acadoWorkspace.QE[ 832 ]) );
acado_setBlockH11( 1, 13, &(acadoWorkspace.E[ 848 ]), &(acadoWorkspace.QE[ 944 ]) );
acado_setBlockH11( 1, 13, &(acadoWorkspace.E[ 968 ]), &(acadoWorkspace.QE[ 1064 ]) );
acado_zeroBlockH11( 1, 14 );
acado_setBlockH11( 1, 14, &(acadoWorkspace.E[ 848 ]), &(acadoWorkspace.QE[ 952 ]) );
acado_setBlockH11( 1, 14, &(acadoWorkspace.E[ 968 ]), &(acadoWorkspace.QE[ 1072 ]) );
acado_zeroBlockH11( 1, 15 );
acado_setBlockH11( 1, 15, &(acadoWorkspace.E[ 968 ]), &(acadoWorkspace.QE[ 1080 ]) );
acado_setBlockH11_R1( 2, 2, &(acadoWorkspace.R1[ 8 ]) );
acado_setBlockH11( 2, 2, &(acadoWorkspace.E[ 40 ]), &(acadoWorkspace.QE[ 40 ]) );
acado_setBlockH11( 2, 2, &(acadoWorkspace.E[ 64 ]), &(acadoWorkspace.QE[ 64 ]) );
acado_setBlockH11( 2, 2, &(acadoWorkspace.E[ 96 ]), &(acadoWorkspace.QE[ 96 ]) );
acado_setBlockH11( 2, 2, &(acadoWorkspace.E[ 136 ]), &(acadoWorkspace.QE[ 136 ]) );
acado_setBlockH11( 2, 2, &(acadoWorkspace.E[ 184 ]), &(acadoWorkspace.QE[ 184 ]) );
acado_setBlockH11( 2, 2, &(acadoWorkspace.E[ 240 ]), &(acadoWorkspace.QE[ 240 ]) );
acado_setBlockH11( 2, 2, &(acadoWorkspace.E[ 304 ]), &(acadoWorkspace.QE[ 304 ]) );
acado_setBlockH11( 2, 2, &(acadoWorkspace.E[ 376 ]), &(acadoWorkspace.QE[ 376 ]) );
acado_setBlockH11( 2, 2, &(acadoWorkspace.E[ 456 ]), &(acadoWorkspace.QE[ 456 ]) );
acado_setBlockH11( 2, 2, &(acadoWorkspace.E[ 544 ]), &(acadoWorkspace.QE[ 544 ]) );
acado_setBlockH11( 2, 2, &(acadoWorkspace.E[ 640 ]), &(acadoWorkspace.QE[ 640 ]) );
acado_setBlockH11( 2, 2, &(acadoWorkspace.E[ 744 ]), &(acadoWorkspace.QE[ 744 ]) );
acado_setBlockH11( 2, 2, &(acadoWorkspace.E[ 856 ]), &(acadoWorkspace.QE[ 856 ]) );
acado_setBlockH11( 2, 2, &(acadoWorkspace.E[ 976 ]), &(acadoWorkspace.QE[ 976 ]) );
acado_zeroBlockH11( 2, 3 );
acado_setBlockH11( 2, 3, &(acadoWorkspace.E[ 64 ]), &(acadoWorkspace.QE[ 72 ]) );
acado_setBlockH11( 2, 3, &(acadoWorkspace.E[ 96 ]), &(acadoWorkspace.QE[ 104 ]) );
acado_setBlockH11( 2, 3, &(acadoWorkspace.E[ 136 ]), &(acadoWorkspace.QE[ 144 ]) );
acado_setBlockH11( 2, 3, &(acadoWorkspace.E[ 184 ]), &(acadoWorkspace.QE[ 192 ]) );
acado_setBlockH11( 2, 3, &(acadoWorkspace.E[ 240 ]), &(acadoWorkspace.QE[ 248 ]) );
acado_setBlockH11( 2, 3, &(acadoWorkspace.E[ 304 ]), &(acadoWorkspace.QE[ 312 ]) );
acado_setBlockH11( 2, 3, &(acadoWorkspace.E[ 376 ]), &(acadoWorkspace.QE[ 384 ]) );
acado_setBlockH11( 2, 3, &(acadoWorkspace.E[ 456 ]), &(acadoWorkspace.QE[ 464 ]) );
acado_setBlockH11( 2, 3, &(acadoWorkspace.E[ 544 ]), &(acadoWorkspace.QE[ 552 ]) );
acado_setBlockH11( 2, 3, &(acadoWorkspace.E[ 640 ]), &(acadoWorkspace.QE[ 648 ]) );
acado_setBlockH11( 2, 3, &(acadoWorkspace.E[ 744 ]), &(acadoWorkspace.QE[ 752 ]) );
acado_setBlockH11( 2, 3, &(acadoWorkspace.E[ 856 ]), &(acadoWorkspace.QE[ 864 ]) );
acado_setBlockH11( 2, 3, &(acadoWorkspace.E[ 976 ]), &(acadoWorkspace.QE[ 984 ]) );
acado_zeroBlockH11( 2, 4 );
acado_setBlockH11( 2, 4, &(acadoWorkspace.E[ 96 ]), &(acadoWorkspace.QE[ 112 ]) );
acado_setBlockH11( 2, 4, &(acadoWorkspace.E[ 136 ]), &(acadoWorkspace.QE[ 152 ]) );
acado_setBlockH11( 2, 4, &(acadoWorkspace.E[ 184 ]), &(acadoWorkspace.QE[ 200 ]) );
acado_setBlockH11( 2, 4, &(acadoWorkspace.E[ 240 ]), &(acadoWorkspace.QE[ 256 ]) );
acado_setBlockH11( 2, 4, &(acadoWorkspace.E[ 304 ]), &(acadoWorkspace.QE[ 320 ]) );
acado_setBlockH11( 2, 4, &(acadoWorkspace.E[ 376 ]), &(acadoWorkspace.QE[ 392 ]) );
acado_setBlockH11( 2, 4, &(acadoWorkspace.E[ 456 ]), &(acadoWorkspace.QE[ 472 ]) );
acado_setBlockH11( 2, 4, &(acadoWorkspace.E[ 544 ]), &(acadoWorkspace.QE[ 560 ]) );
acado_setBlockH11( 2, 4, &(acadoWorkspace.E[ 640 ]), &(acadoWorkspace.QE[ 656 ]) );
acado_setBlockH11( 2, 4, &(acadoWorkspace.E[ 744 ]), &(acadoWorkspace.QE[ 760 ]) );
acado_setBlockH11( 2, 4, &(acadoWorkspace.E[ 856 ]), &(acadoWorkspace.QE[ 872 ]) );
acado_setBlockH11( 2, 4, &(acadoWorkspace.E[ 976 ]), &(acadoWorkspace.QE[ 992 ]) );
acado_zeroBlockH11( 2, 5 );
acado_setBlockH11( 2, 5, &(acadoWorkspace.E[ 136 ]), &(acadoWorkspace.QE[ 160 ]) );
acado_setBlockH11( 2, 5, &(acadoWorkspace.E[ 184 ]), &(acadoWorkspace.QE[ 208 ]) );
acado_setBlockH11( 2, 5, &(acadoWorkspace.E[ 240 ]), &(acadoWorkspace.QE[ 264 ]) );
acado_setBlockH11( 2, 5, &(acadoWorkspace.E[ 304 ]), &(acadoWorkspace.QE[ 328 ]) );
acado_setBlockH11( 2, 5, &(acadoWorkspace.E[ 376 ]), &(acadoWorkspace.QE[ 400 ]) );
acado_setBlockH11( 2, 5, &(acadoWorkspace.E[ 456 ]), &(acadoWorkspace.QE[ 480 ]) );
acado_setBlockH11( 2, 5, &(acadoWorkspace.E[ 544 ]), &(acadoWorkspace.QE[ 568 ]) );
acado_setBlockH11( 2, 5, &(acadoWorkspace.E[ 640 ]), &(acadoWorkspace.QE[ 664 ]) );
acado_setBlockH11( 2, 5, &(acadoWorkspace.E[ 744 ]), &(acadoWorkspace.QE[ 768 ]) );
acado_setBlockH11( 2, 5, &(acadoWorkspace.E[ 856 ]), &(acadoWorkspace.QE[ 880 ]) );
acado_setBlockH11( 2, 5, &(acadoWorkspace.E[ 976 ]), &(acadoWorkspace.QE[ 1000 ]) );
acado_zeroBlockH11( 2, 6 );
acado_setBlockH11( 2, 6, &(acadoWorkspace.E[ 184 ]), &(acadoWorkspace.QE[ 216 ]) );
acado_setBlockH11( 2, 6, &(acadoWorkspace.E[ 240 ]), &(acadoWorkspace.QE[ 272 ]) );
acado_setBlockH11( 2, 6, &(acadoWorkspace.E[ 304 ]), &(acadoWorkspace.QE[ 336 ]) );
acado_setBlockH11( 2, 6, &(acadoWorkspace.E[ 376 ]), &(acadoWorkspace.QE[ 408 ]) );
acado_setBlockH11( 2, 6, &(acadoWorkspace.E[ 456 ]), &(acadoWorkspace.QE[ 488 ]) );
acado_setBlockH11( 2, 6, &(acadoWorkspace.E[ 544 ]), &(acadoWorkspace.QE[ 576 ]) );
acado_setBlockH11( 2, 6, &(acadoWorkspace.E[ 640 ]), &(acadoWorkspace.QE[ 672 ]) );
acado_setBlockH11( 2, 6, &(acadoWorkspace.E[ 744 ]), &(acadoWorkspace.QE[ 776 ]) );
acado_setBlockH11( 2, 6, &(acadoWorkspace.E[ 856 ]), &(acadoWorkspace.QE[ 888 ]) );
acado_setBlockH11( 2, 6, &(acadoWorkspace.E[ 976 ]), &(acadoWorkspace.QE[ 1008 ]) );
acado_zeroBlockH11( 2, 7 );
acado_setBlockH11( 2, 7, &(acadoWorkspace.E[ 240 ]), &(acadoWorkspace.QE[ 280 ]) );
acado_setBlockH11( 2, 7, &(acadoWorkspace.E[ 304 ]), &(acadoWorkspace.QE[ 344 ]) );
acado_setBlockH11( 2, 7, &(acadoWorkspace.E[ 376 ]), &(acadoWorkspace.QE[ 416 ]) );
acado_setBlockH11( 2, 7, &(acadoWorkspace.E[ 456 ]), &(acadoWorkspace.QE[ 496 ]) );
acado_setBlockH11( 2, 7, &(acadoWorkspace.E[ 544 ]), &(acadoWorkspace.QE[ 584 ]) );
acado_setBlockH11( 2, 7, &(acadoWorkspace.E[ 640 ]), &(acadoWorkspace.QE[ 680 ]) );
acado_setBlockH11( 2, 7, &(acadoWorkspace.E[ 744 ]), &(acadoWorkspace.QE[ 784 ]) );
acado_setBlockH11( 2, 7, &(acadoWorkspace.E[ 856 ]), &(acadoWorkspace.QE[ 896 ]) );
acado_setBlockH11( 2, 7, &(acadoWorkspace.E[ 976 ]), &(acadoWorkspace.QE[ 1016 ]) );
acado_zeroBlockH11( 2, 8 );
acado_setBlockH11( 2, 8, &(acadoWorkspace.E[ 304 ]), &(acadoWorkspace.QE[ 352 ]) );
acado_setBlockH11( 2, 8, &(acadoWorkspace.E[ 376 ]), &(acadoWorkspace.QE[ 424 ]) );
acado_setBlockH11( 2, 8, &(acadoWorkspace.E[ 456 ]), &(acadoWorkspace.QE[ 504 ]) );
acado_setBlockH11( 2, 8, &(acadoWorkspace.E[ 544 ]), &(acadoWorkspace.QE[ 592 ]) );
acado_setBlockH11( 2, 8, &(acadoWorkspace.E[ 640 ]), &(acadoWorkspace.QE[ 688 ]) );
acado_setBlockH11( 2, 8, &(acadoWorkspace.E[ 744 ]), &(acadoWorkspace.QE[ 792 ]) );
acado_setBlockH11( 2, 8, &(acadoWorkspace.E[ 856 ]), &(acadoWorkspace.QE[ 904 ]) );
acado_setBlockH11( 2, 8, &(acadoWorkspace.E[ 976 ]), &(acadoWorkspace.QE[ 1024 ]) );
acado_zeroBlockH11( 2, 9 );
acado_setBlockH11( 2, 9, &(acadoWorkspace.E[ 376 ]), &(acadoWorkspace.QE[ 432 ]) );
acado_setBlockH11( 2, 9, &(acadoWorkspace.E[ 456 ]), &(acadoWorkspace.QE[ 512 ]) );
acado_setBlockH11( 2, 9, &(acadoWorkspace.E[ 544 ]), &(acadoWorkspace.QE[ 600 ]) );
acado_setBlockH11( 2, 9, &(acadoWorkspace.E[ 640 ]), &(acadoWorkspace.QE[ 696 ]) );
acado_setBlockH11( 2, 9, &(acadoWorkspace.E[ 744 ]), &(acadoWorkspace.QE[ 800 ]) );
acado_setBlockH11( 2, 9, &(acadoWorkspace.E[ 856 ]), &(acadoWorkspace.QE[ 912 ]) );
acado_setBlockH11( 2, 9, &(acadoWorkspace.E[ 976 ]), &(acadoWorkspace.QE[ 1032 ]) );
acado_zeroBlockH11( 2, 10 );
acado_setBlockH11( 2, 10, &(acadoWorkspace.E[ 456 ]), &(acadoWorkspace.QE[ 520 ]) );
acado_setBlockH11( 2, 10, &(acadoWorkspace.E[ 544 ]), &(acadoWorkspace.QE[ 608 ]) );
acado_setBlockH11( 2, 10, &(acadoWorkspace.E[ 640 ]), &(acadoWorkspace.QE[ 704 ]) );
acado_setBlockH11( 2, 10, &(acadoWorkspace.E[ 744 ]), &(acadoWorkspace.QE[ 808 ]) );
acado_setBlockH11( 2, 10, &(acadoWorkspace.E[ 856 ]), &(acadoWorkspace.QE[ 920 ]) );
acado_setBlockH11( 2, 10, &(acadoWorkspace.E[ 976 ]), &(acadoWorkspace.QE[ 1040 ]) );
acado_zeroBlockH11( 2, 11 );
acado_setBlockH11( 2, 11, &(acadoWorkspace.E[ 544 ]), &(acadoWorkspace.QE[ 616 ]) );
acado_setBlockH11( 2, 11, &(acadoWorkspace.E[ 640 ]), &(acadoWorkspace.QE[ 712 ]) );
acado_setBlockH11( 2, 11, &(acadoWorkspace.E[ 744 ]), &(acadoWorkspace.QE[ 816 ]) );
acado_setBlockH11( 2, 11, &(acadoWorkspace.E[ 856 ]), &(acadoWorkspace.QE[ 928 ]) );
acado_setBlockH11( 2, 11, &(acadoWorkspace.E[ 976 ]), &(acadoWorkspace.QE[ 1048 ]) );
acado_zeroBlockH11( 2, 12 );
acado_setBlockH11( 2, 12, &(acadoWorkspace.E[ 640 ]), &(acadoWorkspace.QE[ 720 ]) );
acado_setBlockH11( 2, 12, &(acadoWorkspace.E[ 744 ]), &(acadoWorkspace.QE[ 824 ]) );
acado_setBlockH11( 2, 12, &(acadoWorkspace.E[ 856 ]), &(acadoWorkspace.QE[ 936 ]) );
acado_setBlockH11( 2, 12, &(acadoWorkspace.E[ 976 ]), &(acadoWorkspace.QE[ 1056 ]) );
acado_zeroBlockH11( 2, 13 );
acado_setBlockH11( 2, 13, &(acadoWorkspace.E[ 744 ]), &(acadoWorkspace.QE[ 832 ]) );
acado_setBlockH11( 2, 13, &(acadoWorkspace.E[ 856 ]), &(acadoWorkspace.QE[ 944 ]) );
acado_setBlockH11( 2, 13, &(acadoWorkspace.E[ 976 ]), &(acadoWorkspace.QE[ 1064 ]) );
acado_zeroBlockH11( 2, 14 );
acado_setBlockH11( 2, 14, &(acadoWorkspace.E[ 856 ]), &(acadoWorkspace.QE[ 952 ]) );
acado_setBlockH11( 2, 14, &(acadoWorkspace.E[ 976 ]), &(acadoWorkspace.QE[ 1072 ]) );
acado_zeroBlockH11( 2, 15 );
acado_setBlockH11( 2, 15, &(acadoWorkspace.E[ 976 ]), &(acadoWorkspace.QE[ 1080 ]) );
acado_setBlockH11_R1( 3, 3, &(acadoWorkspace.R1[ 12 ]) );
acado_setBlockH11( 3, 3, &(acadoWorkspace.E[ 72 ]), &(acadoWorkspace.QE[ 72 ]) );
acado_setBlockH11( 3, 3, &(acadoWorkspace.E[ 104 ]), &(acadoWorkspace.QE[ 104 ]) );
acado_setBlockH11( 3, 3, &(acadoWorkspace.E[ 144 ]), &(acadoWorkspace.QE[ 144 ]) );
acado_setBlockH11( 3, 3, &(acadoWorkspace.E[ 192 ]), &(acadoWorkspace.QE[ 192 ]) );
acado_setBlockH11( 3, 3, &(acadoWorkspace.E[ 248 ]), &(acadoWorkspace.QE[ 248 ]) );
acado_setBlockH11( 3, 3, &(acadoWorkspace.E[ 312 ]), &(acadoWorkspace.QE[ 312 ]) );
acado_setBlockH11( 3, 3, &(acadoWorkspace.E[ 384 ]), &(acadoWorkspace.QE[ 384 ]) );
acado_setBlockH11( 3, 3, &(acadoWorkspace.E[ 464 ]), &(acadoWorkspace.QE[ 464 ]) );
acado_setBlockH11( 3, 3, &(acadoWorkspace.E[ 552 ]), &(acadoWorkspace.QE[ 552 ]) );
acado_setBlockH11( 3, 3, &(acadoWorkspace.E[ 648 ]), &(acadoWorkspace.QE[ 648 ]) );
acado_setBlockH11( 3, 3, &(acadoWorkspace.E[ 752 ]), &(acadoWorkspace.QE[ 752 ]) );
acado_setBlockH11( 3, 3, &(acadoWorkspace.E[ 864 ]), &(acadoWorkspace.QE[ 864 ]) );
acado_setBlockH11( 3, 3, &(acadoWorkspace.E[ 984 ]), &(acadoWorkspace.QE[ 984 ]) );
acado_zeroBlockH11( 3, 4 );
acado_setBlockH11( 3, 4, &(acadoWorkspace.E[ 104 ]), &(acadoWorkspace.QE[ 112 ]) );
acado_setBlockH11( 3, 4, &(acadoWorkspace.E[ 144 ]), &(acadoWorkspace.QE[ 152 ]) );
acado_setBlockH11( 3, 4, &(acadoWorkspace.E[ 192 ]), &(acadoWorkspace.QE[ 200 ]) );
acado_setBlockH11( 3, 4, &(acadoWorkspace.E[ 248 ]), &(acadoWorkspace.QE[ 256 ]) );
acado_setBlockH11( 3, 4, &(acadoWorkspace.E[ 312 ]), &(acadoWorkspace.QE[ 320 ]) );
acado_setBlockH11( 3, 4, &(acadoWorkspace.E[ 384 ]), &(acadoWorkspace.QE[ 392 ]) );
acado_setBlockH11( 3, 4, &(acadoWorkspace.E[ 464 ]), &(acadoWorkspace.QE[ 472 ]) );
acado_setBlockH11( 3, 4, &(acadoWorkspace.E[ 552 ]), &(acadoWorkspace.QE[ 560 ]) );
acado_setBlockH11( 3, 4, &(acadoWorkspace.E[ 648 ]), &(acadoWorkspace.QE[ 656 ]) );
acado_setBlockH11( 3, 4, &(acadoWorkspace.E[ 752 ]), &(acadoWorkspace.QE[ 760 ]) );
acado_setBlockH11( 3, 4, &(acadoWorkspace.E[ 864 ]), &(acadoWorkspace.QE[ 872 ]) );
acado_setBlockH11( 3, 4, &(acadoWorkspace.E[ 984 ]), &(acadoWorkspace.QE[ 992 ]) );
acado_zeroBlockH11( 3, 5 );
acado_setBlockH11( 3, 5, &(acadoWorkspace.E[ 144 ]), &(acadoWorkspace.QE[ 160 ]) );
acado_setBlockH11( 3, 5, &(acadoWorkspace.E[ 192 ]), &(acadoWorkspace.QE[ 208 ]) );
acado_setBlockH11( 3, 5, &(acadoWorkspace.E[ 248 ]), &(acadoWorkspace.QE[ 264 ]) );
acado_setBlockH11( 3, 5, &(acadoWorkspace.E[ 312 ]), &(acadoWorkspace.QE[ 328 ]) );
acado_setBlockH11( 3, 5, &(acadoWorkspace.E[ 384 ]), &(acadoWorkspace.QE[ 400 ]) );
acado_setBlockH11( 3, 5, &(acadoWorkspace.E[ 464 ]), &(acadoWorkspace.QE[ 480 ]) );
acado_setBlockH11( 3, 5, &(acadoWorkspace.E[ 552 ]), &(acadoWorkspace.QE[ 568 ]) );
acado_setBlockH11( 3, 5, &(acadoWorkspace.E[ 648 ]), &(acadoWorkspace.QE[ 664 ]) );
acado_setBlockH11( 3, 5, &(acadoWorkspace.E[ 752 ]), &(acadoWorkspace.QE[ 768 ]) );
acado_setBlockH11( 3, 5, &(acadoWorkspace.E[ 864 ]), &(acadoWorkspace.QE[ 880 ]) );
acado_setBlockH11( 3, 5, &(acadoWorkspace.E[ 984 ]), &(acadoWorkspace.QE[ 1000 ]) );
acado_zeroBlockH11( 3, 6 );
acado_setBlockH11( 3, 6, &(acadoWorkspace.E[ 192 ]), &(acadoWorkspace.QE[ 216 ]) );
acado_setBlockH11( 3, 6, &(acadoWorkspace.E[ 248 ]), &(acadoWorkspace.QE[ 272 ]) );
acado_setBlockH11( 3, 6, &(acadoWorkspace.E[ 312 ]), &(acadoWorkspace.QE[ 336 ]) );
acado_setBlockH11( 3, 6, &(acadoWorkspace.E[ 384 ]), &(acadoWorkspace.QE[ 408 ]) );
acado_setBlockH11( 3, 6, &(acadoWorkspace.E[ 464 ]), &(acadoWorkspace.QE[ 488 ]) );
acado_setBlockH11( 3, 6, &(acadoWorkspace.E[ 552 ]), &(acadoWorkspace.QE[ 576 ]) );
acado_setBlockH11( 3, 6, &(acadoWorkspace.E[ 648 ]), &(acadoWorkspace.QE[ 672 ]) );
acado_setBlockH11( 3, 6, &(acadoWorkspace.E[ 752 ]), &(acadoWorkspace.QE[ 776 ]) );
acado_setBlockH11( 3, 6, &(acadoWorkspace.E[ 864 ]), &(acadoWorkspace.QE[ 888 ]) );
acado_setBlockH11( 3, 6, &(acadoWorkspace.E[ 984 ]), &(acadoWorkspace.QE[ 1008 ]) );
acado_zeroBlockH11( 3, 7 );
acado_setBlockH11( 3, 7, &(acadoWorkspace.E[ 248 ]), &(acadoWorkspace.QE[ 280 ]) );
acado_setBlockH11( 3, 7, &(acadoWorkspace.E[ 312 ]), &(acadoWorkspace.QE[ 344 ]) );
acado_setBlockH11( 3, 7, &(acadoWorkspace.E[ 384 ]), &(acadoWorkspace.QE[ 416 ]) );
acado_setBlockH11( 3, 7, &(acadoWorkspace.E[ 464 ]), &(acadoWorkspace.QE[ 496 ]) );
acado_setBlockH11( 3, 7, &(acadoWorkspace.E[ 552 ]), &(acadoWorkspace.QE[ 584 ]) );
acado_setBlockH11( 3, 7, &(acadoWorkspace.E[ 648 ]), &(acadoWorkspace.QE[ 680 ]) );
acado_setBlockH11( 3, 7, &(acadoWorkspace.E[ 752 ]), &(acadoWorkspace.QE[ 784 ]) );
acado_setBlockH11( 3, 7, &(acadoWorkspace.E[ 864 ]), &(acadoWorkspace.QE[ 896 ]) );
acado_setBlockH11( 3, 7, &(acadoWorkspace.E[ 984 ]), &(acadoWorkspace.QE[ 1016 ]) );
acado_zeroBlockH11( 3, 8 );
acado_setBlockH11( 3, 8, &(acadoWorkspace.E[ 312 ]), &(acadoWorkspace.QE[ 352 ]) );
acado_setBlockH11( 3, 8, &(acadoWorkspace.E[ 384 ]), &(acadoWorkspace.QE[ 424 ]) );
acado_setBlockH11( 3, 8, &(acadoWorkspace.E[ 464 ]), &(acadoWorkspace.QE[ 504 ]) );
acado_setBlockH11( 3, 8, &(acadoWorkspace.E[ 552 ]), &(acadoWorkspace.QE[ 592 ]) );
acado_setBlockH11( 3, 8, &(acadoWorkspace.E[ 648 ]), &(acadoWorkspace.QE[ 688 ]) );
acado_setBlockH11( 3, 8, &(acadoWorkspace.E[ 752 ]), &(acadoWorkspace.QE[ 792 ]) );
acado_setBlockH11( 3, 8, &(acadoWorkspace.E[ 864 ]), &(acadoWorkspace.QE[ 904 ]) );
acado_setBlockH11( 3, 8, &(acadoWorkspace.E[ 984 ]), &(acadoWorkspace.QE[ 1024 ]) );
acado_zeroBlockH11( 3, 9 );
acado_setBlockH11( 3, 9, &(acadoWorkspace.E[ 384 ]), &(acadoWorkspace.QE[ 432 ]) );
acado_setBlockH11( 3, 9, &(acadoWorkspace.E[ 464 ]), &(acadoWorkspace.QE[ 512 ]) );
acado_setBlockH11( 3, 9, &(acadoWorkspace.E[ 552 ]), &(acadoWorkspace.QE[ 600 ]) );
acado_setBlockH11( 3, 9, &(acadoWorkspace.E[ 648 ]), &(acadoWorkspace.QE[ 696 ]) );
acado_setBlockH11( 3, 9, &(acadoWorkspace.E[ 752 ]), &(acadoWorkspace.QE[ 800 ]) );
acado_setBlockH11( 3, 9, &(acadoWorkspace.E[ 864 ]), &(acadoWorkspace.QE[ 912 ]) );
acado_setBlockH11( 3, 9, &(acadoWorkspace.E[ 984 ]), &(acadoWorkspace.QE[ 1032 ]) );
acado_zeroBlockH11( 3, 10 );
acado_setBlockH11( 3, 10, &(acadoWorkspace.E[ 464 ]), &(acadoWorkspace.QE[ 520 ]) );
acado_setBlockH11( 3, 10, &(acadoWorkspace.E[ 552 ]), &(acadoWorkspace.QE[ 608 ]) );
acado_setBlockH11( 3, 10, &(acadoWorkspace.E[ 648 ]), &(acadoWorkspace.QE[ 704 ]) );
acado_setBlockH11( 3, 10, &(acadoWorkspace.E[ 752 ]), &(acadoWorkspace.QE[ 808 ]) );
acado_setBlockH11( 3, 10, &(acadoWorkspace.E[ 864 ]), &(acadoWorkspace.QE[ 920 ]) );
acado_setBlockH11( 3, 10, &(acadoWorkspace.E[ 984 ]), &(acadoWorkspace.QE[ 1040 ]) );
acado_zeroBlockH11( 3, 11 );
acado_setBlockH11( 3, 11, &(acadoWorkspace.E[ 552 ]), &(acadoWorkspace.QE[ 616 ]) );
acado_setBlockH11( 3, 11, &(acadoWorkspace.E[ 648 ]), &(acadoWorkspace.QE[ 712 ]) );
acado_setBlockH11( 3, 11, &(acadoWorkspace.E[ 752 ]), &(acadoWorkspace.QE[ 816 ]) );
acado_setBlockH11( 3, 11, &(acadoWorkspace.E[ 864 ]), &(acadoWorkspace.QE[ 928 ]) );
acado_setBlockH11( 3, 11, &(acadoWorkspace.E[ 984 ]), &(acadoWorkspace.QE[ 1048 ]) );
acado_zeroBlockH11( 3, 12 );
acado_setBlockH11( 3, 12, &(acadoWorkspace.E[ 648 ]), &(acadoWorkspace.QE[ 720 ]) );
acado_setBlockH11( 3, 12, &(acadoWorkspace.E[ 752 ]), &(acadoWorkspace.QE[ 824 ]) );
acado_setBlockH11( 3, 12, &(acadoWorkspace.E[ 864 ]), &(acadoWorkspace.QE[ 936 ]) );
acado_setBlockH11( 3, 12, &(acadoWorkspace.E[ 984 ]), &(acadoWorkspace.QE[ 1056 ]) );
acado_zeroBlockH11( 3, 13 );
acado_setBlockH11( 3, 13, &(acadoWorkspace.E[ 752 ]), &(acadoWorkspace.QE[ 832 ]) );
acado_setBlockH11( 3, 13, &(acadoWorkspace.E[ 864 ]), &(acadoWorkspace.QE[ 944 ]) );
acado_setBlockH11( 3, 13, &(acadoWorkspace.E[ 984 ]), &(acadoWorkspace.QE[ 1064 ]) );
acado_zeroBlockH11( 3, 14 );
acado_setBlockH11( 3, 14, &(acadoWorkspace.E[ 864 ]), &(acadoWorkspace.QE[ 952 ]) );
acado_setBlockH11( 3, 14, &(acadoWorkspace.E[ 984 ]), &(acadoWorkspace.QE[ 1072 ]) );
acado_zeroBlockH11( 3, 15 );
acado_setBlockH11( 3, 15, &(acadoWorkspace.E[ 984 ]), &(acadoWorkspace.QE[ 1080 ]) );
acado_setBlockH11_R1( 4, 4, &(acadoWorkspace.R1[ 16 ]) );
acado_setBlockH11( 4, 4, &(acadoWorkspace.E[ 112 ]), &(acadoWorkspace.QE[ 112 ]) );
acado_setBlockH11( 4, 4, &(acadoWorkspace.E[ 152 ]), &(acadoWorkspace.QE[ 152 ]) );
acado_setBlockH11( 4, 4, &(acadoWorkspace.E[ 200 ]), &(acadoWorkspace.QE[ 200 ]) );
acado_setBlockH11( 4, 4, &(acadoWorkspace.E[ 256 ]), &(acadoWorkspace.QE[ 256 ]) );
acado_setBlockH11( 4, 4, &(acadoWorkspace.E[ 320 ]), &(acadoWorkspace.QE[ 320 ]) );
acado_setBlockH11( 4, 4, &(acadoWorkspace.E[ 392 ]), &(acadoWorkspace.QE[ 392 ]) );
acado_setBlockH11( 4, 4, &(acadoWorkspace.E[ 472 ]), &(acadoWorkspace.QE[ 472 ]) );
acado_setBlockH11( 4, 4, &(acadoWorkspace.E[ 560 ]), &(acadoWorkspace.QE[ 560 ]) );
acado_setBlockH11( 4, 4, &(acadoWorkspace.E[ 656 ]), &(acadoWorkspace.QE[ 656 ]) );
acado_setBlockH11( 4, 4, &(acadoWorkspace.E[ 760 ]), &(acadoWorkspace.QE[ 760 ]) );
acado_setBlockH11( 4, 4, &(acadoWorkspace.E[ 872 ]), &(acadoWorkspace.QE[ 872 ]) );
acado_setBlockH11( 4, 4, &(acadoWorkspace.E[ 992 ]), &(acadoWorkspace.QE[ 992 ]) );
acado_zeroBlockH11( 4, 5 );
acado_setBlockH11( 4, 5, &(acadoWorkspace.E[ 152 ]), &(acadoWorkspace.QE[ 160 ]) );
acado_setBlockH11( 4, 5, &(acadoWorkspace.E[ 200 ]), &(acadoWorkspace.QE[ 208 ]) );
acado_setBlockH11( 4, 5, &(acadoWorkspace.E[ 256 ]), &(acadoWorkspace.QE[ 264 ]) );
acado_setBlockH11( 4, 5, &(acadoWorkspace.E[ 320 ]), &(acadoWorkspace.QE[ 328 ]) );
acado_setBlockH11( 4, 5, &(acadoWorkspace.E[ 392 ]), &(acadoWorkspace.QE[ 400 ]) );
acado_setBlockH11( 4, 5, &(acadoWorkspace.E[ 472 ]), &(acadoWorkspace.QE[ 480 ]) );
acado_setBlockH11( 4, 5, &(acadoWorkspace.E[ 560 ]), &(acadoWorkspace.QE[ 568 ]) );
acado_setBlockH11( 4, 5, &(acadoWorkspace.E[ 656 ]), &(acadoWorkspace.QE[ 664 ]) );
acado_setBlockH11( 4, 5, &(acadoWorkspace.E[ 760 ]), &(acadoWorkspace.QE[ 768 ]) );
acado_setBlockH11( 4, 5, &(acadoWorkspace.E[ 872 ]), &(acadoWorkspace.QE[ 880 ]) );
acado_setBlockH11( 4, 5, &(acadoWorkspace.E[ 992 ]), &(acadoWorkspace.QE[ 1000 ]) );
acado_zeroBlockH11( 4, 6 );
acado_setBlockH11( 4, 6, &(acadoWorkspace.E[ 200 ]), &(acadoWorkspace.QE[ 216 ]) );
acado_setBlockH11( 4, 6, &(acadoWorkspace.E[ 256 ]), &(acadoWorkspace.QE[ 272 ]) );
acado_setBlockH11( 4, 6, &(acadoWorkspace.E[ 320 ]), &(acadoWorkspace.QE[ 336 ]) );
acado_setBlockH11( 4, 6, &(acadoWorkspace.E[ 392 ]), &(acadoWorkspace.QE[ 408 ]) );
acado_setBlockH11( 4, 6, &(acadoWorkspace.E[ 472 ]), &(acadoWorkspace.QE[ 488 ]) );
acado_setBlockH11( 4, 6, &(acadoWorkspace.E[ 560 ]), &(acadoWorkspace.QE[ 576 ]) );
acado_setBlockH11( 4, 6, &(acadoWorkspace.E[ 656 ]), &(acadoWorkspace.QE[ 672 ]) );
acado_setBlockH11( 4, 6, &(acadoWorkspace.E[ 760 ]), &(acadoWorkspace.QE[ 776 ]) );
acado_setBlockH11( 4, 6, &(acadoWorkspace.E[ 872 ]), &(acadoWorkspace.QE[ 888 ]) );
acado_setBlockH11( 4, 6, &(acadoWorkspace.E[ 992 ]), &(acadoWorkspace.QE[ 1008 ]) );
acado_zeroBlockH11( 4, 7 );
acado_setBlockH11( 4, 7, &(acadoWorkspace.E[ 256 ]), &(acadoWorkspace.QE[ 280 ]) );
acado_setBlockH11( 4, 7, &(acadoWorkspace.E[ 320 ]), &(acadoWorkspace.QE[ 344 ]) );
acado_setBlockH11( 4, 7, &(acadoWorkspace.E[ 392 ]), &(acadoWorkspace.QE[ 416 ]) );
acado_setBlockH11( 4, 7, &(acadoWorkspace.E[ 472 ]), &(acadoWorkspace.QE[ 496 ]) );
acado_setBlockH11( 4, 7, &(acadoWorkspace.E[ 560 ]), &(acadoWorkspace.QE[ 584 ]) );
acado_setBlockH11( 4, 7, &(acadoWorkspace.E[ 656 ]), &(acadoWorkspace.QE[ 680 ]) );
acado_setBlockH11( 4, 7, &(acadoWorkspace.E[ 760 ]), &(acadoWorkspace.QE[ 784 ]) );
acado_setBlockH11( 4, 7, &(acadoWorkspace.E[ 872 ]), &(acadoWorkspace.QE[ 896 ]) );
acado_setBlockH11( 4, 7, &(acadoWorkspace.E[ 992 ]), &(acadoWorkspace.QE[ 1016 ]) );
acado_zeroBlockH11( 4, 8 );
acado_setBlockH11( 4, 8, &(acadoWorkspace.E[ 320 ]), &(acadoWorkspace.QE[ 352 ]) );
acado_setBlockH11( 4, 8, &(acadoWorkspace.E[ 392 ]), &(acadoWorkspace.QE[ 424 ]) );
acado_setBlockH11( 4, 8, &(acadoWorkspace.E[ 472 ]), &(acadoWorkspace.QE[ 504 ]) );
acado_setBlockH11( 4, 8, &(acadoWorkspace.E[ 560 ]), &(acadoWorkspace.QE[ 592 ]) );
acado_setBlockH11( 4, 8, &(acadoWorkspace.E[ 656 ]), &(acadoWorkspace.QE[ 688 ]) );
acado_setBlockH11( 4, 8, &(acadoWorkspace.E[ 760 ]), &(acadoWorkspace.QE[ 792 ]) );
acado_setBlockH11( 4, 8, &(acadoWorkspace.E[ 872 ]), &(acadoWorkspace.QE[ 904 ]) );
acado_setBlockH11( 4, 8, &(acadoWorkspace.E[ 992 ]), &(acadoWorkspace.QE[ 1024 ]) );
acado_zeroBlockH11( 4, 9 );
acado_setBlockH11( 4, 9, &(acadoWorkspace.E[ 392 ]), &(acadoWorkspace.QE[ 432 ]) );
acado_setBlockH11( 4, 9, &(acadoWorkspace.E[ 472 ]), &(acadoWorkspace.QE[ 512 ]) );
acado_setBlockH11( 4, 9, &(acadoWorkspace.E[ 560 ]), &(acadoWorkspace.QE[ 600 ]) );
acado_setBlockH11( 4, 9, &(acadoWorkspace.E[ 656 ]), &(acadoWorkspace.QE[ 696 ]) );
acado_setBlockH11( 4, 9, &(acadoWorkspace.E[ 760 ]), &(acadoWorkspace.QE[ 800 ]) );
acado_setBlockH11( 4, 9, &(acadoWorkspace.E[ 872 ]), &(acadoWorkspace.QE[ 912 ]) );
acado_setBlockH11( 4, 9, &(acadoWorkspace.E[ 992 ]), &(acadoWorkspace.QE[ 1032 ]) );
acado_zeroBlockH11( 4, 10 );
acado_setBlockH11( 4, 10, &(acadoWorkspace.E[ 472 ]), &(acadoWorkspace.QE[ 520 ]) );
acado_setBlockH11( 4, 10, &(acadoWorkspace.E[ 560 ]), &(acadoWorkspace.QE[ 608 ]) );
acado_setBlockH11( 4, 10, &(acadoWorkspace.E[ 656 ]), &(acadoWorkspace.QE[ 704 ]) );
acado_setBlockH11( 4, 10, &(acadoWorkspace.E[ 760 ]), &(acadoWorkspace.QE[ 808 ]) );
acado_setBlockH11( 4, 10, &(acadoWorkspace.E[ 872 ]), &(acadoWorkspace.QE[ 920 ]) );
acado_setBlockH11( 4, 10, &(acadoWorkspace.E[ 992 ]), &(acadoWorkspace.QE[ 1040 ]) );
acado_zeroBlockH11( 4, 11 );
acado_setBlockH11( 4, 11, &(acadoWorkspace.E[ 560 ]), &(acadoWorkspace.QE[ 616 ]) );
acado_setBlockH11( 4, 11, &(acadoWorkspace.E[ 656 ]), &(acadoWorkspace.QE[ 712 ]) );
acado_setBlockH11( 4, 11, &(acadoWorkspace.E[ 760 ]), &(acadoWorkspace.QE[ 816 ]) );
acado_setBlockH11( 4, 11, &(acadoWorkspace.E[ 872 ]), &(acadoWorkspace.QE[ 928 ]) );
acado_setBlockH11( 4, 11, &(acadoWorkspace.E[ 992 ]), &(acadoWorkspace.QE[ 1048 ]) );
acado_zeroBlockH11( 4, 12 );
acado_setBlockH11( 4, 12, &(acadoWorkspace.E[ 656 ]), &(acadoWorkspace.QE[ 720 ]) );
acado_setBlockH11( 4, 12, &(acadoWorkspace.E[ 760 ]), &(acadoWorkspace.QE[ 824 ]) );
acado_setBlockH11( 4, 12, &(acadoWorkspace.E[ 872 ]), &(acadoWorkspace.QE[ 936 ]) );
acado_setBlockH11( 4, 12, &(acadoWorkspace.E[ 992 ]), &(acadoWorkspace.QE[ 1056 ]) );
acado_zeroBlockH11( 4, 13 );
acado_setBlockH11( 4, 13, &(acadoWorkspace.E[ 760 ]), &(acadoWorkspace.QE[ 832 ]) );
acado_setBlockH11( 4, 13, &(acadoWorkspace.E[ 872 ]), &(acadoWorkspace.QE[ 944 ]) );
acado_setBlockH11( 4, 13, &(acadoWorkspace.E[ 992 ]), &(acadoWorkspace.QE[ 1064 ]) );
acado_zeroBlockH11( 4, 14 );
acado_setBlockH11( 4, 14, &(acadoWorkspace.E[ 872 ]), &(acadoWorkspace.QE[ 952 ]) );
acado_setBlockH11( 4, 14, &(acadoWorkspace.E[ 992 ]), &(acadoWorkspace.QE[ 1072 ]) );
acado_zeroBlockH11( 4, 15 );
acado_setBlockH11( 4, 15, &(acadoWorkspace.E[ 992 ]), &(acadoWorkspace.QE[ 1080 ]) );
acado_setBlockH11_R1( 5, 5, &(acadoWorkspace.R1[ 20 ]) );
acado_setBlockH11( 5, 5, &(acadoWorkspace.E[ 160 ]), &(acadoWorkspace.QE[ 160 ]) );
acado_setBlockH11( 5, 5, &(acadoWorkspace.E[ 208 ]), &(acadoWorkspace.QE[ 208 ]) );
acado_setBlockH11( 5, 5, &(acadoWorkspace.E[ 264 ]), &(acadoWorkspace.QE[ 264 ]) );
acado_setBlockH11( 5, 5, &(acadoWorkspace.E[ 328 ]), &(acadoWorkspace.QE[ 328 ]) );
acado_setBlockH11( 5, 5, &(acadoWorkspace.E[ 400 ]), &(acadoWorkspace.QE[ 400 ]) );
acado_setBlockH11( 5, 5, &(acadoWorkspace.E[ 480 ]), &(acadoWorkspace.QE[ 480 ]) );
acado_setBlockH11( 5, 5, &(acadoWorkspace.E[ 568 ]), &(acadoWorkspace.QE[ 568 ]) );
acado_setBlockH11( 5, 5, &(acadoWorkspace.E[ 664 ]), &(acadoWorkspace.QE[ 664 ]) );
acado_setBlockH11( 5, 5, &(acadoWorkspace.E[ 768 ]), &(acadoWorkspace.QE[ 768 ]) );
acado_setBlockH11( 5, 5, &(acadoWorkspace.E[ 880 ]), &(acadoWorkspace.QE[ 880 ]) );
acado_setBlockH11( 5, 5, &(acadoWorkspace.E[ 1000 ]), &(acadoWorkspace.QE[ 1000 ]) );
acado_zeroBlockH11( 5, 6 );
acado_setBlockH11( 5, 6, &(acadoWorkspace.E[ 208 ]), &(acadoWorkspace.QE[ 216 ]) );
acado_setBlockH11( 5, 6, &(acadoWorkspace.E[ 264 ]), &(acadoWorkspace.QE[ 272 ]) );
acado_setBlockH11( 5, 6, &(acadoWorkspace.E[ 328 ]), &(acadoWorkspace.QE[ 336 ]) );
acado_setBlockH11( 5, 6, &(acadoWorkspace.E[ 400 ]), &(acadoWorkspace.QE[ 408 ]) );
acado_setBlockH11( 5, 6, &(acadoWorkspace.E[ 480 ]), &(acadoWorkspace.QE[ 488 ]) );
acado_setBlockH11( 5, 6, &(acadoWorkspace.E[ 568 ]), &(acadoWorkspace.QE[ 576 ]) );
acado_setBlockH11( 5, 6, &(acadoWorkspace.E[ 664 ]), &(acadoWorkspace.QE[ 672 ]) );
acado_setBlockH11( 5, 6, &(acadoWorkspace.E[ 768 ]), &(acadoWorkspace.QE[ 776 ]) );
acado_setBlockH11( 5, 6, &(acadoWorkspace.E[ 880 ]), &(acadoWorkspace.QE[ 888 ]) );
acado_setBlockH11( 5, 6, &(acadoWorkspace.E[ 1000 ]), &(acadoWorkspace.QE[ 1008 ]) );
acado_zeroBlockH11( 5, 7 );
acado_setBlockH11( 5, 7, &(acadoWorkspace.E[ 264 ]), &(acadoWorkspace.QE[ 280 ]) );
acado_setBlockH11( 5, 7, &(acadoWorkspace.E[ 328 ]), &(acadoWorkspace.QE[ 344 ]) );
acado_setBlockH11( 5, 7, &(acadoWorkspace.E[ 400 ]), &(acadoWorkspace.QE[ 416 ]) );
acado_setBlockH11( 5, 7, &(acadoWorkspace.E[ 480 ]), &(acadoWorkspace.QE[ 496 ]) );
acado_setBlockH11( 5, 7, &(acadoWorkspace.E[ 568 ]), &(acadoWorkspace.QE[ 584 ]) );
acado_setBlockH11( 5, 7, &(acadoWorkspace.E[ 664 ]), &(acadoWorkspace.QE[ 680 ]) );
acado_setBlockH11( 5, 7, &(acadoWorkspace.E[ 768 ]), &(acadoWorkspace.QE[ 784 ]) );
acado_setBlockH11( 5, 7, &(acadoWorkspace.E[ 880 ]), &(acadoWorkspace.QE[ 896 ]) );
acado_setBlockH11( 5, 7, &(acadoWorkspace.E[ 1000 ]), &(acadoWorkspace.QE[ 1016 ]) );
acado_zeroBlockH11( 5, 8 );
acado_setBlockH11( 5, 8, &(acadoWorkspace.E[ 328 ]), &(acadoWorkspace.QE[ 352 ]) );
acado_setBlockH11( 5, 8, &(acadoWorkspace.E[ 400 ]), &(acadoWorkspace.QE[ 424 ]) );
acado_setBlockH11( 5, 8, &(acadoWorkspace.E[ 480 ]), &(acadoWorkspace.QE[ 504 ]) );
acado_setBlockH11( 5, 8, &(acadoWorkspace.E[ 568 ]), &(acadoWorkspace.QE[ 592 ]) );
acado_setBlockH11( 5, 8, &(acadoWorkspace.E[ 664 ]), &(acadoWorkspace.QE[ 688 ]) );
acado_setBlockH11( 5, 8, &(acadoWorkspace.E[ 768 ]), &(acadoWorkspace.QE[ 792 ]) );
acado_setBlockH11( 5, 8, &(acadoWorkspace.E[ 880 ]), &(acadoWorkspace.QE[ 904 ]) );
acado_setBlockH11( 5, 8, &(acadoWorkspace.E[ 1000 ]), &(acadoWorkspace.QE[ 1024 ]) );
acado_zeroBlockH11( 5, 9 );
acado_setBlockH11( 5, 9, &(acadoWorkspace.E[ 400 ]), &(acadoWorkspace.QE[ 432 ]) );
acado_setBlockH11( 5, 9, &(acadoWorkspace.E[ 480 ]), &(acadoWorkspace.QE[ 512 ]) );
acado_setBlockH11( 5, 9, &(acadoWorkspace.E[ 568 ]), &(acadoWorkspace.QE[ 600 ]) );
acado_setBlockH11( 5, 9, &(acadoWorkspace.E[ 664 ]), &(acadoWorkspace.QE[ 696 ]) );
acado_setBlockH11( 5, 9, &(acadoWorkspace.E[ 768 ]), &(acadoWorkspace.QE[ 800 ]) );
acado_setBlockH11( 5, 9, &(acadoWorkspace.E[ 880 ]), &(acadoWorkspace.QE[ 912 ]) );
acado_setBlockH11( 5, 9, &(acadoWorkspace.E[ 1000 ]), &(acadoWorkspace.QE[ 1032 ]) );
acado_zeroBlockH11( 5, 10 );
acado_setBlockH11( 5, 10, &(acadoWorkspace.E[ 480 ]), &(acadoWorkspace.QE[ 520 ]) );
acado_setBlockH11( 5, 10, &(acadoWorkspace.E[ 568 ]), &(acadoWorkspace.QE[ 608 ]) );
acado_setBlockH11( 5, 10, &(acadoWorkspace.E[ 664 ]), &(acadoWorkspace.QE[ 704 ]) );
acado_setBlockH11( 5, 10, &(acadoWorkspace.E[ 768 ]), &(acadoWorkspace.QE[ 808 ]) );
acado_setBlockH11( 5, 10, &(acadoWorkspace.E[ 880 ]), &(acadoWorkspace.QE[ 920 ]) );
acado_setBlockH11( 5, 10, &(acadoWorkspace.E[ 1000 ]), &(acadoWorkspace.QE[ 1040 ]) );
acado_zeroBlockH11( 5, 11 );
acado_setBlockH11( 5, 11, &(acadoWorkspace.E[ 568 ]), &(acadoWorkspace.QE[ 616 ]) );
acado_setBlockH11( 5, 11, &(acadoWorkspace.E[ 664 ]), &(acadoWorkspace.QE[ 712 ]) );
acado_setBlockH11( 5, 11, &(acadoWorkspace.E[ 768 ]), &(acadoWorkspace.QE[ 816 ]) );
acado_setBlockH11( 5, 11, &(acadoWorkspace.E[ 880 ]), &(acadoWorkspace.QE[ 928 ]) );
acado_setBlockH11( 5, 11, &(acadoWorkspace.E[ 1000 ]), &(acadoWorkspace.QE[ 1048 ]) );
acado_zeroBlockH11( 5, 12 );
acado_setBlockH11( 5, 12, &(acadoWorkspace.E[ 664 ]), &(acadoWorkspace.QE[ 720 ]) );
acado_setBlockH11( 5, 12, &(acadoWorkspace.E[ 768 ]), &(acadoWorkspace.QE[ 824 ]) );
acado_setBlockH11( 5, 12, &(acadoWorkspace.E[ 880 ]), &(acadoWorkspace.QE[ 936 ]) );
acado_setBlockH11( 5, 12, &(acadoWorkspace.E[ 1000 ]), &(acadoWorkspace.QE[ 1056 ]) );
acado_zeroBlockH11( 5, 13 );
acado_setBlockH11( 5, 13, &(acadoWorkspace.E[ 768 ]), &(acadoWorkspace.QE[ 832 ]) );
acado_setBlockH11( 5, 13, &(acadoWorkspace.E[ 880 ]), &(acadoWorkspace.QE[ 944 ]) );
acado_setBlockH11( 5, 13, &(acadoWorkspace.E[ 1000 ]), &(acadoWorkspace.QE[ 1064 ]) );
acado_zeroBlockH11( 5, 14 );
acado_setBlockH11( 5, 14, &(acadoWorkspace.E[ 880 ]), &(acadoWorkspace.QE[ 952 ]) );
acado_setBlockH11( 5, 14, &(acadoWorkspace.E[ 1000 ]), &(acadoWorkspace.QE[ 1072 ]) );
acado_zeroBlockH11( 5, 15 );
acado_setBlockH11( 5, 15, &(acadoWorkspace.E[ 1000 ]), &(acadoWorkspace.QE[ 1080 ]) );
acado_setBlockH11_R1( 6, 6, &(acadoWorkspace.R1[ 24 ]) );
acado_setBlockH11( 6, 6, &(acadoWorkspace.E[ 216 ]), &(acadoWorkspace.QE[ 216 ]) );
acado_setBlockH11( 6, 6, &(acadoWorkspace.E[ 272 ]), &(acadoWorkspace.QE[ 272 ]) );
acado_setBlockH11( 6, 6, &(acadoWorkspace.E[ 336 ]), &(acadoWorkspace.QE[ 336 ]) );
acado_setBlockH11( 6, 6, &(acadoWorkspace.E[ 408 ]), &(acadoWorkspace.QE[ 408 ]) );
acado_setBlockH11( 6, 6, &(acadoWorkspace.E[ 488 ]), &(acadoWorkspace.QE[ 488 ]) );
acado_setBlockH11( 6, 6, &(acadoWorkspace.E[ 576 ]), &(acadoWorkspace.QE[ 576 ]) );
acado_setBlockH11( 6, 6, &(acadoWorkspace.E[ 672 ]), &(acadoWorkspace.QE[ 672 ]) );
acado_setBlockH11( 6, 6, &(acadoWorkspace.E[ 776 ]), &(acadoWorkspace.QE[ 776 ]) );
acado_setBlockH11( 6, 6, &(acadoWorkspace.E[ 888 ]), &(acadoWorkspace.QE[ 888 ]) );
acado_setBlockH11( 6, 6, &(acadoWorkspace.E[ 1008 ]), &(acadoWorkspace.QE[ 1008 ]) );
acado_zeroBlockH11( 6, 7 );
acado_setBlockH11( 6, 7, &(acadoWorkspace.E[ 272 ]), &(acadoWorkspace.QE[ 280 ]) );
acado_setBlockH11( 6, 7, &(acadoWorkspace.E[ 336 ]), &(acadoWorkspace.QE[ 344 ]) );
acado_setBlockH11( 6, 7, &(acadoWorkspace.E[ 408 ]), &(acadoWorkspace.QE[ 416 ]) );
acado_setBlockH11( 6, 7, &(acadoWorkspace.E[ 488 ]), &(acadoWorkspace.QE[ 496 ]) );
acado_setBlockH11( 6, 7, &(acadoWorkspace.E[ 576 ]), &(acadoWorkspace.QE[ 584 ]) );
acado_setBlockH11( 6, 7, &(acadoWorkspace.E[ 672 ]), &(acadoWorkspace.QE[ 680 ]) );
acado_setBlockH11( 6, 7, &(acadoWorkspace.E[ 776 ]), &(acadoWorkspace.QE[ 784 ]) );
acado_setBlockH11( 6, 7, &(acadoWorkspace.E[ 888 ]), &(acadoWorkspace.QE[ 896 ]) );
acado_setBlockH11( 6, 7, &(acadoWorkspace.E[ 1008 ]), &(acadoWorkspace.QE[ 1016 ]) );
acado_zeroBlockH11( 6, 8 );
acado_setBlockH11( 6, 8, &(acadoWorkspace.E[ 336 ]), &(acadoWorkspace.QE[ 352 ]) );
acado_setBlockH11( 6, 8, &(acadoWorkspace.E[ 408 ]), &(acadoWorkspace.QE[ 424 ]) );
acado_setBlockH11( 6, 8, &(acadoWorkspace.E[ 488 ]), &(acadoWorkspace.QE[ 504 ]) );
acado_setBlockH11( 6, 8, &(acadoWorkspace.E[ 576 ]), &(acadoWorkspace.QE[ 592 ]) );
acado_setBlockH11( 6, 8, &(acadoWorkspace.E[ 672 ]), &(acadoWorkspace.QE[ 688 ]) );
acado_setBlockH11( 6, 8, &(acadoWorkspace.E[ 776 ]), &(acadoWorkspace.QE[ 792 ]) );
acado_setBlockH11( 6, 8, &(acadoWorkspace.E[ 888 ]), &(acadoWorkspace.QE[ 904 ]) );
acado_setBlockH11( 6, 8, &(acadoWorkspace.E[ 1008 ]), &(acadoWorkspace.QE[ 1024 ]) );
acado_zeroBlockH11( 6, 9 );
acado_setBlockH11( 6, 9, &(acadoWorkspace.E[ 408 ]), &(acadoWorkspace.QE[ 432 ]) );
acado_setBlockH11( 6, 9, &(acadoWorkspace.E[ 488 ]), &(acadoWorkspace.QE[ 512 ]) );
acado_setBlockH11( 6, 9, &(acadoWorkspace.E[ 576 ]), &(acadoWorkspace.QE[ 600 ]) );
acado_setBlockH11( 6, 9, &(acadoWorkspace.E[ 672 ]), &(acadoWorkspace.QE[ 696 ]) );
acado_setBlockH11( 6, 9, &(acadoWorkspace.E[ 776 ]), &(acadoWorkspace.QE[ 800 ]) );
acado_setBlockH11( 6, 9, &(acadoWorkspace.E[ 888 ]), &(acadoWorkspace.QE[ 912 ]) );
acado_setBlockH11( 6, 9, &(acadoWorkspace.E[ 1008 ]), &(acadoWorkspace.QE[ 1032 ]) );
acado_zeroBlockH11( 6, 10 );
acado_setBlockH11( 6, 10, &(acadoWorkspace.E[ 488 ]), &(acadoWorkspace.QE[ 520 ]) );
acado_setBlockH11( 6, 10, &(acadoWorkspace.E[ 576 ]), &(acadoWorkspace.QE[ 608 ]) );
acado_setBlockH11( 6, 10, &(acadoWorkspace.E[ 672 ]), &(acadoWorkspace.QE[ 704 ]) );
acado_setBlockH11( 6, 10, &(acadoWorkspace.E[ 776 ]), &(acadoWorkspace.QE[ 808 ]) );
acado_setBlockH11( 6, 10, &(acadoWorkspace.E[ 888 ]), &(acadoWorkspace.QE[ 920 ]) );
acado_setBlockH11( 6, 10, &(acadoWorkspace.E[ 1008 ]), &(acadoWorkspace.QE[ 1040 ]) );
acado_zeroBlockH11( 6, 11 );
acado_setBlockH11( 6, 11, &(acadoWorkspace.E[ 576 ]), &(acadoWorkspace.QE[ 616 ]) );
acado_setBlockH11( 6, 11, &(acadoWorkspace.E[ 672 ]), &(acadoWorkspace.QE[ 712 ]) );
acado_setBlockH11( 6, 11, &(acadoWorkspace.E[ 776 ]), &(acadoWorkspace.QE[ 816 ]) );
acado_setBlockH11( 6, 11, &(acadoWorkspace.E[ 888 ]), &(acadoWorkspace.QE[ 928 ]) );
acado_setBlockH11( 6, 11, &(acadoWorkspace.E[ 1008 ]), &(acadoWorkspace.QE[ 1048 ]) );
acado_zeroBlockH11( 6, 12 );
acado_setBlockH11( 6, 12, &(acadoWorkspace.E[ 672 ]), &(acadoWorkspace.QE[ 720 ]) );
acado_setBlockH11( 6, 12, &(acadoWorkspace.E[ 776 ]), &(acadoWorkspace.QE[ 824 ]) );
acado_setBlockH11( 6, 12, &(acadoWorkspace.E[ 888 ]), &(acadoWorkspace.QE[ 936 ]) );
acado_setBlockH11( 6, 12, &(acadoWorkspace.E[ 1008 ]), &(acadoWorkspace.QE[ 1056 ]) );
acado_zeroBlockH11( 6, 13 );
acado_setBlockH11( 6, 13, &(acadoWorkspace.E[ 776 ]), &(acadoWorkspace.QE[ 832 ]) );
acado_setBlockH11( 6, 13, &(acadoWorkspace.E[ 888 ]), &(acadoWorkspace.QE[ 944 ]) );
acado_setBlockH11( 6, 13, &(acadoWorkspace.E[ 1008 ]), &(acadoWorkspace.QE[ 1064 ]) );
acado_zeroBlockH11( 6, 14 );
acado_setBlockH11( 6, 14, &(acadoWorkspace.E[ 888 ]), &(acadoWorkspace.QE[ 952 ]) );
acado_setBlockH11( 6, 14, &(acadoWorkspace.E[ 1008 ]), &(acadoWorkspace.QE[ 1072 ]) );
acado_zeroBlockH11( 6, 15 );
acado_setBlockH11( 6, 15, &(acadoWorkspace.E[ 1008 ]), &(acadoWorkspace.QE[ 1080 ]) );
acado_setBlockH11_R1( 7, 7, &(acadoWorkspace.R1[ 28 ]) );
acado_setBlockH11( 7, 7, &(acadoWorkspace.E[ 280 ]), &(acadoWorkspace.QE[ 280 ]) );
acado_setBlockH11( 7, 7, &(acadoWorkspace.E[ 344 ]), &(acadoWorkspace.QE[ 344 ]) );
acado_setBlockH11( 7, 7, &(acadoWorkspace.E[ 416 ]), &(acadoWorkspace.QE[ 416 ]) );
acado_setBlockH11( 7, 7, &(acadoWorkspace.E[ 496 ]), &(acadoWorkspace.QE[ 496 ]) );
acado_setBlockH11( 7, 7, &(acadoWorkspace.E[ 584 ]), &(acadoWorkspace.QE[ 584 ]) );
acado_setBlockH11( 7, 7, &(acadoWorkspace.E[ 680 ]), &(acadoWorkspace.QE[ 680 ]) );
acado_setBlockH11( 7, 7, &(acadoWorkspace.E[ 784 ]), &(acadoWorkspace.QE[ 784 ]) );
acado_setBlockH11( 7, 7, &(acadoWorkspace.E[ 896 ]), &(acadoWorkspace.QE[ 896 ]) );
acado_setBlockH11( 7, 7, &(acadoWorkspace.E[ 1016 ]), &(acadoWorkspace.QE[ 1016 ]) );
acado_zeroBlockH11( 7, 8 );
acado_setBlockH11( 7, 8, &(acadoWorkspace.E[ 344 ]), &(acadoWorkspace.QE[ 352 ]) );
acado_setBlockH11( 7, 8, &(acadoWorkspace.E[ 416 ]), &(acadoWorkspace.QE[ 424 ]) );
acado_setBlockH11( 7, 8, &(acadoWorkspace.E[ 496 ]), &(acadoWorkspace.QE[ 504 ]) );
acado_setBlockH11( 7, 8, &(acadoWorkspace.E[ 584 ]), &(acadoWorkspace.QE[ 592 ]) );
acado_setBlockH11( 7, 8, &(acadoWorkspace.E[ 680 ]), &(acadoWorkspace.QE[ 688 ]) );
acado_setBlockH11( 7, 8, &(acadoWorkspace.E[ 784 ]), &(acadoWorkspace.QE[ 792 ]) );
acado_setBlockH11( 7, 8, &(acadoWorkspace.E[ 896 ]), &(acadoWorkspace.QE[ 904 ]) );
acado_setBlockH11( 7, 8, &(acadoWorkspace.E[ 1016 ]), &(acadoWorkspace.QE[ 1024 ]) );
acado_zeroBlockH11( 7, 9 );
acado_setBlockH11( 7, 9, &(acadoWorkspace.E[ 416 ]), &(acadoWorkspace.QE[ 432 ]) );
acado_setBlockH11( 7, 9, &(acadoWorkspace.E[ 496 ]), &(acadoWorkspace.QE[ 512 ]) );
acado_setBlockH11( 7, 9, &(acadoWorkspace.E[ 584 ]), &(acadoWorkspace.QE[ 600 ]) );
acado_setBlockH11( 7, 9, &(acadoWorkspace.E[ 680 ]), &(acadoWorkspace.QE[ 696 ]) );
acado_setBlockH11( 7, 9, &(acadoWorkspace.E[ 784 ]), &(acadoWorkspace.QE[ 800 ]) );
acado_setBlockH11( 7, 9, &(acadoWorkspace.E[ 896 ]), &(acadoWorkspace.QE[ 912 ]) );
acado_setBlockH11( 7, 9, &(acadoWorkspace.E[ 1016 ]), &(acadoWorkspace.QE[ 1032 ]) );
acado_zeroBlockH11( 7, 10 );
acado_setBlockH11( 7, 10, &(acadoWorkspace.E[ 496 ]), &(acadoWorkspace.QE[ 520 ]) );
acado_setBlockH11( 7, 10, &(acadoWorkspace.E[ 584 ]), &(acadoWorkspace.QE[ 608 ]) );
acado_setBlockH11( 7, 10, &(acadoWorkspace.E[ 680 ]), &(acadoWorkspace.QE[ 704 ]) );
acado_setBlockH11( 7, 10, &(acadoWorkspace.E[ 784 ]), &(acadoWorkspace.QE[ 808 ]) );
acado_setBlockH11( 7, 10, &(acadoWorkspace.E[ 896 ]), &(acadoWorkspace.QE[ 920 ]) );
acado_setBlockH11( 7, 10, &(acadoWorkspace.E[ 1016 ]), &(acadoWorkspace.QE[ 1040 ]) );
acado_zeroBlockH11( 7, 11 );
acado_setBlockH11( 7, 11, &(acadoWorkspace.E[ 584 ]), &(acadoWorkspace.QE[ 616 ]) );
acado_setBlockH11( 7, 11, &(acadoWorkspace.E[ 680 ]), &(acadoWorkspace.QE[ 712 ]) );
acado_setBlockH11( 7, 11, &(acadoWorkspace.E[ 784 ]), &(acadoWorkspace.QE[ 816 ]) );
acado_setBlockH11( 7, 11, &(acadoWorkspace.E[ 896 ]), &(acadoWorkspace.QE[ 928 ]) );
acado_setBlockH11( 7, 11, &(acadoWorkspace.E[ 1016 ]), &(acadoWorkspace.QE[ 1048 ]) );
acado_zeroBlockH11( 7, 12 );
acado_setBlockH11( 7, 12, &(acadoWorkspace.E[ 680 ]), &(acadoWorkspace.QE[ 720 ]) );
acado_setBlockH11( 7, 12, &(acadoWorkspace.E[ 784 ]), &(acadoWorkspace.QE[ 824 ]) );
acado_setBlockH11( 7, 12, &(acadoWorkspace.E[ 896 ]), &(acadoWorkspace.QE[ 936 ]) );
acado_setBlockH11( 7, 12, &(acadoWorkspace.E[ 1016 ]), &(acadoWorkspace.QE[ 1056 ]) );
acado_zeroBlockH11( 7, 13 );
acado_setBlockH11( 7, 13, &(acadoWorkspace.E[ 784 ]), &(acadoWorkspace.QE[ 832 ]) );
acado_setBlockH11( 7, 13, &(acadoWorkspace.E[ 896 ]), &(acadoWorkspace.QE[ 944 ]) );
acado_setBlockH11( 7, 13, &(acadoWorkspace.E[ 1016 ]), &(acadoWorkspace.QE[ 1064 ]) );
acado_zeroBlockH11( 7, 14 );
acado_setBlockH11( 7, 14, &(acadoWorkspace.E[ 896 ]), &(acadoWorkspace.QE[ 952 ]) );
acado_setBlockH11( 7, 14, &(acadoWorkspace.E[ 1016 ]), &(acadoWorkspace.QE[ 1072 ]) );
acado_zeroBlockH11( 7, 15 );
acado_setBlockH11( 7, 15, &(acadoWorkspace.E[ 1016 ]), &(acadoWorkspace.QE[ 1080 ]) );
acado_setBlockH11_R1( 8, 8, &(acadoWorkspace.R1[ 32 ]) );
acado_setBlockH11( 8, 8, &(acadoWorkspace.E[ 352 ]), &(acadoWorkspace.QE[ 352 ]) );
acado_setBlockH11( 8, 8, &(acadoWorkspace.E[ 424 ]), &(acadoWorkspace.QE[ 424 ]) );
acado_setBlockH11( 8, 8, &(acadoWorkspace.E[ 504 ]), &(acadoWorkspace.QE[ 504 ]) );
acado_setBlockH11( 8, 8, &(acadoWorkspace.E[ 592 ]), &(acadoWorkspace.QE[ 592 ]) );
acado_setBlockH11( 8, 8, &(acadoWorkspace.E[ 688 ]), &(acadoWorkspace.QE[ 688 ]) );
acado_setBlockH11( 8, 8, &(acadoWorkspace.E[ 792 ]), &(acadoWorkspace.QE[ 792 ]) );
acado_setBlockH11( 8, 8, &(acadoWorkspace.E[ 904 ]), &(acadoWorkspace.QE[ 904 ]) );
acado_setBlockH11( 8, 8, &(acadoWorkspace.E[ 1024 ]), &(acadoWorkspace.QE[ 1024 ]) );
acado_zeroBlockH11( 8, 9 );
acado_setBlockH11( 8, 9, &(acadoWorkspace.E[ 424 ]), &(acadoWorkspace.QE[ 432 ]) );
acado_setBlockH11( 8, 9, &(acadoWorkspace.E[ 504 ]), &(acadoWorkspace.QE[ 512 ]) );
acado_setBlockH11( 8, 9, &(acadoWorkspace.E[ 592 ]), &(acadoWorkspace.QE[ 600 ]) );
acado_setBlockH11( 8, 9, &(acadoWorkspace.E[ 688 ]), &(acadoWorkspace.QE[ 696 ]) );
acado_setBlockH11( 8, 9, &(acadoWorkspace.E[ 792 ]), &(acadoWorkspace.QE[ 800 ]) );
acado_setBlockH11( 8, 9, &(acadoWorkspace.E[ 904 ]), &(acadoWorkspace.QE[ 912 ]) );
acado_setBlockH11( 8, 9, &(acadoWorkspace.E[ 1024 ]), &(acadoWorkspace.QE[ 1032 ]) );
acado_zeroBlockH11( 8, 10 );
acado_setBlockH11( 8, 10, &(acadoWorkspace.E[ 504 ]), &(acadoWorkspace.QE[ 520 ]) );
acado_setBlockH11( 8, 10, &(acadoWorkspace.E[ 592 ]), &(acadoWorkspace.QE[ 608 ]) );
acado_setBlockH11( 8, 10, &(acadoWorkspace.E[ 688 ]), &(acadoWorkspace.QE[ 704 ]) );
acado_setBlockH11( 8, 10, &(acadoWorkspace.E[ 792 ]), &(acadoWorkspace.QE[ 808 ]) );
acado_setBlockH11( 8, 10, &(acadoWorkspace.E[ 904 ]), &(acadoWorkspace.QE[ 920 ]) );
acado_setBlockH11( 8, 10, &(acadoWorkspace.E[ 1024 ]), &(acadoWorkspace.QE[ 1040 ]) );
acado_zeroBlockH11( 8, 11 );
acado_setBlockH11( 8, 11, &(acadoWorkspace.E[ 592 ]), &(acadoWorkspace.QE[ 616 ]) );
acado_setBlockH11( 8, 11, &(acadoWorkspace.E[ 688 ]), &(acadoWorkspace.QE[ 712 ]) );
acado_setBlockH11( 8, 11, &(acadoWorkspace.E[ 792 ]), &(acadoWorkspace.QE[ 816 ]) );
acado_setBlockH11( 8, 11, &(acadoWorkspace.E[ 904 ]), &(acadoWorkspace.QE[ 928 ]) );
acado_setBlockH11( 8, 11, &(acadoWorkspace.E[ 1024 ]), &(acadoWorkspace.QE[ 1048 ]) );
acado_zeroBlockH11( 8, 12 );
acado_setBlockH11( 8, 12, &(acadoWorkspace.E[ 688 ]), &(acadoWorkspace.QE[ 720 ]) );
acado_setBlockH11( 8, 12, &(acadoWorkspace.E[ 792 ]), &(acadoWorkspace.QE[ 824 ]) );
acado_setBlockH11( 8, 12, &(acadoWorkspace.E[ 904 ]), &(acadoWorkspace.QE[ 936 ]) );
acado_setBlockH11( 8, 12, &(acadoWorkspace.E[ 1024 ]), &(acadoWorkspace.QE[ 1056 ]) );
acado_zeroBlockH11( 8, 13 );
acado_setBlockH11( 8, 13, &(acadoWorkspace.E[ 792 ]), &(acadoWorkspace.QE[ 832 ]) );
acado_setBlockH11( 8, 13, &(acadoWorkspace.E[ 904 ]), &(acadoWorkspace.QE[ 944 ]) );
acado_setBlockH11( 8, 13, &(acadoWorkspace.E[ 1024 ]), &(acadoWorkspace.QE[ 1064 ]) );
acado_zeroBlockH11( 8, 14 );
acado_setBlockH11( 8, 14, &(acadoWorkspace.E[ 904 ]), &(acadoWorkspace.QE[ 952 ]) );
acado_setBlockH11( 8, 14, &(acadoWorkspace.E[ 1024 ]), &(acadoWorkspace.QE[ 1072 ]) );
acado_zeroBlockH11( 8, 15 );
acado_setBlockH11( 8, 15, &(acadoWorkspace.E[ 1024 ]), &(acadoWorkspace.QE[ 1080 ]) );
acado_setBlockH11_R1( 9, 9, &(acadoWorkspace.R1[ 36 ]) );
acado_setBlockH11( 9, 9, &(acadoWorkspace.E[ 432 ]), &(acadoWorkspace.QE[ 432 ]) );
acado_setBlockH11( 9, 9, &(acadoWorkspace.E[ 512 ]), &(acadoWorkspace.QE[ 512 ]) );
acado_setBlockH11( 9, 9, &(acadoWorkspace.E[ 600 ]), &(acadoWorkspace.QE[ 600 ]) );
acado_setBlockH11( 9, 9, &(acadoWorkspace.E[ 696 ]), &(acadoWorkspace.QE[ 696 ]) );
acado_setBlockH11( 9, 9, &(acadoWorkspace.E[ 800 ]), &(acadoWorkspace.QE[ 800 ]) );
acado_setBlockH11( 9, 9, &(acadoWorkspace.E[ 912 ]), &(acadoWorkspace.QE[ 912 ]) );
acado_setBlockH11( 9, 9, &(acadoWorkspace.E[ 1032 ]), &(acadoWorkspace.QE[ 1032 ]) );
acado_zeroBlockH11( 9, 10 );
acado_setBlockH11( 9, 10, &(acadoWorkspace.E[ 512 ]), &(acadoWorkspace.QE[ 520 ]) );
acado_setBlockH11( 9, 10, &(acadoWorkspace.E[ 600 ]), &(acadoWorkspace.QE[ 608 ]) );
acado_setBlockH11( 9, 10, &(acadoWorkspace.E[ 696 ]), &(acadoWorkspace.QE[ 704 ]) );
acado_setBlockH11( 9, 10, &(acadoWorkspace.E[ 800 ]), &(acadoWorkspace.QE[ 808 ]) );
acado_setBlockH11( 9, 10, &(acadoWorkspace.E[ 912 ]), &(acadoWorkspace.QE[ 920 ]) );
acado_setBlockH11( 9, 10, &(acadoWorkspace.E[ 1032 ]), &(acadoWorkspace.QE[ 1040 ]) );
acado_zeroBlockH11( 9, 11 );
acado_setBlockH11( 9, 11, &(acadoWorkspace.E[ 600 ]), &(acadoWorkspace.QE[ 616 ]) );
acado_setBlockH11( 9, 11, &(acadoWorkspace.E[ 696 ]), &(acadoWorkspace.QE[ 712 ]) );
acado_setBlockH11( 9, 11, &(acadoWorkspace.E[ 800 ]), &(acadoWorkspace.QE[ 816 ]) );
acado_setBlockH11( 9, 11, &(acadoWorkspace.E[ 912 ]), &(acadoWorkspace.QE[ 928 ]) );
acado_setBlockH11( 9, 11, &(acadoWorkspace.E[ 1032 ]), &(acadoWorkspace.QE[ 1048 ]) );
acado_zeroBlockH11( 9, 12 );
acado_setBlockH11( 9, 12, &(acadoWorkspace.E[ 696 ]), &(acadoWorkspace.QE[ 720 ]) );
acado_setBlockH11( 9, 12, &(acadoWorkspace.E[ 800 ]), &(acadoWorkspace.QE[ 824 ]) );
acado_setBlockH11( 9, 12, &(acadoWorkspace.E[ 912 ]), &(acadoWorkspace.QE[ 936 ]) );
acado_setBlockH11( 9, 12, &(acadoWorkspace.E[ 1032 ]), &(acadoWorkspace.QE[ 1056 ]) );
acado_zeroBlockH11( 9, 13 );
acado_setBlockH11( 9, 13, &(acadoWorkspace.E[ 800 ]), &(acadoWorkspace.QE[ 832 ]) );
acado_setBlockH11( 9, 13, &(acadoWorkspace.E[ 912 ]), &(acadoWorkspace.QE[ 944 ]) );
acado_setBlockH11( 9, 13, &(acadoWorkspace.E[ 1032 ]), &(acadoWorkspace.QE[ 1064 ]) );
acado_zeroBlockH11( 9, 14 );
acado_setBlockH11( 9, 14, &(acadoWorkspace.E[ 912 ]), &(acadoWorkspace.QE[ 952 ]) );
acado_setBlockH11( 9, 14, &(acadoWorkspace.E[ 1032 ]), &(acadoWorkspace.QE[ 1072 ]) );
acado_zeroBlockH11( 9, 15 );
acado_setBlockH11( 9, 15, &(acadoWorkspace.E[ 1032 ]), &(acadoWorkspace.QE[ 1080 ]) );
acado_setBlockH11_R1( 10, 10, &(acadoWorkspace.R1[ 40 ]) );
acado_setBlockH11( 10, 10, &(acadoWorkspace.E[ 520 ]), &(acadoWorkspace.QE[ 520 ]) );
acado_setBlockH11( 10, 10, &(acadoWorkspace.E[ 608 ]), &(acadoWorkspace.QE[ 608 ]) );
acado_setBlockH11( 10, 10, &(acadoWorkspace.E[ 704 ]), &(acadoWorkspace.QE[ 704 ]) );
acado_setBlockH11( 10, 10, &(acadoWorkspace.E[ 808 ]), &(acadoWorkspace.QE[ 808 ]) );
acado_setBlockH11( 10, 10, &(acadoWorkspace.E[ 920 ]), &(acadoWorkspace.QE[ 920 ]) );
acado_setBlockH11( 10, 10, &(acadoWorkspace.E[ 1040 ]), &(acadoWorkspace.QE[ 1040 ]) );
acado_zeroBlockH11( 10, 11 );
acado_setBlockH11( 10, 11, &(acadoWorkspace.E[ 608 ]), &(acadoWorkspace.QE[ 616 ]) );
acado_setBlockH11( 10, 11, &(acadoWorkspace.E[ 704 ]), &(acadoWorkspace.QE[ 712 ]) );
acado_setBlockH11( 10, 11, &(acadoWorkspace.E[ 808 ]), &(acadoWorkspace.QE[ 816 ]) );
acado_setBlockH11( 10, 11, &(acadoWorkspace.E[ 920 ]), &(acadoWorkspace.QE[ 928 ]) );
acado_setBlockH11( 10, 11, &(acadoWorkspace.E[ 1040 ]), &(acadoWorkspace.QE[ 1048 ]) );
acado_zeroBlockH11( 10, 12 );
acado_setBlockH11( 10, 12, &(acadoWorkspace.E[ 704 ]), &(acadoWorkspace.QE[ 720 ]) );
acado_setBlockH11( 10, 12, &(acadoWorkspace.E[ 808 ]), &(acadoWorkspace.QE[ 824 ]) );
acado_setBlockH11( 10, 12, &(acadoWorkspace.E[ 920 ]), &(acadoWorkspace.QE[ 936 ]) );
acado_setBlockH11( 10, 12, &(acadoWorkspace.E[ 1040 ]), &(acadoWorkspace.QE[ 1056 ]) );
acado_zeroBlockH11( 10, 13 );
acado_setBlockH11( 10, 13, &(acadoWorkspace.E[ 808 ]), &(acadoWorkspace.QE[ 832 ]) );
acado_setBlockH11( 10, 13, &(acadoWorkspace.E[ 920 ]), &(acadoWorkspace.QE[ 944 ]) );
acado_setBlockH11( 10, 13, &(acadoWorkspace.E[ 1040 ]), &(acadoWorkspace.QE[ 1064 ]) );
acado_zeroBlockH11( 10, 14 );
acado_setBlockH11( 10, 14, &(acadoWorkspace.E[ 920 ]), &(acadoWorkspace.QE[ 952 ]) );
acado_setBlockH11( 10, 14, &(acadoWorkspace.E[ 1040 ]), &(acadoWorkspace.QE[ 1072 ]) );
acado_zeroBlockH11( 10, 15 );
acado_setBlockH11( 10, 15, &(acadoWorkspace.E[ 1040 ]), &(acadoWorkspace.QE[ 1080 ]) );
acado_setBlockH11_R1( 11, 11, &(acadoWorkspace.R1[ 44 ]) );
acado_setBlockH11( 11, 11, &(acadoWorkspace.E[ 616 ]), &(acadoWorkspace.QE[ 616 ]) );
acado_setBlockH11( 11, 11, &(acadoWorkspace.E[ 712 ]), &(acadoWorkspace.QE[ 712 ]) );
acado_setBlockH11( 11, 11, &(acadoWorkspace.E[ 816 ]), &(acadoWorkspace.QE[ 816 ]) );
acado_setBlockH11( 11, 11, &(acadoWorkspace.E[ 928 ]), &(acadoWorkspace.QE[ 928 ]) );
acado_setBlockH11( 11, 11, &(acadoWorkspace.E[ 1048 ]), &(acadoWorkspace.QE[ 1048 ]) );
acado_zeroBlockH11( 11, 12 );
acado_setBlockH11( 11, 12, &(acadoWorkspace.E[ 712 ]), &(acadoWorkspace.QE[ 720 ]) );
acado_setBlockH11( 11, 12, &(acadoWorkspace.E[ 816 ]), &(acadoWorkspace.QE[ 824 ]) );
acado_setBlockH11( 11, 12, &(acadoWorkspace.E[ 928 ]), &(acadoWorkspace.QE[ 936 ]) );
acado_setBlockH11( 11, 12, &(acadoWorkspace.E[ 1048 ]), &(acadoWorkspace.QE[ 1056 ]) );
acado_zeroBlockH11( 11, 13 );
acado_setBlockH11( 11, 13, &(acadoWorkspace.E[ 816 ]), &(acadoWorkspace.QE[ 832 ]) );
acado_setBlockH11( 11, 13, &(acadoWorkspace.E[ 928 ]), &(acadoWorkspace.QE[ 944 ]) );
acado_setBlockH11( 11, 13, &(acadoWorkspace.E[ 1048 ]), &(acadoWorkspace.QE[ 1064 ]) );
acado_zeroBlockH11( 11, 14 );
acado_setBlockH11( 11, 14, &(acadoWorkspace.E[ 928 ]), &(acadoWorkspace.QE[ 952 ]) );
acado_setBlockH11( 11, 14, &(acadoWorkspace.E[ 1048 ]), &(acadoWorkspace.QE[ 1072 ]) );
acado_zeroBlockH11( 11, 15 );
acado_setBlockH11( 11, 15, &(acadoWorkspace.E[ 1048 ]), &(acadoWorkspace.QE[ 1080 ]) );
acado_setBlockH11_R1( 12, 12, &(acadoWorkspace.R1[ 48 ]) );
acado_setBlockH11( 12, 12, &(acadoWorkspace.E[ 720 ]), &(acadoWorkspace.QE[ 720 ]) );
acado_setBlockH11( 12, 12, &(acadoWorkspace.E[ 824 ]), &(acadoWorkspace.QE[ 824 ]) );
acado_setBlockH11( 12, 12, &(acadoWorkspace.E[ 936 ]), &(acadoWorkspace.QE[ 936 ]) );
acado_setBlockH11( 12, 12, &(acadoWorkspace.E[ 1056 ]), &(acadoWorkspace.QE[ 1056 ]) );
acado_zeroBlockH11( 12, 13 );
acado_setBlockH11( 12, 13, &(acadoWorkspace.E[ 824 ]), &(acadoWorkspace.QE[ 832 ]) );
acado_setBlockH11( 12, 13, &(acadoWorkspace.E[ 936 ]), &(acadoWorkspace.QE[ 944 ]) );
acado_setBlockH11( 12, 13, &(acadoWorkspace.E[ 1056 ]), &(acadoWorkspace.QE[ 1064 ]) );
acado_zeroBlockH11( 12, 14 );
acado_setBlockH11( 12, 14, &(acadoWorkspace.E[ 936 ]), &(acadoWorkspace.QE[ 952 ]) );
acado_setBlockH11( 12, 14, &(acadoWorkspace.E[ 1056 ]), &(acadoWorkspace.QE[ 1072 ]) );
acado_zeroBlockH11( 12, 15 );
acado_setBlockH11( 12, 15, &(acadoWorkspace.E[ 1056 ]), &(acadoWorkspace.QE[ 1080 ]) );
acado_setBlockH11_R1( 13, 13, &(acadoWorkspace.R1[ 52 ]) );
acado_setBlockH11( 13, 13, &(acadoWorkspace.E[ 832 ]), &(acadoWorkspace.QE[ 832 ]) );
acado_setBlockH11( 13, 13, &(acadoWorkspace.E[ 944 ]), &(acadoWorkspace.QE[ 944 ]) );
acado_setBlockH11( 13, 13, &(acadoWorkspace.E[ 1064 ]), &(acadoWorkspace.QE[ 1064 ]) );
acado_zeroBlockH11( 13, 14 );
acado_setBlockH11( 13, 14, &(acadoWorkspace.E[ 944 ]), &(acadoWorkspace.QE[ 952 ]) );
acado_setBlockH11( 13, 14, &(acadoWorkspace.E[ 1064 ]), &(acadoWorkspace.QE[ 1072 ]) );
acado_zeroBlockH11( 13, 15 );
acado_setBlockH11( 13, 15, &(acadoWorkspace.E[ 1064 ]), &(acadoWorkspace.QE[ 1080 ]) );
acado_setBlockH11_R1( 14, 14, &(acadoWorkspace.R1[ 56 ]) );
acado_setBlockH11( 14, 14, &(acadoWorkspace.E[ 952 ]), &(acadoWorkspace.QE[ 952 ]) );
acado_setBlockH11( 14, 14, &(acadoWorkspace.E[ 1072 ]), &(acadoWorkspace.QE[ 1072 ]) );
acado_zeroBlockH11( 14, 15 );
acado_setBlockH11( 14, 15, &(acadoWorkspace.E[ 1072 ]), &(acadoWorkspace.QE[ 1080 ]) );
acado_setBlockH11_R1( 15, 15, &(acadoWorkspace.R1[ 60 ]) );
acado_setBlockH11( 15, 15, &(acadoWorkspace.E[ 1080 ]), &(acadoWorkspace.QE[ 1080 ]) );
acado_copyHTH( 1, 0 );
acado_copyHTH( 2, 0 );
acado_copyHTH( 2, 1 );
acado_copyHTH( 3, 0 );
acado_copyHTH( 3, 1 );
acado_copyHTH( 3, 2 );
acado_copyHTH( 4, 0 );
acado_copyHTH( 4, 1 );
acado_copyHTH( 4, 2 );
acado_copyHTH( 4, 3 );
acado_copyHTH( 5, 0 );
acado_copyHTH( 5, 1 );
acado_copyHTH( 5, 2 );
acado_copyHTH( 5, 3 );
acado_copyHTH( 5, 4 );
acado_copyHTH( 6, 0 );
acado_copyHTH( 6, 1 );
acado_copyHTH( 6, 2 );
acado_copyHTH( 6, 3 );
acado_copyHTH( 6, 4 );
acado_copyHTH( 6, 5 );
acado_copyHTH( 7, 0 );
acado_copyHTH( 7, 1 );
acado_copyHTH( 7, 2 );
acado_copyHTH( 7, 3 );
acado_copyHTH( 7, 4 );
acado_copyHTH( 7, 5 );
acado_copyHTH( 7, 6 );
acado_copyHTH( 8, 0 );
acado_copyHTH( 8, 1 );
acado_copyHTH( 8, 2 );
acado_copyHTH( 8, 3 );
acado_copyHTH( 8, 4 );
acado_copyHTH( 8, 5 );
acado_copyHTH( 8, 6 );
acado_copyHTH( 8, 7 );
acado_copyHTH( 9, 0 );
acado_copyHTH( 9, 1 );
acado_copyHTH( 9, 2 );
acado_copyHTH( 9, 3 );
acado_copyHTH( 9, 4 );
acado_copyHTH( 9, 5 );
acado_copyHTH( 9, 6 );
acado_copyHTH( 9, 7 );
acado_copyHTH( 9, 8 );
acado_copyHTH( 10, 0 );
acado_copyHTH( 10, 1 );
acado_copyHTH( 10, 2 );
acado_copyHTH( 10, 3 );
acado_copyHTH( 10, 4 );
acado_copyHTH( 10, 5 );
acado_copyHTH( 10, 6 );
acado_copyHTH( 10, 7 );
acado_copyHTH( 10, 8 );
acado_copyHTH( 10, 9 );
acado_copyHTH( 11, 0 );
acado_copyHTH( 11, 1 );
acado_copyHTH( 11, 2 );
acado_copyHTH( 11, 3 );
acado_copyHTH( 11, 4 );
acado_copyHTH( 11, 5 );
acado_copyHTH( 11, 6 );
acado_copyHTH( 11, 7 );
acado_copyHTH( 11, 8 );
acado_copyHTH( 11, 9 );
acado_copyHTH( 11, 10 );
acado_copyHTH( 12, 0 );
acado_copyHTH( 12, 1 );
acado_copyHTH( 12, 2 );
acado_copyHTH( 12, 3 );
acado_copyHTH( 12, 4 );
acado_copyHTH( 12, 5 );
acado_copyHTH( 12, 6 );
acado_copyHTH( 12, 7 );
acado_copyHTH( 12, 8 );
acado_copyHTH( 12, 9 );
acado_copyHTH( 12, 10 );
acado_copyHTH( 12, 11 );
acado_copyHTH( 13, 0 );
acado_copyHTH( 13, 1 );
acado_copyHTH( 13, 2 );
acado_copyHTH( 13, 3 );
acado_copyHTH( 13, 4 );
acado_copyHTH( 13, 5 );
acado_copyHTH( 13, 6 );
acado_copyHTH( 13, 7 );
acado_copyHTH( 13, 8 );
acado_copyHTH( 13, 9 );
acado_copyHTH( 13, 10 );
acado_copyHTH( 13, 11 );
acado_copyHTH( 13, 12 );
acado_copyHTH( 14, 0 );
acado_copyHTH( 14, 1 );
acado_copyHTH( 14, 2 );
acado_copyHTH( 14, 3 );
acado_copyHTH( 14, 4 );
acado_copyHTH( 14, 5 );
acado_copyHTH( 14, 6 );
acado_copyHTH( 14, 7 );
acado_copyHTH( 14, 8 );
acado_copyHTH( 14, 9 );
acado_copyHTH( 14, 10 );
acado_copyHTH( 14, 11 );
acado_copyHTH( 14, 12 );
acado_copyHTH( 14, 13 );
acado_copyHTH( 15, 0 );
acado_copyHTH( 15, 1 );
acado_copyHTH( 15, 2 );
acado_copyHTH( 15, 3 );
acado_copyHTH( 15, 4 );
acado_copyHTH( 15, 5 );
acado_copyHTH( 15, 6 );
acado_copyHTH( 15, 7 );
acado_copyHTH( 15, 8 );
acado_copyHTH( 15, 9 );
acado_copyHTH( 15, 10 );
acado_copyHTH( 15, 11 );
acado_copyHTH( 15, 12 );
acado_copyHTH( 15, 13 );
acado_copyHTH( 15, 14 );
acado_multQ1d( &(acadoWorkspace.Q1[ 16 ]), acadoWorkspace.d, acadoWorkspace.Qd );
acado_multQ1d( &(acadoWorkspace.Q1[ 32 ]), &(acadoWorkspace.d[ 4 ]), &(acadoWorkspace.Qd[ 4 ]) );
acado_multQ1d( &(acadoWorkspace.Q1[ 48 ]), &(acadoWorkspace.d[ 8 ]), &(acadoWorkspace.Qd[ 8 ]) );
acado_multQ1d( &(acadoWorkspace.Q1[ 64 ]), &(acadoWorkspace.d[ 12 ]), &(acadoWorkspace.Qd[ 12 ]) );
acado_multQ1d( &(acadoWorkspace.Q1[ 80 ]), &(acadoWorkspace.d[ 16 ]), &(acadoWorkspace.Qd[ 16 ]) );
acado_multQ1d( &(acadoWorkspace.Q1[ 96 ]), &(acadoWorkspace.d[ 20 ]), &(acadoWorkspace.Qd[ 20 ]) );
acado_multQ1d( &(acadoWorkspace.Q1[ 112 ]), &(acadoWorkspace.d[ 24 ]), &(acadoWorkspace.Qd[ 24 ]) );
acado_multQ1d( &(acadoWorkspace.Q1[ 128 ]), &(acadoWorkspace.d[ 28 ]), &(acadoWorkspace.Qd[ 28 ]) );
acado_multQ1d( &(acadoWorkspace.Q1[ 144 ]), &(acadoWorkspace.d[ 32 ]), &(acadoWorkspace.Qd[ 32 ]) );
acado_multQ1d( &(acadoWorkspace.Q1[ 160 ]), &(acadoWorkspace.d[ 36 ]), &(acadoWorkspace.Qd[ 36 ]) );
acado_multQ1d( &(acadoWorkspace.Q1[ 176 ]), &(acadoWorkspace.d[ 40 ]), &(acadoWorkspace.Qd[ 40 ]) );
acado_multQ1d( &(acadoWorkspace.Q1[ 192 ]), &(acadoWorkspace.d[ 44 ]), &(acadoWorkspace.Qd[ 44 ]) );
acado_multQ1d( &(acadoWorkspace.Q1[ 208 ]), &(acadoWorkspace.d[ 48 ]), &(acadoWorkspace.Qd[ 48 ]) );
acado_multQ1d( &(acadoWorkspace.Q1[ 224 ]), &(acadoWorkspace.d[ 52 ]), &(acadoWorkspace.Qd[ 52 ]) );
acado_multQ1d( &(acadoWorkspace.Q1[ 240 ]), &(acadoWorkspace.d[ 56 ]), &(acadoWorkspace.Qd[ 56 ]) );
acado_multQN1d( acadoWorkspace.QN1, &(acadoWorkspace.d[ 60 ]), &(acadoWorkspace.Qd[ 60 ]) );
acado_macETSlu( acadoWorkspace.QE, acadoWorkspace.g );
acado_macETSlu( &(acadoWorkspace.QE[ 8 ]), acadoWorkspace.g );
acado_macETSlu( &(acadoWorkspace.QE[ 24 ]), acadoWorkspace.g );
acado_macETSlu( &(acadoWorkspace.QE[ 48 ]), acadoWorkspace.g );
acado_macETSlu( &(acadoWorkspace.QE[ 80 ]), acadoWorkspace.g );
acado_macETSlu( &(acadoWorkspace.QE[ 120 ]), acadoWorkspace.g );
acado_macETSlu( &(acadoWorkspace.QE[ 168 ]), acadoWorkspace.g );
acado_macETSlu( &(acadoWorkspace.QE[ 224 ]), acadoWorkspace.g );
acado_macETSlu( &(acadoWorkspace.QE[ 288 ]), acadoWorkspace.g );
acado_macETSlu( &(acadoWorkspace.QE[ 360 ]), acadoWorkspace.g );
acado_macETSlu( &(acadoWorkspace.QE[ 440 ]), acadoWorkspace.g );
acado_macETSlu( &(acadoWorkspace.QE[ 528 ]), acadoWorkspace.g );
acado_macETSlu( &(acadoWorkspace.QE[ 624 ]), acadoWorkspace.g );
acado_macETSlu( &(acadoWorkspace.QE[ 728 ]), acadoWorkspace.g );
acado_macETSlu( &(acadoWorkspace.QE[ 840 ]), acadoWorkspace.g );
acado_macETSlu( &(acadoWorkspace.QE[ 960 ]), acadoWorkspace.g );
acado_macETSlu( &(acadoWorkspace.QE[ 16 ]), &(acadoWorkspace.g[ 2 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 32 ]), &(acadoWorkspace.g[ 2 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 56 ]), &(acadoWorkspace.g[ 2 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 88 ]), &(acadoWorkspace.g[ 2 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 128 ]), &(acadoWorkspace.g[ 2 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 176 ]), &(acadoWorkspace.g[ 2 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 232 ]), &(acadoWorkspace.g[ 2 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 296 ]), &(acadoWorkspace.g[ 2 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 368 ]), &(acadoWorkspace.g[ 2 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 448 ]), &(acadoWorkspace.g[ 2 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 536 ]), &(acadoWorkspace.g[ 2 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 632 ]), &(acadoWorkspace.g[ 2 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 736 ]), &(acadoWorkspace.g[ 2 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 848 ]), &(acadoWorkspace.g[ 2 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 968 ]), &(acadoWorkspace.g[ 2 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 40 ]), &(acadoWorkspace.g[ 4 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 64 ]), &(acadoWorkspace.g[ 4 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 96 ]), &(acadoWorkspace.g[ 4 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 136 ]), &(acadoWorkspace.g[ 4 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 184 ]), &(acadoWorkspace.g[ 4 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 240 ]), &(acadoWorkspace.g[ 4 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 304 ]), &(acadoWorkspace.g[ 4 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 376 ]), &(acadoWorkspace.g[ 4 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 456 ]), &(acadoWorkspace.g[ 4 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 544 ]), &(acadoWorkspace.g[ 4 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 640 ]), &(acadoWorkspace.g[ 4 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 744 ]), &(acadoWorkspace.g[ 4 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 856 ]), &(acadoWorkspace.g[ 4 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 976 ]), &(acadoWorkspace.g[ 4 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 72 ]), &(acadoWorkspace.g[ 6 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 104 ]), &(acadoWorkspace.g[ 6 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 144 ]), &(acadoWorkspace.g[ 6 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 192 ]), &(acadoWorkspace.g[ 6 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 248 ]), &(acadoWorkspace.g[ 6 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 312 ]), &(acadoWorkspace.g[ 6 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 384 ]), &(acadoWorkspace.g[ 6 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 464 ]), &(acadoWorkspace.g[ 6 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 552 ]), &(acadoWorkspace.g[ 6 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 648 ]), &(acadoWorkspace.g[ 6 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 752 ]), &(acadoWorkspace.g[ 6 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 864 ]), &(acadoWorkspace.g[ 6 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 984 ]), &(acadoWorkspace.g[ 6 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 112 ]), &(acadoWorkspace.g[ 8 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 152 ]), &(acadoWorkspace.g[ 8 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 200 ]), &(acadoWorkspace.g[ 8 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 256 ]), &(acadoWorkspace.g[ 8 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 320 ]), &(acadoWorkspace.g[ 8 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 392 ]), &(acadoWorkspace.g[ 8 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 472 ]), &(acadoWorkspace.g[ 8 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 560 ]), &(acadoWorkspace.g[ 8 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 656 ]), &(acadoWorkspace.g[ 8 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 760 ]), &(acadoWorkspace.g[ 8 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 872 ]), &(acadoWorkspace.g[ 8 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 992 ]), &(acadoWorkspace.g[ 8 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 160 ]), &(acadoWorkspace.g[ 10 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 208 ]), &(acadoWorkspace.g[ 10 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 264 ]), &(acadoWorkspace.g[ 10 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 328 ]), &(acadoWorkspace.g[ 10 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 400 ]), &(acadoWorkspace.g[ 10 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 480 ]), &(acadoWorkspace.g[ 10 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 568 ]), &(acadoWorkspace.g[ 10 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 664 ]), &(acadoWorkspace.g[ 10 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 768 ]), &(acadoWorkspace.g[ 10 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 880 ]), &(acadoWorkspace.g[ 10 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 1000 ]), &(acadoWorkspace.g[ 10 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 216 ]), &(acadoWorkspace.g[ 12 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 272 ]), &(acadoWorkspace.g[ 12 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 336 ]), &(acadoWorkspace.g[ 12 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 408 ]), &(acadoWorkspace.g[ 12 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 488 ]), &(acadoWorkspace.g[ 12 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 576 ]), &(acadoWorkspace.g[ 12 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 672 ]), &(acadoWorkspace.g[ 12 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 776 ]), &(acadoWorkspace.g[ 12 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 888 ]), &(acadoWorkspace.g[ 12 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 1008 ]), &(acadoWorkspace.g[ 12 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 280 ]), &(acadoWorkspace.g[ 14 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 344 ]), &(acadoWorkspace.g[ 14 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 416 ]), &(acadoWorkspace.g[ 14 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 496 ]), &(acadoWorkspace.g[ 14 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 584 ]), &(acadoWorkspace.g[ 14 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 680 ]), &(acadoWorkspace.g[ 14 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 784 ]), &(acadoWorkspace.g[ 14 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 896 ]), &(acadoWorkspace.g[ 14 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 1016 ]), &(acadoWorkspace.g[ 14 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 352 ]), &(acadoWorkspace.g[ 16 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 424 ]), &(acadoWorkspace.g[ 16 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 504 ]), &(acadoWorkspace.g[ 16 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 592 ]), &(acadoWorkspace.g[ 16 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 688 ]), &(acadoWorkspace.g[ 16 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 792 ]), &(acadoWorkspace.g[ 16 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 904 ]), &(acadoWorkspace.g[ 16 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 1024 ]), &(acadoWorkspace.g[ 16 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 432 ]), &(acadoWorkspace.g[ 18 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 512 ]), &(acadoWorkspace.g[ 18 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 600 ]), &(acadoWorkspace.g[ 18 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 696 ]), &(acadoWorkspace.g[ 18 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 800 ]), &(acadoWorkspace.g[ 18 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 912 ]), &(acadoWorkspace.g[ 18 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 1032 ]), &(acadoWorkspace.g[ 18 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 520 ]), &(acadoWorkspace.g[ 20 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 608 ]), &(acadoWorkspace.g[ 20 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 704 ]), &(acadoWorkspace.g[ 20 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 808 ]), &(acadoWorkspace.g[ 20 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 920 ]), &(acadoWorkspace.g[ 20 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 1040 ]), &(acadoWorkspace.g[ 20 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 616 ]), &(acadoWorkspace.g[ 22 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 712 ]), &(acadoWorkspace.g[ 22 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 816 ]), &(acadoWorkspace.g[ 22 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 928 ]), &(acadoWorkspace.g[ 22 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 1048 ]), &(acadoWorkspace.g[ 22 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 720 ]), &(acadoWorkspace.g[ 24 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 824 ]), &(acadoWorkspace.g[ 24 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 936 ]), &(acadoWorkspace.g[ 24 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 1056 ]), &(acadoWorkspace.g[ 24 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 832 ]), &(acadoWorkspace.g[ 26 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 944 ]), &(acadoWorkspace.g[ 26 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 1064 ]), &(acadoWorkspace.g[ 26 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 952 ]), &(acadoWorkspace.g[ 28 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 1072 ]), &(acadoWorkspace.g[ 28 ]) );
acado_macETSlu( &(acadoWorkspace.QE[ 1080 ]), &(acadoWorkspace.g[ 30 ]) );
acadoWorkspace.lb[0] = (real_t)-1.0000000000000000e+00 - acadoVariables.u[0];
acadoWorkspace.lb[1] = (real_t)-5.9999999999999998e-01 - acadoVariables.u[1];
acadoWorkspace.lb[2] = (real_t)-1.0000000000000000e+00 - acadoVariables.u[2];
acadoWorkspace.lb[3] = (real_t)-5.9999999999999998e-01 - acadoVariables.u[3];
acadoWorkspace.lb[4] = (real_t)-1.0000000000000000e+00 - acadoVariables.u[4];
acadoWorkspace.lb[5] = (real_t)-5.9999999999999998e-01 - acadoVariables.u[5];
acadoWorkspace.lb[6] = (real_t)-1.0000000000000000e+00 - acadoVariables.u[6];
acadoWorkspace.lb[7] = (real_t)-5.9999999999999998e-01 - acadoVariables.u[7];
acadoWorkspace.lb[8] = (real_t)-1.0000000000000000e+00 - acadoVariables.u[8];
acadoWorkspace.lb[9] = (real_t)-5.9999999999999998e-01 - acadoVariables.u[9];
acadoWorkspace.lb[10] = (real_t)-1.0000000000000000e+00 - acadoVariables.u[10];
acadoWorkspace.lb[11] = (real_t)-5.9999999999999998e-01 - acadoVariables.u[11];
acadoWorkspace.lb[12] = (real_t)-1.0000000000000000e+00 - acadoVariables.u[12];
acadoWorkspace.lb[13] = (real_t)-5.9999999999999998e-01 - acadoVariables.u[13];
acadoWorkspace.lb[14] = (real_t)-1.0000000000000000e+00 - acadoVariables.u[14];
acadoWorkspace.lb[15] = (real_t)-5.9999999999999998e-01 - acadoVariables.u[15];
acadoWorkspace.lb[16] = (real_t)-1.0000000000000000e+00 - acadoVariables.u[16];
acadoWorkspace.lb[17] = (real_t)-5.9999999999999998e-01 - acadoVariables.u[17];
acadoWorkspace.lb[18] = (real_t)-1.0000000000000000e+00 - acadoVariables.u[18];
acadoWorkspace.lb[19] = (real_t)-5.9999999999999998e-01 - acadoVariables.u[19];
acadoWorkspace.lb[20] = (real_t)-1.0000000000000000e+00 - acadoVariables.u[20];
acadoWorkspace.lb[21] = (real_t)-5.9999999999999998e-01 - acadoVariables.u[21];
acadoWorkspace.lb[22] = (real_t)-1.0000000000000000e+00 - acadoVariables.u[22];
acadoWorkspace.lb[23] = (real_t)-5.9999999999999998e-01 - acadoVariables.u[23];
acadoWorkspace.lb[24] = (real_t)-1.0000000000000000e+00 - acadoVariables.u[24];
acadoWorkspace.lb[25] = (real_t)-5.9999999999999998e-01 - acadoVariables.u[25];
acadoWorkspace.lb[26] = (real_t)-1.0000000000000000e+00 - acadoVariables.u[26];
acadoWorkspace.lb[27] = (real_t)-5.9999999999999998e-01 - acadoVariables.u[27];
acadoWorkspace.lb[28] = (real_t)-1.0000000000000000e+00 - acadoVariables.u[28];
acadoWorkspace.lb[29] = (real_t)-5.9999999999999998e-01 - acadoVariables.u[29];
acadoWorkspace.lb[30] = (real_t)-1.0000000000000000e+00 - acadoVariables.u[30];
acadoWorkspace.lb[31] = (real_t)-5.9999999999999998e-01 - acadoVariables.u[31];
acadoWorkspace.ub[0] = (real_t)1.0000000000000000e+00 - acadoVariables.u[0];
acadoWorkspace.ub[1] = (real_t)5.9999999999999998e-01 - acadoVariables.u[1];
acadoWorkspace.ub[2] = (real_t)1.0000000000000000e+00 - acadoVariables.u[2];
acadoWorkspace.ub[3] = (real_t)5.9999999999999998e-01 - acadoVariables.u[3];
acadoWorkspace.ub[4] = (real_t)1.0000000000000000e+00 - acadoVariables.u[4];
acadoWorkspace.ub[5] = (real_t)5.9999999999999998e-01 - acadoVariables.u[5];
acadoWorkspace.ub[6] = (real_t)1.0000000000000000e+00 - acadoVariables.u[6];
acadoWorkspace.ub[7] = (real_t)5.9999999999999998e-01 - acadoVariables.u[7];
acadoWorkspace.ub[8] = (real_t)1.0000000000000000e+00 - acadoVariables.u[8];
acadoWorkspace.ub[9] = (real_t)5.9999999999999998e-01 - acadoVariables.u[9];
acadoWorkspace.ub[10] = (real_t)1.0000000000000000e+00 - acadoVariables.u[10];
acadoWorkspace.ub[11] = (real_t)5.9999999999999998e-01 - acadoVariables.u[11];
acadoWorkspace.ub[12] = (real_t)1.0000000000000000e+00 - acadoVariables.u[12];
acadoWorkspace.ub[13] = (real_t)5.9999999999999998e-01 - acadoVariables.u[13];
acadoWorkspace.ub[14] = (real_t)1.0000000000000000e+00 - acadoVariables.u[14];
acadoWorkspace.ub[15] = (real_t)5.9999999999999998e-01 - acadoVariables.u[15];
acadoWorkspace.ub[16] = (real_t)1.0000000000000000e+00 - acadoVariables.u[16];
acadoWorkspace.ub[17] = (real_t)5.9999999999999998e-01 - acadoVariables.u[17];
acadoWorkspace.ub[18] = (real_t)1.0000000000000000e+00 - acadoVariables.u[18];
acadoWorkspace.ub[19] = (real_t)5.9999999999999998e-01 - acadoVariables.u[19];
acadoWorkspace.ub[20] = (real_t)1.0000000000000000e+00 - acadoVariables.u[20];
acadoWorkspace.ub[21] = (real_t)5.9999999999999998e-01 - acadoVariables.u[21];
acadoWorkspace.ub[22] = (real_t)1.0000000000000000e+00 - acadoVariables.u[22];
acadoWorkspace.ub[23] = (real_t)5.9999999999999998e-01 - acadoVariables.u[23];
acadoWorkspace.ub[24] = (real_t)1.0000000000000000e+00 - acadoVariables.u[24];
acadoWorkspace.ub[25] = (real_t)5.9999999999999998e-01 - acadoVariables.u[25];
acadoWorkspace.ub[26] = (real_t)1.0000000000000000e+00 - acadoVariables.u[26];
acadoWorkspace.ub[27] = (real_t)5.9999999999999998e-01 - acadoVariables.u[27];
acadoWorkspace.ub[28] = (real_t)1.0000000000000000e+00 - acadoVariables.u[28];
acadoWorkspace.ub[29] = (real_t)5.9999999999999998e-01 - acadoVariables.u[29];
acadoWorkspace.ub[30] = (real_t)1.0000000000000000e+00 - acadoVariables.u[30];
acadoWorkspace.ub[31] = (real_t)5.9999999999999998e-01 - acadoVariables.u[31];
}
void acado_condenseFdb( )
{
acadoWorkspace.Dx0[0] = acadoVariables.x0[0] - acadoVariables.x[0];
acadoWorkspace.Dx0[1] = acadoVariables.x0[1] - acadoVariables.x[1];
acadoWorkspace.Dx0[2] = acadoVariables.x0[2] - acadoVariables.x[2];
acadoWorkspace.Dx0[3] = acadoVariables.x0[3] - acadoVariables.x[3];
acadoWorkspace.Dy[0] -= acadoVariables.y[0];
acadoWorkspace.Dy[1] -= acadoVariables.y[1];
acadoWorkspace.Dy[2] -= acadoVariables.y[2];
acadoWorkspace.Dy[3] -= acadoVariables.y[3];
acadoWorkspace.Dy[4] -= acadoVariables.y[4];
acadoWorkspace.Dy[5] -= acadoVariables.y[5];
acadoWorkspace.Dy[6] -= acadoVariables.y[6];
acadoWorkspace.Dy[7] -= acadoVariables.y[7];
acadoWorkspace.Dy[8] -= acadoVariables.y[8];
acadoWorkspace.Dy[9] -= acadoVariables.y[9];
acadoWorkspace.Dy[10] -= acadoVariables.y[10];
acadoWorkspace.Dy[11] -= acadoVariables.y[11];
acadoWorkspace.Dy[12] -= acadoVariables.y[12];
acadoWorkspace.Dy[13] -= acadoVariables.y[13];
acadoWorkspace.Dy[14] -= acadoVariables.y[14];
acadoWorkspace.Dy[15] -= acadoVariables.y[15];
acadoWorkspace.Dy[16] -= acadoVariables.y[16];
acadoWorkspace.Dy[17] -= acadoVariables.y[17];
acadoWorkspace.Dy[18] -= acadoVariables.y[18];
acadoWorkspace.Dy[19] -= acadoVariables.y[19];
acadoWorkspace.Dy[20] -= acadoVariables.y[20];
acadoWorkspace.Dy[21] -= acadoVariables.y[21];
acadoWorkspace.Dy[22] -= acadoVariables.y[22];
acadoWorkspace.Dy[23] -= acadoVariables.y[23];
acadoWorkspace.Dy[24] -= acadoVariables.y[24];
acadoWorkspace.Dy[25] -= acadoVariables.y[25];
acadoWorkspace.Dy[26] -= acadoVariables.y[26];
acadoWorkspace.Dy[27] -= acadoVariables.y[27];
acadoWorkspace.Dy[28] -= acadoVariables.y[28];
acadoWorkspace.Dy[29] -= acadoVariables.y[29];
acadoWorkspace.Dy[30] -= acadoVariables.y[30];
acadoWorkspace.Dy[31] -= acadoVariables.y[31];
acadoWorkspace.Dy[32] -= acadoVariables.y[32];
acadoWorkspace.Dy[33] -= acadoVariables.y[33];
acadoWorkspace.Dy[34] -= acadoVariables.y[34];
acadoWorkspace.Dy[35] -= acadoVariables.y[35];
acadoWorkspace.Dy[36] -= acadoVariables.y[36];
acadoWorkspace.Dy[37] -= acadoVariables.y[37];
acadoWorkspace.Dy[38] -= acadoVariables.y[38];
acadoWorkspace.Dy[39] -= acadoVariables.y[39];
acadoWorkspace.Dy[40] -= acadoVariables.y[40];
acadoWorkspace.Dy[41] -= acadoVariables.y[41];
acadoWorkspace.Dy[42] -= acadoVariables.y[42];
acadoWorkspace.Dy[43] -= acadoVariables.y[43];
acadoWorkspace.Dy[44] -= acadoVariables.y[44];
acadoWorkspace.Dy[45] -= acadoVariables.y[45];
acadoWorkspace.Dy[46] -= acadoVariables.y[46];
acadoWorkspace.Dy[47] -= acadoVariables.y[47];
acadoWorkspace.Dy[48] -= acadoVariables.y[48];
acadoWorkspace.Dy[49] -= acadoVariables.y[49];
acadoWorkspace.Dy[50] -= acadoVariables.y[50];
acadoWorkspace.Dy[51] -= acadoVariables.y[51];
acadoWorkspace.Dy[52] -= acadoVariables.y[52];
acadoWorkspace.Dy[53] -= acadoVariables.y[53];
acadoWorkspace.Dy[54] -= acadoVariables.y[54];
acadoWorkspace.Dy[55] -= acadoVariables.y[55];
acadoWorkspace.Dy[56] -= acadoVariables.y[56];
acadoWorkspace.Dy[57] -= acadoVariables.y[57];
acadoWorkspace.Dy[58] -= acadoVariables.y[58];
acadoWorkspace.Dy[59] -= acadoVariables.y[59];
acadoWorkspace.Dy[60] -= acadoVariables.y[60];
acadoWorkspace.Dy[61] -= acadoVariables.y[61];
acadoWorkspace.Dy[62] -= acadoVariables.y[62];
acadoWorkspace.Dy[63] -= acadoVariables.y[63];
acadoWorkspace.Dy[64] -= acadoVariables.y[64];
acadoWorkspace.Dy[65] -= acadoVariables.y[65];
acadoWorkspace.Dy[66] -= acadoVariables.y[66];
acadoWorkspace.Dy[67] -= acadoVariables.y[67];
acadoWorkspace.Dy[68] -= acadoVariables.y[68];
acadoWorkspace.Dy[69] -= acadoVariables.y[69];
acadoWorkspace.Dy[70] -= acadoVariables.y[70];
acadoWorkspace.Dy[71] -= acadoVariables.y[71];
acadoWorkspace.Dy[72] -= acadoVariables.y[72];
acadoWorkspace.Dy[73] -= acadoVariables.y[73];
acadoWorkspace.Dy[74] -= acadoVariables.y[74];
acadoWorkspace.Dy[75] -= acadoVariables.y[75];
acadoWorkspace.Dy[76] -= acadoVariables.y[76];
acadoWorkspace.Dy[77] -= acadoVariables.y[77];
acadoWorkspace.Dy[78] -= acadoVariables.y[78];
acadoWorkspace.Dy[79] -= acadoVariables.y[79];
acadoWorkspace.Dy[80] -= acadoVariables.y[80];
acadoWorkspace.Dy[81] -= acadoVariables.y[81];
acadoWorkspace.Dy[82] -= acadoVariables.y[82];
acadoWorkspace.Dy[83] -= acadoVariables.y[83];
acadoWorkspace.Dy[84] -= acadoVariables.y[84];
acadoWorkspace.Dy[85] -= acadoVariables.y[85];
acadoWorkspace.Dy[86] -= acadoVariables.y[86];
acadoWorkspace.Dy[87] -= acadoVariables.y[87];
acadoWorkspace.Dy[88] -= acadoVariables.y[88];
acadoWorkspace.Dy[89] -= acadoVariables.y[89];
acadoWorkspace.Dy[90] -= acadoVariables.y[90];
acadoWorkspace.Dy[91] -= acadoVariables.y[91];
acadoWorkspace.Dy[92] -= acadoVariables.y[92];
acadoWorkspace.Dy[93] -= acadoVariables.y[93];
acadoWorkspace.Dy[94] -= acadoVariables.y[94];
acadoWorkspace.Dy[95] -= acadoVariables.y[95];
acadoWorkspace.DyN[0] -= acadoVariables.yN[0];
acadoWorkspace.DyN[1] -= acadoVariables.yN[1];
acadoWorkspace.DyN[2] -= acadoVariables.yN[2];
acadoWorkspace.DyN[3] -= acadoVariables.yN[3];
acado_multRDy( acadoWorkspace.R2, acadoWorkspace.Dy, acadoWorkspace.g );
acado_multRDy( &(acadoWorkspace.R2[ 12 ]), &(acadoWorkspace.Dy[ 6 ]), &(acadoWorkspace.g[ 2 ]) );
acado_multRDy( &(acadoWorkspace.R2[ 24 ]), &(acadoWorkspace.Dy[ 12 ]), &(acadoWorkspace.g[ 4 ]) );
acado_multRDy( &(acadoWorkspace.R2[ 36 ]), &(acadoWorkspace.Dy[ 18 ]), &(acadoWorkspace.g[ 6 ]) );
acado_multRDy( &(acadoWorkspace.R2[ 48 ]), &(acadoWorkspace.Dy[ 24 ]), &(acadoWorkspace.g[ 8 ]) );
acado_multRDy( &(acadoWorkspace.R2[ 60 ]), &(acadoWorkspace.Dy[ 30 ]), &(acadoWorkspace.g[ 10 ]) );
acado_multRDy( &(acadoWorkspace.R2[ 72 ]), &(acadoWorkspace.Dy[ 36 ]), &(acadoWorkspace.g[ 12 ]) );
acado_multRDy( &(acadoWorkspace.R2[ 84 ]), &(acadoWorkspace.Dy[ 42 ]), &(acadoWorkspace.g[ 14 ]) );
acado_multRDy( &(acadoWorkspace.R2[ 96 ]), &(acadoWorkspace.Dy[ 48 ]), &(acadoWorkspace.g[ 16 ]) );
acado_multRDy( &(acadoWorkspace.R2[ 108 ]), &(acadoWorkspace.Dy[ 54 ]), &(acadoWorkspace.g[ 18 ]) );
acado_multRDy( &(acadoWorkspace.R2[ 120 ]), &(acadoWorkspace.Dy[ 60 ]), &(acadoWorkspace.g[ 20 ]) );
acado_multRDy( &(acadoWorkspace.R2[ 132 ]), &(acadoWorkspace.Dy[ 66 ]), &(acadoWorkspace.g[ 22 ]) );
acado_multRDy( &(acadoWorkspace.R2[ 144 ]), &(acadoWorkspace.Dy[ 72 ]), &(acadoWorkspace.g[ 24 ]) );
acado_multRDy( &(acadoWorkspace.R2[ 156 ]), &(acadoWorkspace.Dy[ 78 ]), &(acadoWorkspace.g[ 26 ]) );
acado_multRDy( &(acadoWorkspace.R2[ 168 ]), &(acadoWorkspace.Dy[ 84 ]), &(acadoWorkspace.g[ 28 ]) );
acado_multRDy( &(acadoWorkspace.R2[ 180 ]), &(acadoWorkspace.Dy[ 90 ]), &(acadoWorkspace.g[ 30 ]) );
acado_multQDy( acadoWorkspace.Q2, acadoWorkspace.Dy, acadoWorkspace.QDy );
acado_multQDy( &(acadoWorkspace.Q2[ 24 ]), &(acadoWorkspace.Dy[ 6 ]), &(acadoWorkspace.QDy[ 4 ]) );
acado_multQDy( &(acadoWorkspace.Q2[ 48 ]), &(acadoWorkspace.Dy[ 12 ]), &(acadoWorkspace.QDy[ 8 ]) );
acado_multQDy( &(acadoWorkspace.Q2[ 72 ]), &(acadoWorkspace.Dy[ 18 ]), &(acadoWorkspace.QDy[ 12 ]) );
acado_multQDy( &(acadoWorkspace.Q2[ 96 ]), &(acadoWorkspace.Dy[ 24 ]), &(acadoWorkspace.QDy[ 16 ]) );
acado_multQDy( &(acadoWorkspace.Q2[ 120 ]), &(acadoWorkspace.Dy[ 30 ]), &(acadoWorkspace.QDy[ 20 ]) );
acado_multQDy( &(acadoWorkspace.Q2[ 144 ]), &(acadoWorkspace.Dy[ 36 ]), &(acadoWorkspace.QDy[ 24 ]) );
acado_multQDy( &(acadoWorkspace.Q2[ 168 ]), &(acadoWorkspace.Dy[ 42 ]), &(acadoWorkspace.QDy[ 28 ]) );
acado_multQDy( &(acadoWorkspace.Q2[ 192 ]), &(acadoWorkspace.Dy[ 48 ]), &(acadoWorkspace.QDy[ 32 ]) );
acado_multQDy( &(acadoWorkspace.Q2[ 216 ]), &(acadoWorkspace.Dy[ 54 ]), &(acadoWorkspace.QDy[ 36 ]) );
acado_multQDy( &(acadoWorkspace.Q2[ 240 ]), &(acadoWorkspace.Dy[ 60 ]), &(acadoWorkspace.QDy[ 40 ]) );
acado_multQDy( &(acadoWorkspace.Q2[ 264 ]), &(acadoWorkspace.Dy[ 66 ]), &(acadoWorkspace.QDy[ 44 ]) );
acado_multQDy( &(acadoWorkspace.Q2[ 288 ]), &(acadoWorkspace.Dy[ 72 ]), &(acadoWorkspace.QDy[ 48 ]) );
acado_multQDy( &(acadoWorkspace.Q2[ 312 ]), &(acadoWorkspace.Dy[ 78 ]), &(acadoWorkspace.QDy[ 52 ]) );
acado_multQDy( &(acadoWorkspace.Q2[ 336 ]), &(acadoWorkspace.Dy[ 84 ]), &(acadoWorkspace.QDy[ 56 ]) );
acado_multQDy( &(acadoWorkspace.Q2[ 360 ]), &(acadoWorkspace.Dy[ 90 ]), &(acadoWorkspace.QDy[ 60 ]) );
acadoWorkspace.QDy[64] = + acadoWorkspace.QN2[0]*acadoWorkspace.DyN[0] + acadoWorkspace.QN2[1]*acadoWorkspace.DyN[1] + acadoWorkspace.QN2[2]*acadoWorkspace.DyN[2] + acadoWorkspace.QN2[3]*acadoWorkspace.DyN[3];
acadoWorkspace.QDy[65] = + acadoWorkspace.QN2[4]*acadoWorkspace.DyN[0] + acadoWorkspace.QN2[5]*acadoWorkspace.DyN[1] + acadoWorkspace.QN2[6]*acadoWorkspace.DyN[2] + acadoWorkspace.QN2[7]*acadoWorkspace.DyN[3];
acadoWorkspace.QDy[66] = + acadoWorkspace.QN2[8]*acadoWorkspace.DyN[0] + acadoWorkspace.QN2[9]*acadoWorkspace.DyN[1] + acadoWorkspace.QN2[10]*acadoWorkspace.DyN[2] + acadoWorkspace.QN2[11]*acadoWorkspace.DyN[3];
acadoWorkspace.QDy[67] = + acadoWorkspace.QN2[12]*acadoWorkspace.DyN[0] + acadoWorkspace.QN2[13]*acadoWorkspace.DyN[1] + acadoWorkspace.QN2[14]*acadoWorkspace.DyN[2] + acadoWorkspace.QN2[15]*acadoWorkspace.DyN[3];
acadoWorkspace.QDy[4] += acadoWorkspace.Qd[0];
acadoWorkspace.QDy[5] += acadoWorkspace.Qd[1];
acadoWorkspace.QDy[6] += acadoWorkspace.Qd[2];
acadoWorkspace.QDy[7] += acadoWorkspace.Qd[3];
acadoWorkspace.QDy[8] += acadoWorkspace.Qd[4];
acadoWorkspace.QDy[9] += acadoWorkspace.Qd[5];
acadoWorkspace.QDy[10] += acadoWorkspace.Qd[6];
acadoWorkspace.QDy[11] += acadoWorkspace.Qd[7];
acadoWorkspace.QDy[12] += acadoWorkspace.Qd[8];
acadoWorkspace.QDy[13] += acadoWorkspace.Qd[9];
acadoWorkspace.QDy[14] += acadoWorkspace.Qd[10];
acadoWorkspace.QDy[15] += acadoWorkspace.Qd[11];
acadoWorkspace.QDy[16] += acadoWorkspace.Qd[12];
acadoWorkspace.QDy[17] += acadoWorkspace.Qd[13];
acadoWorkspace.QDy[18] += acadoWorkspace.Qd[14];
acadoWorkspace.QDy[19] += acadoWorkspace.Qd[15];
acadoWorkspace.QDy[20] += acadoWorkspace.Qd[16];
acadoWorkspace.QDy[21] += acadoWorkspace.Qd[17];
acadoWorkspace.QDy[22] += acadoWorkspace.Qd[18];
acadoWorkspace.QDy[23] += acadoWorkspace.Qd[19];
acadoWorkspace.QDy[24] += acadoWorkspace.Qd[20];
acadoWorkspace.QDy[25] += acadoWorkspace.Qd[21];
acadoWorkspace.QDy[26] += acadoWorkspace.Qd[22];
acadoWorkspace.QDy[27] += acadoWorkspace.Qd[23];
acadoWorkspace.QDy[28] += acadoWorkspace.Qd[24];
acadoWorkspace.QDy[29] += acadoWorkspace.Qd[25];
acadoWorkspace.QDy[30] += acadoWorkspace.Qd[26];
acadoWorkspace.QDy[31] += acadoWorkspace.Qd[27];
acadoWorkspace.QDy[32] += acadoWorkspace.Qd[28];
acadoWorkspace.QDy[33] += acadoWorkspace.Qd[29];
acadoWorkspace.QDy[34] += acadoWorkspace.Qd[30];
acadoWorkspace.QDy[35] += acadoWorkspace.Qd[31];
acadoWorkspace.QDy[36] += acadoWorkspace.Qd[32];
acadoWorkspace.QDy[37] += acadoWorkspace.Qd[33];
acadoWorkspace.QDy[38] += acadoWorkspace.Qd[34];
acadoWorkspace.QDy[39] += acadoWorkspace.Qd[35];
acadoWorkspace.QDy[40] += acadoWorkspace.Qd[36];
acadoWorkspace.QDy[41] += acadoWorkspace.Qd[37];
acadoWorkspace.QDy[42] += acadoWorkspace.Qd[38];
acadoWorkspace.QDy[43] += acadoWorkspace.Qd[39];
acadoWorkspace.QDy[44] += acadoWorkspace.Qd[40];
acadoWorkspace.QDy[45] += acadoWorkspace.Qd[41];
acadoWorkspace.QDy[46] += acadoWorkspace.Qd[42];
acadoWorkspace.QDy[47] += acadoWorkspace.Qd[43];
acadoWorkspace.QDy[48] += acadoWorkspace.Qd[44];
acadoWorkspace.QDy[49] += acadoWorkspace.Qd[45];
acadoWorkspace.QDy[50] += acadoWorkspace.Qd[46];
acadoWorkspace.QDy[51] += acadoWorkspace.Qd[47];
acadoWorkspace.QDy[52] += acadoWorkspace.Qd[48];
acadoWorkspace.QDy[53] += acadoWorkspace.Qd[49];
acadoWorkspace.QDy[54] += acadoWorkspace.Qd[50];
acadoWorkspace.QDy[55] += acadoWorkspace.Qd[51];
acadoWorkspace.QDy[56] += acadoWorkspace.Qd[52];
acadoWorkspace.QDy[57] += acadoWorkspace.Qd[53];
acadoWorkspace.QDy[58] += acadoWorkspace.Qd[54];
acadoWorkspace.QDy[59] += acadoWorkspace.Qd[55];
acadoWorkspace.QDy[60] += acadoWorkspace.Qd[56];
acadoWorkspace.QDy[61] += acadoWorkspace.Qd[57];
acadoWorkspace.QDy[62] += acadoWorkspace.Qd[58];
acadoWorkspace.QDy[63] += acadoWorkspace.Qd[59];
acadoWorkspace.QDy[64] += acadoWorkspace.Qd[60];
acadoWorkspace.QDy[65] += acadoWorkspace.Qd[61];
acadoWorkspace.QDy[66] += acadoWorkspace.Qd[62];
acadoWorkspace.QDy[67] += acadoWorkspace.Qd[63];
acado_multEQDy( acadoWorkspace.E, &(acadoWorkspace.QDy[ 4 ]), acadoWorkspace.g );
acado_multEQDy( &(acadoWorkspace.E[ 8 ]), &(acadoWorkspace.QDy[ 8 ]), acadoWorkspace.g );
acado_multEQDy( &(acadoWorkspace.E[ 24 ]), &(acadoWorkspace.QDy[ 12 ]), acadoWorkspace.g );
acado_multEQDy( &(acadoWorkspace.E[ 48 ]), &(acadoWorkspace.QDy[ 16 ]), acadoWorkspace.g );
acado_multEQDy( &(acadoWorkspace.E[ 80 ]), &(acadoWorkspace.QDy[ 20 ]), acadoWorkspace.g );
acado_multEQDy( &(acadoWorkspace.E[ 120 ]), &(acadoWorkspace.QDy[ 24 ]), acadoWorkspace.g );
acado_multEQDy( &(acadoWorkspace.E[ 168 ]), &(acadoWorkspace.QDy[ 28 ]), acadoWorkspace.g );
acado_multEQDy( &(acadoWorkspace.E[ 224 ]), &(acadoWorkspace.QDy[ 32 ]), acadoWorkspace.g );
acado_multEQDy( &(acadoWorkspace.E[ 288 ]), &(acadoWorkspace.QDy[ 36 ]), acadoWorkspace.g );
acado_multEQDy( &(acadoWorkspace.E[ 360 ]), &(acadoWorkspace.QDy[ 40 ]), acadoWorkspace.g );
acado_multEQDy( &(acadoWorkspace.E[ 440 ]), &(acadoWorkspace.QDy[ 44 ]), acadoWorkspace.g );
acado_multEQDy( &(acadoWorkspace.E[ 528 ]), &(acadoWorkspace.QDy[ 48 ]), acadoWorkspace.g );
acado_multEQDy( &(acadoWorkspace.E[ 624 ]), &(acadoWorkspace.QDy[ 52 ]), acadoWorkspace.g );
acado_multEQDy( &(acadoWorkspace.E[ 728 ]), &(acadoWorkspace.QDy[ 56 ]), acadoWorkspace.g );
acado_multEQDy( &(acadoWorkspace.E[ 840 ]), &(acadoWorkspace.QDy[ 60 ]), acadoWorkspace.g );
acado_multEQDy( &(acadoWorkspace.E[ 960 ]), &(acadoWorkspace.QDy[ 64 ]), acadoWorkspace.g );
acado_multEQDy( &(acadoWorkspace.E[ 16 ]), &(acadoWorkspace.QDy[ 8 ]), &(acadoWorkspace.g[ 2 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 32 ]), &(acadoWorkspace.QDy[ 12 ]), &(acadoWorkspace.g[ 2 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 56 ]), &(acadoWorkspace.QDy[ 16 ]), &(acadoWorkspace.g[ 2 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 88 ]), &(acadoWorkspace.QDy[ 20 ]), &(acadoWorkspace.g[ 2 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 128 ]), &(acadoWorkspace.QDy[ 24 ]), &(acadoWorkspace.g[ 2 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 176 ]), &(acadoWorkspace.QDy[ 28 ]), &(acadoWorkspace.g[ 2 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 232 ]), &(acadoWorkspace.QDy[ 32 ]), &(acadoWorkspace.g[ 2 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 296 ]), &(acadoWorkspace.QDy[ 36 ]), &(acadoWorkspace.g[ 2 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 368 ]), &(acadoWorkspace.QDy[ 40 ]), &(acadoWorkspace.g[ 2 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 448 ]), &(acadoWorkspace.QDy[ 44 ]), &(acadoWorkspace.g[ 2 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 536 ]), &(acadoWorkspace.QDy[ 48 ]), &(acadoWorkspace.g[ 2 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 632 ]), &(acadoWorkspace.QDy[ 52 ]), &(acadoWorkspace.g[ 2 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 736 ]), &(acadoWorkspace.QDy[ 56 ]), &(acadoWorkspace.g[ 2 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 848 ]), &(acadoWorkspace.QDy[ 60 ]), &(acadoWorkspace.g[ 2 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 968 ]), &(acadoWorkspace.QDy[ 64 ]), &(acadoWorkspace.g[ 2 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 40 ]), &(acadoWorkspace.QDy[ 12 ]), &(acadoWorkspace.g[ 4 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 64 ]), &(acadoWorkspace.QDy[ 16 ]), &(acadoWorkspace.g[ 4 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 96 ]), &(acadoWorkspace.QDy[ 20 ]), &(acadoWorkspace.g[ 4 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 136 ]), &(acadoWorkspace.QDy[ 24 ]), &(acadoWorkspace.g[ 4 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 184 ]), &(acadoWorkspace.QDy[ 28 ]), &(acadoWorkspace.g[ 4 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 240 ]), &(acadoWorkspace.QDy[ 32 ]), &(acadoWorkspace.g[ 4 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 304 ]), &(acadoWorkspace.QDy[ 36 ]), &(acadoWorkspace.g[ 4 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 376 ]), &(acadoWorkspace.QDy[ 40 ]), &(acadoWorkspace.g[ 4 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 456 ]), &(acadoWorkspace.QDy[ 44 ]), &(acadoWorkspace.g[ 4 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 544 ]), &(acadoWorkspace.QDy[ 48 ]), &(acadoWorkspace.g[ 4 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 640 ]), &(acadoWorkspace.QDy[ 52 ]), &(acadoWorkspace.g[ 4 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 744 ]), &(acadoWorkspace.QDy[ 56 ]), &(acadoWorkspace.g[ 4 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 856 ]), &(acadoWorkspace.QDy[ 60 ]), &(acadoWorkspace.g[ 4 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 976 ]), &(acadoWorkspace.QDy[ 64 ]), &(acadoWorkspace.g[ 4 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 72 ]), &(acadoWorkspace.QDy[ 16 ]), &(acadoWorkspace.g[ 6 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 104 ]), &(acadoWorkspace.QDy[ 20 ]), &(acadoWorkspace.g[ 6 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 144 ]), &(acadoWorkspace.QDy[ 24 ]), &(acadoWorkspace.g[ 6 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 192 ]), &(acadoWorkspace.QDy[ 28 ]), &(acadoWorkspace.g[ 6 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 248 ]), &(acadoWorkspace.QDy[ 32 ]), &(acadoWorkspace.g[ 6 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 312 ]), &(acadoWorkspace.QDy[ 36 ]), &(acadoWorkspace.g[ 6 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 384 ]), &(acadoWorkspace.QDy[ 40 ]), &(acadoWorkspace.g[ 6 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 464 ]), &(acadoWorkspace.QDy[ 44 ]), &(acadoWorkspace.g[ 6 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 552 ]), &(acadoWorkspace.QDy[ 48 ]), &(acadoWorkspace.g[ 6 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 648 ]), &(acadoWorkspace.QDy[ 52 ]), &(acadoWorkspace.g[ 6 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 752 ]), &(acadoWorkspace.QDy[ 56 ]), &(acadoWorkspace.g[ 6 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 864 ]), &(acadoWorkspace.QDy[ 60 ]), &(acadoWorkspace.g[ 6 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 984 ]), &(acadoWorkspace.QDy[ 64 ]), &(acadoWorkspace.g[ 6 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 112 ]), &(acadoWorkspace.QDy[ 20 ]), &(acadoWorkspace.g[ 8 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 152 ]), &(acadoWorkspace.QDy[ 24 ]), &(acadoWorkspace.g[ 8 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 200 ]), &(acadoWorkspace.QDy[ 28 ]), &(acadoWorkspace.g[ 8 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 256 ]), &(acadoWorkspace.QDy[ 32 ]), &(acadoWorkspace.g[ 8 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 320 ]), &(acadoWorkspace.QDy[ 36 ]), &(acadoWorkspace.g[ 8 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 392 ]), &(acadoWorkspace.QDy[ 40 ]), &(acadoWorkspace.g[ 8 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 472 ]), &(acadoWorkspace.QDy[ 44 ]), &(acadoWorkspace.g[ 8 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 560 ]), &(acadoWorkspace.QDy[ 48 ]), &(acadoWorkspace.g[ 8 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 656 ]), &(acadoWorkspace.QDy[ 52 ]), &(acadoWorkspace.g[ 8 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 760 ]), &(acadoWorkspace.QDy[ 56 ]), &(acadoWorkspace.g[ 8 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 872 ]), &(acadoWorkspace.QDy[ 60 ]), &(acadoWorkspace.g[ 8 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 992 ]), &(acadoWorkspace.QDy[ 64 ]), &(acadoWorkspace.g[ 8 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 160 ]), &(acadoWorkspace.QDy[ 24 ]), &(acadoWorkspace.g[ 10 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 208 ]), &(acadoWorkspace.QDy[ 28 ]), &(acadoWorkspace.g[ 10 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 264 ]), &(acadoWorkspace.QDy[ 32 ]), &(acadoWorkspace.g[ 10 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 328 ]), &(acadoWorkspace.QDy[ 36 ]), &(acadoWorkspace.g[ 10 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 400 ]), &(acadoWorkspace.QDy[ 40 ]), &(acadoWorkspace.g[ 10 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 480 ]), &(acadoWorkspace.QDy[ 44 ]), &(acadoWorkspace.g[ 10 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 568 ]), &(acadoWorkspace.QDy[ 48 ]), &(acadoWorkspace.g[ 10 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 664 ]), &(acadoWorkspace.QDy[ 52 ]), &(acadoWorkspace.g[ 10 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 768 ]), &(acadoWorkspace.QDy[ 56 ]), &(acadoWorkspace.g[ 10 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 880 ]), &(acadoWorkspace.QDy[ 60 ]), &(acadoWorkspace.g[ 10 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 1000 ]), &(acadoWorkspace.QDy[ 64 ]), &(acadoWorkspace.g[ 10 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 216 ]), &(acadoWorkspace.QDy[ 28 ]), &(acadoWorkspace.g[ 12 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 272 ]), &(acadoWorkspace.QDy[ 32 ]), &(acadoWorkspace.g[ 12 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 336 ]), &(acadoWorkspace.QDy[ 36 ]), &(acadoWorkspace.g[ 12 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 408 ]), &(acadoWorkspace.QDy[ 40 ]), &(acadoWorkspace.g[ 12 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 488 ]), &(acadoWorkspace.QDy[ 44 ]), &(acadoWorkspace.g[ 12 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 576 ]), &(acadoWorkspace.QDy[ 48 ]), &(acadoWorkspace.g[ 12 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 672 ]), &(acadoWorkspace.QDy[ 52 ]), &(acadoWorkspace.g[ 12 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 776 ]), &(acadoWorkspace.QDy[ 56 ]), &(acadoWorkspace.g[ 12 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 888 ]), &(acadoWorkspace.QDy[ 60 ]), &(acadoWorkspace.g[ 12 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 1008 ]), &(acadoWorkspace.QDy[ 64 ]), &(acadoWorkspace.g[ 12 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 280 ]), &(acadoWorkspace.QDy[ 32 ]), &(acadoWorkspace.g[ 14 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 344 ]), &(acadoWorkspace.QDy[ 36 ]), &(acadoWorkspace.g[ 14 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 416 ]), &(acadoWorkspace.QDy[ 40 ]), &(acadoWorkspace.g[ 14 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 496 ]), &(acadoWorkspace.QDy[ 44 ]), &(acadoWorkspace.g[ 14 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 584 ]), &(acadoWorkspace.QDy[ 48 ]), &(acadoWorkspace.g[ 14 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 680 ]), &(acadoWorkspace.QDy[ 52 ]), &(acadoWorkspace.g[ 14 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 784 ]), &(acadoWorkspace.QDy[ 56 ]), &(acadoWorkspace.g[ 14 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 896 ]), &(acadoWorkspace.QDy[ 60 ]), &(acadoWorkspace.g[ 14 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 1016 ]), &(acadoWorkspace.QDy[ 64 ]), &(acadoWorkspace.g[ 14 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 352 ]), &(acadoWorkspace.QDy[ 36 ]), &(acadoWorkspace.g[ 16 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 424 ]), &(acadoWorkspace.QDy[ 40 ]), &(acadoWorkspace.g[ 16 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 504 ]), &(acadoWorkspace.QDy[ 44 ]), &(acadoWorkspace.g[ 16 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 592 ]), &(acadoWorkspace.QDy[ 48 ]), &(acadoWorkspace.g[ 16 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 688 ]), &(acadoWorkspace.QDy[ 52 ]), &(acadoWorkspace.g[ 16 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 792 ]), &(acadoWorkspace.QDy[ 56 ]), &(acadoWorkspace.g[ 16 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 904 ]), &(acadoWorkspace.QDy[ 60 ]), &(acadoWorkspace.g[ 16 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 1024 ]), &(acadoWorkspace.QDy[ 64 ]), &(acadoWorkspace.g[ 16 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 432 ]), &(acadoWorkspace.QDy[ 40 ]), &(acadoWorkspace.g[ 18 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 512 ]), &(acadoWorkspace.QDy[ 44 ]), &(acadoWorkspace.g[ 18 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 600 ]), &(acadoWorkspace.QDy[ 48 ]), &(acadoWorkspace.g[ 18 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 696 ]), &(acadoWorkspace.QDy[ 52 ]), &(acadoWorkspace.g[ 18 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 800 ]), &(acadoWorkspace.QDy[ 56 ]), &(acadoWorkspace.g[ 18 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 912 ]), &(acadoWorkspace.QDy[ 60 ]), &(acadoWorkspace.g[ 18 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 1032 ]), &(acadoWorkspace.QDy[ 64 ]), &(acadoWorkspace.g[ 18 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 520 ]), &(acadoWorkspace.QDy[ 44 ]), &(acadoWorkspace.g[ 20 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 608 ]), &(acadoWorkspace.QDy[ 48 ]), &(acadoWorkspace.g[ 20 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 704 ]), &(acadoWorkspace.QDy[ 52 ]), &(acadoWorkspace.g[ 20 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 808 ]), &(acadoWorkspace.QDy[ 56 ]), &(acadoWorkspace.g[ 20 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 920 ]), &(acadoWorkspace.QDy[ 60 ]), &(acadoWorkspace.g[ 20 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 1040 ]), &(acadoWorkspace.QDy[ 64 ]), &(acadoWorkspace.g[ 20 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 616 ]), &(acadoWorkspace.QDy[ 48 ]), &(acadoWorkspace.g[ 22 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 712 ]), &(acadoWorkspace.QDy[ 52 ]), &(acadoWorkspace.g[ 22 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 816 ]), &(acadoWorkspace.QDy[ 56 ]), &(acadoWorkspace.g[ 22 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 928 ]), &(acadoWorkspace.QDy[ 60 ]), &(acadoWorkspace.g[ 22 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 1048 ]), &(acadoWorkspace.QDy[ 64 ]), &(acadoWorkspace.g[ 22 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 720 ]), &(acadoWorkspace.QDy[ 52 ]), &(acadoWorkspace.g[ 24 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 824 ]), &(acadoWorkspace.QDy[ 56 ]), &(acadoWorkspace.g[ 24 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 936 ]), &(acadoWorkspace.QDy[ 60 ]), &(acadoWorkspace.g[ 24 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 1056 ]), &(acadoWorkspace.QDy[ 64 ]), &(acadoWorkspace.g[ 24 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 832 ]), &(acadoWorkspace.QDy[ 56 ]), &(acadoWorkspace.g[ 26 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 944 ]), &(acadoWorkspace.QDy[ 60 ]), &(acadoWorkspace.g[ 26 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 1064 ]), &(acadoWorkspace.QDy[ 64 ]), &(acadoWorkspace.g[ 26 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 952 ]), &(acadoWorkspace.QDy[ 60 ]), &(acadoWorkspace.g[ 28 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 1072 ]), &(acadoWorkspace.QDy[ 64 ]), &(acadoWorkspace.g[ 28 ]) );
acado_multEQDy( &(acadoWorkspace.E[ 1080 ]), &(acadoWorkspace.QDy[ 64 ]), &(acadoWorkspace.g[ 30 ]) );
acadoWorkspace.g[0] += + acadoWorkspace.H10[0]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[1]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[2]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[3]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[1] += + acadoWorkspace.H10[4]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[5]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[6]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[7]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[2] += + acadoWorkspace.H10[8]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[9]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[10]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[11]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[3] += + acadoWorkspace.H10[12]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[13]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[14]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[15]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[4] += + acadoWorkspace.H10[16]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[17]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[18]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[19]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[5] += + acadoWorkspace.H10[20]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[21]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[22]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[23]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[6] += + acadoWorkspace.H10[24]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[25]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[26]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[27]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[7] += + acadoWorkspace.H10[28]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[29]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[30]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[31]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[8] += + acadoWorkspace.H10[32]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[33]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[34]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[35]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[9] += + acadoWorkspace.H10[36]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[37]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[38]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[39]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[10] += + acadoWorkspace.H10[40]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[41]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[42]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[43]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[11] += + acadoWorkspace.H10[44]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[45]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[46]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[47]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[12] += + acadoWorkspace.H10[48]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[49]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[50]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[51]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[13] += + acadoWorkspace.H10[52]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[53]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[54]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[55]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[14] += + acadoWorkspace.H10[56]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[57]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[58]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[59]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[15] += + acadoWorkspace.H10[60]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[61]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[62]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[63]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[16] += + acadoWorkspace.H10[64]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[65]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[66]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[67]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[17] += + acadoWorkspace.H10[68]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[69]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[70]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[71]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[18] += + acadoWorkspace.H10[72]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[73]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[74]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[75]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[19] += + acadoWorkspace.H10[76]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[77]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[78]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[79]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[20] += + acadoWorkspace.H10[80]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[81]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[82]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[83]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[21] += + acadoWorkspace.H10[84]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[85]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[86]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[87]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[22] += + acadoWorkspace.H10[88]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[89]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[90]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[91]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[23] += + acadoWorkspace.H10[92]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[93]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[94]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[95]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[24] += + acadoWorkspace.H10[96]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[97]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[98]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[99]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[25] += + acadoWorkspace.H10[100]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[101]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[102]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[103]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[26] += + acadoWorkspace.H10[104]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[105]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[106]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[107]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[27] += + acadoWorkspace.H10[108]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[109]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[110]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[111]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[28] += + acadoWorkspace.H10[112]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[113]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[114]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[115]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[29] += + acadoWorkspace.H10[116]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[117]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[118]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[119]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[30] += + acadoWorkspace.H10[120]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[121]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[122]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[123]*acadoWorkspace.Dx0[3];
acadoWorkspace.g[31] += + acadoWorkspace.H10[124]*acadoWorkspace.Dx0[0] + acadoWorkspace.H10[125]*acadoWorkspace.Dx0[1] + acadoWorkspace.H10[126]*acadoWorkspace.Dx0[2] + acadoWorkspace.H10[127]*acadoWorkspace.Dx0[3];
}
/* Expand the condensed QP solution back onto the full horizon:
 * add the primal step du to the controls and propagate the state
 * update x_{k+1} += Gx*Dx0 + d + sum_j E_{k,j}*du_j (NX = 4, NU = 2, N = 16). */
void acado_expand( )
{
int i;
int row;
int col;
int blk;

/* Add the QP primal step to the control trajectory (N*NU = 32 entries). */
for (i = 0; i < 32; ++i)
acadoVariables.u[i] += acadoWorkspace.x[i];

/* Add the initial-state residual to node 0 (NX = 4 entries). */
for (i = 0; i < 4; ++i)
acadoVariables.x[i] += acadoWorkspace.Dx0[i];

/* Free-response part: x_{k+1} += evGx * Dx0 + d, rows 0..63 map to x[4..67].
 * The summand order matches the original unrolled code exactly, so the
 * floating-point result is bit-identical. */
for (row = 0; row < 64; ++row)
acadoVariables.x[row + 4] += + acadoWorkspace.evGx[row * 4]*acadoWorkspace.Dx0[0] + acadoWorkspace.evGx[row * 4 + 1]*acadoWorkspace.Dx0[1] + acadoWorkspace.evGx[row * 4 + 2]*acadoWorkspace.Dx0[2] + acadoWorkspace.evGx[row * 4 + 3]*acadoWorkspace.Dx0[3] + acadoWorkspace.d[row];

/* Forced-response part: lower-triangular sweep over the E blocks.
 * Block blk (8 reals each) couples control step col into state node row;
 * blk runs 0..135 in the same order as the original unrolled calls. */
blk = 0;
for (row = 1; row < 17; ++row)
{
for (col = 0; col < row; ++col, ++blk)
acado_multEDu( &(acadoWorkspace.E[ blk * 8 ]), &(acadoWorkspace.x[ col * 2 ]), &(acadoVariables.x[ row * 4 ]) );
}
}
/* One full RTI preparation phase: simulate the model over the horizon,
 * evaluate the objective terms, and pre-condense the QP data.
 * Returns the status code reported by the model simulation. */
int acado_preparationStep( )
{
int status;

status = acado_modelSimulation();
acado_evaluateObjective( );
acado_condensePrep( );

return status;
}
/* One full RTI feedback phase: finish condensing with the latest
 * measurement, solve the condensed QP, and expand the solution back
 * onto the horizon. Returns the QP solver's status code. */
int acado_feedbackStep( )
{
int qpStatus;

acado_condenseFdb( );
qpStatus = acado_solve( );
acado_expand( );

return qpStatus;
}
/* Must be called once before any other solver function: zeroes the whole
 * workspace so all condensing buffers start from a clean state.
 * Always returns 0 (success). */
int acado_initializeSolver( )
{
memset(&acadoWorkspace, 0, sizeof( acadoWorkspace ));
return 0;
}
/* Initialize the state trajectory by integrating the model forward from
 * node 0, using the stored controls at each node (NX = 4, NU = 2, N = 16). */
void acado_initializeNodesByForwardSimulation( )
{
int node;
for (node = 0; node < 16; ++node)
{
int xIdx = node * 4;
/* Seed the integrator with the current node's state and controls. */
state[0] = acadoVariables.x[xIdx];
state[1] = acadoVariables.x[xIdx + 1];
state[2] = acadoVariables.x[xIdx + 2];
state[3] = acadoVariables.x[xIdx + 3];
state[28] = acadoVariables.u[node * 2];
state[29] = acadoVariables.u[node * 2 + 1];
/* The second argument is nonzero only for the first node. */
acado_integrate(state, node == 0);
/* The propagated state becomes the next node's value. */
acadoVariables.x[xIdx + 4] = state[0];
acadoVariables.x[xIdx + 5] = state[1];
acadoVariables.x[xIdx + 6] = state[2];
acadoVariables.x[xIdx + 7] = state[3];
}
}
/* Shift the state trajectory one node forward (warm start for the next
 * sampling instant) and fill the terminal node according to 'strategy':
 *   1 + non-null xEnd : copy the caller-supplied terminal state;
 *   2                 : simulate one step forward from the last node,
 *                       using uEnd if given, else the last stored controls. */
void acado_shiftStates( int strategy, real_t* const xEnd, real_t* const uEnd )
{
int node;

for (node = 0; node < 16; ++node)
{
int dst = node * 4;
acadoVariables.x[dst] = acadoVariables.x[dst + 4];
acadoVariables.x[dst + 1] = acadoVariables.x[dst + 5];
acadoVariables.x[dst + 2] = acadoVariables.x[dst + 6];
acadoVariables.x[dst + 3] = acadoVariables.x[dst + 7];
}

if (strategy == 1 && xEnd != 0)
{
/* Terminal state provided directly by the caller. */
acadoVariables.x[64] = xEnd[0];
acadoVariables.x[65] = xEnd[1];
acadoVariables.x[66] = xEnd[2];
acadoVariables.x[67] = xEnd[3];
}
else if (strategy == 2)
{
/* Terminal state obtained by one forward simulation step. */
state[0] = acadoVariables.x[64];
state[1] = acadoVariables.x[65];
state[2] = acadoVariables.x[66];
state[3] = acadoVariables.x[67];
if (uEnd != 0)
{
state[28] = uEnd[0];
state[29] = uEnd[1];
}
else
{
state[28] = acadoVariables.u[30];
state[29] = acadoVariables.u[31];
}
acado_integrate(state, 1);
acadoVariables.x[64] = state[0];
acadoVariables.x[65] = state[1];
acadoVariables.x[66] = state[2];
acadoVariables.x[67] = state[3];
}
}
/* Shift the control trajectory one interval to the left; optionally overwrite
 * the last control with uEnd (left duplicated when uEnd is NULL). */
void acado_shiftControls( real_t* const uEnd )
{
int i, j;
for (i = 0; i < 15; ++i)
for (j = 0; j < 2; ++j)
acadoVariables.u[i * 2 + j] = acadoVariables.u[i * 2 + 2 + j];
if (uEnd != 0)
{
acadoVariables.u[30] = uEnd[0];
acadoVariables.u[31] = uEnd[1];
}
}
/* KKT tolerance estimate: |g' x| plus the complementarity contributions of the
 * active bounds (multiplier sign selects lower vs upper bound). */
real_t acado_getKKT(  )
{
real_t kkt;
real_t mult;
int i;
kkt = 0.0;
/* Gradient contribution g' x, summed in the same left-to-right order
   as the generated expression it replaces. */
for (i = 0; i < 32; ++i)
kkt += acadoWorkspace.g[i]*acadoWorkspace.x[i];
kkt = fabs( kkt );
for (i = 0; i < 32; ++i)
{
mult = acadoWorkspace.y[i];
if (mult > 1e-12)
kkt += fabs(acadoWorkspace.lb[i] * mult);
else if (mult < -1e-12)
kkt += fabs(acadoWorkspace.ub[i] * mult);
}
return kkt;
}
/* Current objective value: 0.5 * sum_k Dy_k' W_k Dy_k + 0.5 * DyN' WN DyN,
 * where Dy_k = h(x_k,u_k) - y_k are the stage LSQ residuals and DyN the
 * terminal residual. Accumulation order matches the generated expansion. */
real_t acado_getObjective(  )
{
real_t objVal;
real_t acc;
int run1, j, k;
/** Row vector of size: 6 */
real_t tmpDy[ 6 ];
/** Row vector of size: 4 */
real_t tmpDyN[ 4 ];
/* Stage residuals Dy_k = h(x_k, u_k) - y_k, k = 0..15. */
for (run1 = 0; run1 < 16; ++run1)
{
for (j = 0; j < 4; ++j)
acadoWorkspace.objValueIn[j] = acadoVariables.x[run1 * 4 + j];
acadoWorkspace.objValueIn[4] = acadoVariables.u[run1 * 2];
acadoWorkspace.objValueIn[5] = acadoVariables.u[run1 * 2 + 1];
acado_evaluateLSQ( acadoWorkspace.objValueIn, acadoWorkspace.objValueOut );
for (j = 0; j < 6; ++j)
acadoWorkspace.Dy[run1 * 6 + j] = acadoWorkspace.objValueOut[j] - acadoVariables.y[run1 * 6 + j];
}
/* Terminal residual DyN = hN(x_N) - yN. */
for (j = 0; j < 4; ++j)
acadoWorkspace.objValueIn[j] = acadoVariables.x[64 + j];
acado_evaluateLSQEndTerm( acadoWorkspace.objValueIn, acadoWorkspace.objValueOut );
for (j = 0; j < 4; ++j)
acadoWorkspace.DyN[j] = acadoWorkspace.objValueOut[j] - acadoVariables.yN[j];
objVal = 0.0000000000000000e+00;
/* tmpDy[j] = sum_k Dy_k * W[k][j] per stage (W stored as a 6x6 block). */
for (run1 = 0; run1 < 16; ++run1)
{
for (j = 0; j < 6; ++j)
{
tmpDy[j] = 0.0;
for (k = 0; k < 6; ++k)
tmpDy[j] += acadoWorkspace.Dy[run1 * 6 + k] * acadoVariables.W[run1 * 36 + 6 * k + j];
}
acc = 0.0;
for (j = 0; j < 6; ++j)
acc += acadoWorkspace.Dy[run1 * 6 + j] * tmpDy[j];
objVal += acc;
}
/* Terminal cost uses only the diagonal entries WN[5*j] of the 4x4 WN,
   matching the generated code. */
for (j = 0; j < 4; ++j)
tmpDyN[j] = acadoWorkspace.DyN[j] * acadoVariables.WN[5 * j];
acc = 0.0;
for (j = 0; j < 4; ++j)
acc += acadoWorkspace.DyN[j] * tmpDyN[j];
objVal += acc;
objVal *= 0.5;
return objVal;
}
|
CPSfield_impl.h | #ifndef CPS_FIELD_IMPL
#define CPS_FIELD_IMPL
//Implementations of CPSfield.h
//Real-reduce for norm2
//Traits used by norm2: per site type, the complex conjugate of a value and the
//reduction of an accumulated norm contribution down to a plain double.
template<typename T>
struct normdefs{};
template<>
struct normdefs<double>{
inline static double real_reduce(const double v){ return v; }
inline static double conjugate(const double v){ return v; } //real data is its own conjugate
};
template<>
struct normdefs<float>{
inline static double real_reduce(const float v){ return v; }
inline static float conjugate(const float v){ return v; }
};
template<typename T>
struct normdefs<std::complex<T> >{
inline static double real_reduce(const std::complex<T> v){ return v.real(); }
inline static std::complex<T> conjugate(const std::complex<T> v){ return std::conj(v); }
};
#ifdef USE_GRID
//Grid SIMD-vector specializations: Reduce() sums the SIMD lanes to a scalar
template<>
struct normdefs<Grid::vRealD>{
inline static double real_reduce(const Grid::vRealD in){ return Reduce(in); }
inline static Grid::vRealD conjugate(const Grid::vRealD in){ return in; } //real data: conjugation is a no-op
};
template<>
struct normdefs<Grid::vRealF>{
inline static double real_reduce(const Grid::vRealF in){ return Reduce(in); }
inline static Grid::vRealF conjugate(const Grid::vRealF in){ return in; }
};
template<>
struct normdefs<Grid::vComplexD>{
inline static double real_reduce(const Grid::vComplexD in){ return std::real(Reduce(in)); } //lane-sum then real part
inline static Grid::vComplexD conjugate(const Grid::vComplexD in){ return Grid::conjugate(in); }
};
template<>
struct normdefs<Grid::vComplexF>{
inline static double real_reduce(const Grid::vComplexF in){ return std::real(Reduce(in)); }
inline static Grid::vComplexF conjugate(const Grid::vComplexF in){ return Grid::conjugate(in); }
};
#endif
template< typename SiteType, int SiteSize, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
//Squared L2 norm of the field: sum over all flavor-sites and internal indices of
//conj(v)*v, accumulated per OpenMP thread, reduced to a real double and then
//summed over all nodes via glb_sum_five.
double CPSfield<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy>::norm2() const{
SiteType accum[omp_get_max_threads()]; //one partial sum per thread, indexed by omp_get_thread_num()
memset(accum, 0, omp_get_max_threads()*sizeof(SiteType)); //NOTE(review): assumes SiteType is zeroable via memset -- confirm for all site types used
#pragma omp parallel for
for(int i=0;i<this->nfsites();i++){
SiteType const *site = this->fsite_ptr(i);
for(int s=0;s<SiteSize;s++)
accum[omp_get_thread_num()] = accum[omp_get_thread_num()] + normdefs<SiteType>::conjugate(site[s])*site[s];
}
SiteType total;
memset(&total, 0, sizeof(SiteType));
for(int i=0;i<omp_get_max_threads();i++)
total = total + accum[i];
double final = normdefs<SiteType>::real_reduce(total); //collapse complex parts / SIMD lanes to a real number
glb_sum_five(&final); //global sum over nodes
return final;
}
#ifdef USE_GRID
//Conversion between Grid scalar (non-SIMD) tensor types and flat CPS complex arrays
template<typename T,typename CPScomplex>
struct GridTensorConvert{};
template<typename complex_scalar, typename CPScomplex>
struct GridTensorConvert<Grid::QCD::iSpinColourVector<complex_scalar>, CPScomplex>{
static_assert(!Grid::isSIMDvectorized<complex_scalar>::value && Grid::isComplex<complex_scalar>::value, "Only applies to scalar complex types");
//12-component complex spin-color vector
//We have assured the input is not SIMD vectorized so the output type is the same
//Grid tensor -> CPS array (spin-major ordering); flavor index f is unused here
inline static void doit(CPScomplex* cps, const Grid::QCD::iSpinColourVector<complex_scalar> &grid, const int f){
for(int s=0;s<Grid::QCD::Ns;s++)
for(int c=0;c<Grid::QCD::Nc;c++)
*cps++ = grid()(s)(c);
}
//CPS array -> Grid tensor
inline static void doit(Grid::QCD::iSpinColourVector<complex_scalar> &grid, CPScomplex const* cps, const int f){
for(int s=0;s<Grid::QCD::Ns;s++)
for(int c=0;c<Grid::QCD::Nc;c++)
grid()(s)(c) = *cps++;
}
};
//Two-flavor (G-parity) spin-color vector: the flavor index f selects which
//flavor block of the Grid tensor participates in the copy
template<typename complex_scalar, typename CPScomplex>
struct GridTensorConvert<Grid::QCD::iGparitySpinColourVector<complex_scalar>, CPScomplex>{
static_assert(!Grid::isSIMDvectorized<complex_scalar>::value && Grid::isComplex<complex_scalar>::value, "Only applies to scalar complex types");
//12-component complex spin-color vector
//We have assured the input is not SIMD vectorized so the output type is the same
inline static void doit(CPScomplex* cps, const Grid::QCD::iGparitySpinColourVector<complex_scalar> &grid, const int f){
for(int s=0;s<Grid::QCD::Ns;s++)
for(int c=0;c<Grid::QCD::Nc;c++)
*cps++ = grid(f)(s)(c);
}
inline static void doit(Grid::QCD::iGparitySpinColourVector<complex_scalar> &grid, CPScomplex const* cps, const int f){
for(int s=0;s<Grid::QCD::Ns;s++)
for(int c=0;c<Grid::QCD::Nc;c++)
grid(f)(s)(c) = *cps++;
}
};
//Lorentz-indexed color matrix (gauge field): 4 directions x 3x3 complex; f unused
template<typename complex_scalar, typename CPScomplex>
struct GridTensorConvert<Grid::QCD::iLorentzColourMatrix<complex_scalar>, CPScomplex>{
static_assert(!Grid::isSIMDvectorized<complex_scalar>::value && Grid::isComplex<complex_scalar>::value, "Only applies to scalar complex types");
//Gauge field mu=0..3 3*3 complex
//We have assured the input is not SIMD vectorized so the output type is the same
inline static void doit(CPScomplex* cps, const Grid::QCD::iLorentzColourMatrix<complex_scalar> &grid, const int f){
for(int mu=0;mu<4;mu++)
for(int i=0;i<3;i++)
for(int j=0;j<3;j++)
*cps++ = grid(mu)()(i,j);
}
inline static void doit(Grid::QCD::iLorentzColourMatrix<complex_scalar> &grid, CPScomplex const* cps, const int f){
for(int mu=0;mu<4;mu++)
for(int i=0;i<3;i++)
for(int j=0;j<3;j++)
grid(mu)()(i,j) = *cps++;
}
};
//Permutation between CPS and Grid dimension orderings. For 5D fields the two
//libraries place the 5th dimension at opposite ends -- NOTE(review): inferred
//from the index permutations below; confirm against the field layout policies.
template<int Ndim>
struct dimensionMap{};
template<>
struct dimensionMap<5>{
const int cps_to_grid[5] = {1,2,3,4,0};
const int grid_to_cps[5] = {4,0,1,2,3};
};
template<>
struct dimensionMap<4>{
//4D orderings coincide: identity map
const int cps_to_grid[4] = {0,1,2,3};
const int grid_to_cps[4] = {0,1,2,3};
};
//Importer/exporter between CPSfield and Grid lattice fields, dispatched on the
//complex-type category of the CPS site type (scalar complex vs Grid SIMD vector)
template<typename Type, int SiteSize, typename DimPol, typename FlavPol, typename AllocPol,
typename GridField, typename ComplexClass>
class CPSfieldGridImpex{};
//Specialization for scalar (non-SIMD) complex CPS fields
template<typename Type, int SiteSize, typename DimPol, typename FlavPol, typename AllocPol,
typename GridField>
class CPSfieldGridImpex<Type,SiteSize,DimPol,FlavPol,AllocPol,GridField,complex_double_or_float_mark>{
typedef CPSfield<Type,SiteSize,DimPol,FlavPol,AllocPol> CPSfieldType;
public:
//Copy the Grid field 'from' into the CPS field 'into', site by site
static void import(CPSfieldType &into, const GridField &from){
const int Nd = DimPol::EuclideanDimension;
assert(Nd == from._grid->Nd());
dimensionMap<CPSfieldType::EuclideanDimension> dim_map;
typedef typename Grid::GridTypeMapper<typename GridField::vector_object>::scalar_object sobj;
#pragma omp parallel for
for(int site=0;site<into.nsites();site++){
std::vector<int> x(Nd);
into.siteUnmap(site, &x[0]); //CPS linear site -> CPS coordinate
std::vector<int> grid_x(Nd);
for(int i=0;i<Nd;i++)
grid_x[ dim_map.cps_to_grid[i] ] = x[i]; //permute to Grid's dimension ordering
sobj siteGrid; //contains both flavors if Gparity
peekLocalSite(siteGrid,from,grid_x);
for(int f=0;f<into.nflavors();f++){
typename CPSfieldType::FieldSiteType *cps = into.site_ptr(site,f);
GridTensorConvert<sobj, typename CPSfieldType::FieldSiteType>::doit(cps, siteGrid, f);
}
}
}
//Copy the CPS field 'from' into the Grid field 'into', assembling a full SIMD
//vector (all lanes) per Grid outer site
static void exportit(GridField &into, const CPSfieldType &from){
const int Nd = DimPol::EuclideanDimension;
assert(Nd == into._grid->Nd());
dimensionMap<CPSfieldType::EuclideanDimension> dim_map;
typedef typename Grid::GridTypeMapper<typename GridField::vector_object>::scalar_object sobj;
int nthread = omp_get_max_threads();
int nsimd = into._grid->Nsimd();
std::vector<std::vector<sobj> > tstore(nthread,std::vector<sobj>(nsimd)); //thread-individual temp storage for Grid-converted tensors
std::vector<std::vector<sobj*> > tstore_ptrs(nthread,std::vector<sobj*>(nsimd));
for(int i=0;i<nthread;i++)
for(int j=0;j<nsimd;j++)
tstore_ptrs[i][j] = &tstore[i][j];
std::vector<std::vector<int> > out_icoor(nsimd); //store inner coordinate offsets
for(int i=0;i<nsimd;i++){
out_icoor[i].resize(Nd);
into._grid->iCoorFromIindex(out_icoor[i], i);
}
#pragma omp parallel for
for(int out_oidx=0;out_oidx<into._grid->oSites();out_oidx++){
int me = omp_get_thread_num();
std::vector<int> out_ocoor(Nd);
into._grid->oCoorFromOindex(out_ocoor, out_oidx);
std::vector<int> lcoor(Nd);
std::vector<int> lcoor_cps(Nd);
for(int lane=0; lane < nsimd; lane++){
for(int mu=0;mu<Nd;mu++){
lcoor[mu] = out_ocoor[mu] + into._grid->_rdimensions[mu]*out_icoor[lane][mu]; //local lattice coordinate of this SIMD lane
lcoor_cps[ dim_map.grid_to_cps[mu] ] = lcoor[mu]; //permute back to CPS ordering
}
int cps_site = from.siteMap(&lcoor_cps[0]);
for(int f=0;f<from.nflavors();f++){
typename CPSfieldType::FieldSiteType const* cps = from.site_ptr(cps_site,f);
GridTensorConvert<sobj, typename CPSfieldType::FieldSiteType>::doit(tstore[me][lane], cps, f);
}
}
merge(into._odata[out_oidx], tstore_ptrs[me], 0); //pack the nsimd scalar tensors into the SIMD site
}
}
};
#ifdef USE_GRID
//Specialization for Grid SIMD-vectorized CPS fields: round-trips through a
//scalar (unvectorized) temporary field and reuses the scalar impex above
template<typename Type, int SiteSize, typename DimPol, typename FlavPol, typename AllocPol,
typename GridField>
class CPSfieldGridImpex<Type,SiteSize,DimPol,FlavPol,AllocPol,GridField,grid_vector_complex_mark>{
typedef CPSfield<Type,SiteSize,DimPol,FlavPol,AllocPol> CPSfieldType;
public:
static void import(CPSfieldType &into, const GridField &from){
const int Nd = DimPol::EuclideanDimension;
assert(Nd == from._grid->Nd());
typedef typename Grid::GridTypeMapper<Type>::scalar_type CPSscalarType;
typedef typename ComplexClassify<CPSscalarType>::type CPSscalarTypeClass;
//Create temp CPS unvectorized field
typedef typename StandardDimensionPolicy<DimPol::EuclideanDimension>::type CPSscalarDimPol;
NullObject n;
CPSfield<CPSscalarType,SiteSize,CPSscalarDimPol,FlavPol,StandardAllocPolicy> cps_unpacked(n);
CPSfieldGridImpex<CPSscalarType,SiteSize,CPSscalarDimPol,FlavPol,StandardAllocPolicy,GridField, CPSscalarTypeClass>::import(cps_unpacked,from);
into.importField(cps_unpacked); //repack the scalar data into the SIMD layout
}
static void exportit(GridField &into, const CPSfieldType &from){
const int Nd = DimPol::EuclideanDimension;
assert(Nd == into._grid->Nd());
typedef typename Grid::GridTypeMapper<Type>::scalar_type CPSscalarType;
typedef typename ComplexClassify<CPSscalarType>::type CPSscalarTypeClass;
//Create temp CPS unvectorized field
typedef typename StandardDimensionPolicy<DimPol::EuclideanDimension>::type CPSscalarDimPol;
NullObject n;
CPSfield<CPSscalarType,SiteSize,CPSscalarDimPol,FlavPol,StandardAllocPolicy> cps_unpacked(n);
cps_unpacked.importField(from); //unpack the SIMD data to the scalar layout
CPSfieldGridImpex<CPSscalarType,SiteSize,CPSscalarDimPol,FlavPol,StandardAllocPolicy,GridField, CPSscalarTypeClass>::exportit(into, cps_unpacked);
}
};
#endif
template< typename SiteType, int SiteSize, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
template<typename GridField>
//Import a Grid field into this CPSfield; dispatches on the complex-type category of SiteType
void CPSfield<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy>::importGridField(const GridField &grid){
typedef typename ComplexClassify<SiteType>::type ComplexClass;
CPSfieldGridImpex<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy,GridField,ComplexClass>::import(*this, grid);
}
template< typename SiteType, int SiteSize, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
template<typename GridField>
//Export this CPSfield into a Grid field; dispatches on the complex-type category of SiteType
void CPSfield<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy>::exportGridField(GridField &grid) const{
typedef typename ComplexClassify<SiteType>::type ComplexClass;
CPSfieldGridImpex<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy,GridField,ComplexClass>::exportit(grid,*this);
}
#endif
//Copy between two CPSfields of possibly different site types / layout policies.
//The correct specialization is selected via the SFINAE 'Enable' parameter.
template<int SiteSize,
typename TypeA, typename DimPolA, typename FlavPolA, typename AllocPolA,
typename TypeB, typename DimPolB, typename FlavPolB, typename AllocPolB,
typename Enable = void>
class CPSfieldCopy;
//Generic copy. SiteSize and number of Euclidean dimensions must be the same
#ifdef USE_GRID
#define CONDITION sameDim<DimPolA,DimPolB>::val && !Grid::is_simd<TypeA>::value && !Grid::is_simd<TypeB>::value
#else
#define CONDITION sameDim<DimPolA,DimPolB>::val
#endif
template<int SiteSize,
typename TypeA, typename DimPolA, typename FlavPolA, typename AllocPolA,
typename TypeB, typename DimPolB, typename FlavPolB, typename AllocPolB>
class CPSfieldCopy<SiteSize,TypeA,DimPolA,FlavPolA,AllocPolA, TypeB,DimPolB,FlavPolB,AllocPolB, typename my_enable_if<CONDITION,void>::type>{
public:
static void copy(CPSfield<TypeA,SiteSize,DimPolA,FlavPolA,AllocPolA> &into,
const CPSfield<TypeB,SiteSize,DimPolB,FlavPolB,AllocPolB> &from){
assert(into.nfsites() == from.nfsites()); //should be true in # Euclidean dimensions the same, but not guaranteed
#pragma omp parallel for
for(int fs=0;fs<into.nfsites();fs++){
int x[5], f; into.fsiteUnmap(fs,x,f); //doesn't matter if the linearization differs between the two
//NOTE(review): x[5] assumes EuclideanDimension <= 5 -- confirm for higher-dimensional policies
TypeA* toptr = into.fsite_ptr(fs);
TypeB const* fromptr = from.site_ptr(x,f);
for(int i=0;i<SiteSize;i++) toptr[i] = fromptr[i];
}
}
};
#undef CONDITION
#ifdef USE_GRID
//Format an integer vector as "(v0, v1, ..., v{ndim-1})" for diagnostics.
//'inline' because this is a non-template function defined in a header: without
//it, including the header from multiple translation units causes multiple-
//definition link errors (ODR). An empty vector now yields "()" rather than
//reading v[-1] (undefined behavior).
inline std::string vtostring(const int* v, const int ndim){
std::ostringstream os;
os << '(';
if(ndim > 0){
for(int i=0;i<ndim-1;i++) os << v[i] << ", ";
os << v[ndim-1];
}
os << ')';
return os.str();
}
//TypeA is Grid_simd type
//Copy a non-SIMD field into a SIMD field: each output fsite packs nsimd input sites
#define CONDITION sameDim<DimPolA,DimPolB>::val && Grid::is_simd<GridSIMDTypeA>::value && !Grid::is_simd<TypeB>::value
template<int SiteSize,
typename GridSIMDTypeA, typename DimPolA, typename FlavPolA, typename AllocPolA,
typename TypeB, typename DimPolB, typename FlavPolB, typename AllocPolB>
class CPSfieldCopy<SiteSize,
GridSIMDTypeA, DimPolA, FlavPolA, AllocPolA,
TypeB, DimPolB, FlavPolB, AllocPolB, typename my_enable_if<CONDITION,void>::type>
{
public:
static void copy(CPSfield<GridSIMDTypeA,SiteSize,DimPolA,FlavPolA,AllocPolA> &into,
const CPSfield<TypeB,SiteSize,DimPolB,FlavPolB,AllocPolB> &from){
const int nsimd = GridSIMDTypeA::Nsimd();
const int ndim = DimPolA::EuclideanDimension;
if(from.nfsites()/nsimd != into.nfsites()) ERR.General("CPSfieldCopy","copy(<SIMD field> &into, const <non-SIMD field> &from)","Expected from.nfsites/nsimd = into.nfsites, got %d/%d (=%d) != %d\n",from.nfsites(),nsimd, from.nfsites()/nsimd, into.nfsites());
std::vector<std::vector<int> > packed_offsets(nsimd,std::vector<int>(ndim)); //coordinate offset of each SIMD lane relative to the root site
for(int i=0;i<nsimd;i++){
into.SIMDunmap(i,&packed_offsets[i][0]);
}
#pragma omp parallel for
for(int fs=0;fs<into.nfsites();fs++){
int x[ndim], f; into.fsiteUnmap(fs,x,f);
GridSIMDTypeA* toptr = into.fsite_ptr(fs);
//x is the root coordinate corresponding to SIMD packed index 0
std::vector<TypeB const*> ptrs(nsimd);
ptrs[0] = from.site_ptr(x,f);
int xx[ndim];
for(int i=1;i<nsimd;i++){
for(int d=0;d<ndim;d++)
xx[d] = x[d] + packed_offsets[i][d]; //xx = x + offset
ptrs[i] = from.site_ptr(xx,f);
}
into.SIMDpack(toptr, ptrs, SiteSize); //interleave the nsimd scalar sites into SIMD vectors
}
}
};
#undef CONDITION
//TypeB is Grid_simd type
//Copy a SIMD field into a non-SIMD field: each input fsite is scattered to nsimd output sites
#define CONDITION sameDim<DimPolA,DimPolB>::val && !Grid::is_simd<TypeA>::value && Grid::is_simd<GridSIMDTypeB>::value
template<int SiteSize,
typename TypeA, typename DimPolA, typename FlavPolA, typename AllocPolA,
typename GridSIMDTypeB, typename DimPolB, typename FlavPolB, typename AllocPolB>
class CPSfieldCopy<SiteSize,
TypeA, DimPolA, FlavPolA, AllocPolA,
GridSIMDTypeB, DimPolB, FlavPolB, AllocPolB, typename my_enable_if<CONDITION,void>::type>
{
public:
static void copy(CPSfield<TypeA,SiteSize,DimPolA,FlavPolA,AllocPolA> &into,
const CPSfield<GridSIMDTypeB,SiteSize,DimPolB,FlavPolB,AllocPolB> &from){
const int nsimd = GridSIMDTypeB::Nsimd();
const int ndim = DimPolA::EuclideanDimension;
if(into.nfsites()/nsimd != from.nfsites()) ERR.General("CPSfieldCopy","copy(<non-SIMD field> &into, const <SIMD-field> &from)","Expected into.nfsites/nsimd = from.nfsites, got %d/%d (=%d) != %d\n",into.nfsites(),nsimd, into.nfsites()/nsimd, from.nfsites());
std::vector<std::vector<int> > packed_offsets(nsimd,std::vector<int>(ndim)); //coordinate offset of each SIMD lane relative to the root site
for(int i=0;i<nsimd;i++) from.SIMDunmap(i,&packed_offsets[i][0]);
#pragma omp parallel for
for(int fs=0;fs<from.nfsites();fs++){
int x[ndim], f; from.fsiteUnmap(fs,x,f);
GridSIMDTypeB const* fromptr = from.fsite_ptr(fs);
//x is the root coordinate corresponding to SIMD packed index 0
std::vector<TypeA*> ptrs(nsimd);
ptrs[0] = into.site_ptr(x,f);
int xx[ndim];
for(int i=1;i<nsimd;i++){
for(int d=0;d<ndim;d++)
xx[d] = x[d] + packed_offsets[i][d]; //xx = x + offset
ptrs[i] = into.site_ptr(xx,f);
}
from.SIMDunpack(ptrs, fromptr, SiteSize); //scatter each SIMD lane to its scalar destination
}
}
};
#undef CONDITION
#endif
template< typename SiteType, int SiteSize, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
template< typename extSiteType, typename extDimPol, typename extFlavPol, typename extAllocPol>
//Copy data in from a field of (possibly) different site type / layout; dispatches to CPSfieldCopy
void CPSfield<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy>::importField(const CPSfield<extSiteType,SiteSize,extDimPol,extFlavPol,extAllocPol> &r){
CPSfieldCopy<SiteSize,
SiteType,DimensionPolicy,FlavorPolicy,AllocPolicy,
extSiteType, extDimPol, extFlavPol, extAllocPol>::copy(*this,r);
}
template< typename SiteType, int SiteSize, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
template< typename extSiteType, typename extDimPol, typename extFlavPol, typename extAllocPol>
//Copy this field's data out to a field of (possibly) different site type / layout
void CPSfield<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy>::exportField(CPSfield<extSiteType,SiteSize,extDimPol,extFlavPol,extAllocPol> &r) const{
CPSfieldCopy<SiteSize,
extSiteType, extDimPol, extFlavPol, extAllocPol,
SiteType,DimensionPolicy,FlavorPolicy,AllocPolicy>::copy(r,*this);
}
//Fill a raw field array with uniform random numbers from the global LRG; generic scalar case
template<typename SiteType>
class _testRandom{
public:
static void rand(SiteType* f, int fsize, const Float hi, const Float lo){
for(int i=0;i<fsize;i++) f[i] = LRG.Urand(hi,lo,FOUR_D);
}
};
//std::complex specialization: fill the real and imaginary parts independently
template<typename T>
class _testRandom<std::complex<T> >{
public:
static void rand(std::complex<T>* f, int fsize, const Float hi, const Float lo){
assert(sizeof(std::complex<T>) == 2*sizeof(T)); //layout check: reinterpret as 2*fsize reals
T* ff = (T*)f;
for(int i=0;i<2*fsize;i++) ff[i] = LRG.Urand(hi,lo,FOUR_D);
}
};
//Set each float to a uniform random number in the specified range.
//WARNING: Uses only the current RNG in LRG, and does not change this based on site. This is therefore only useful for testing*
template< typename SiteType, int SiteSize, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
void CPSfield<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy>::testRandom(const Float hi, const Float lo){
_testRandom<SiteType>::rand(this->f,this->fsize,hi,lo); //delegates to the SiteType-appropriate filler above
}
template< typename SiteType, int SiteSize, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
//Set this field to the element-wise average of this and a second field, r.
//Fix: the second parameter had been corrupted to "const bool ¶llel" (an HTML
//entity mangling of "&parallel"), which does not compile; restored the
//reference declarator and parameter name.
void CPSfield<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy>::average(const CPSfield<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> &r, const bool &parallel){
//The beauty of having the ordering baked into the policy class is that we implicitly *know* the ordering of the second field, so we can just loop over the floats in a dumb way
if(parallel){
#pragma omp parallel for
for(int i=0;i<fsize;i++) f[i] = (f[i] + r.f[i])/2.0;
}else{
for(int i=0;i<fsize;i++) f[i] = (f[i] + r.f[i])/2.0;
}
}
//Per-site gauge-fixing rotation, overloaded on whether the complex type is
//scalar (complex_double_or_float_mark) or Grid SIMD-vectorized
struct _gauge_fix_site_op_impl{
template< typename mf_Complex, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy, typename my_enable_if<_equal<typename ComplexClassify<mf_Complex>::type,complex_double_or_float_mark>::value,int>::type = 0>
inline static void gauge_fix_site_op(CPSfermion<mf_Complex,DimensionPolicy,FlavorPolicy,AllocPolicy> &field, const int x4d[], const int &f, Lattice &lat, const bool dagger){
typedef typename mf_Complex::value_type mf_Float;
int i = x4d[0] + GJP.XnodeSites()*( x4d[1] + GJP.YnodeSites()* ( x4d[2] + GJP.ZnodeSites()*x4d[3] ) ); //lexicographic 4d index for the gauge-fixing matrix lookup
mf_Complex tmp[3];
const Matrix* gfmat = lat.FixGaugeMatrix(i,f);
mf_Complex* sc_base = (mf_Complex*)field.site_ptr(x4d,f); //if Dimension < 4 the site_ptr method will ignore the remaining indices. Make sure this is what you want
for(int s=0;s<4;s++){ //rotate the color vector of each of the 4 spin components
memcpy(tmp, sc_base + 3 * s, 3 * sizeof(mf_Complex));
if(!dagger)
colorMatrixMultiplyVector<mf_Float,Float>( (mf_Float*)(sc_base + 3*s), (Float*)gfmat, (mf_Float*)tmp);
else
colorMatrixDaggerMultiplyVector<mf_Float,Float>( (mf_Float*)(sc_base + 3*s), (Float*)gfmat, (mf_Float*)tmp);
}
}
#ifdef USE_GRID
template< typename mf_Complex, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy, typename my_enable_if<_equal<typename ComplexClassify<mf_Complex>::type,grid_vector_complex_mark>::value,int>::type = 0>
inline static void gauge_fix_site_op(CPSfermion<mf_Complex,DimensionPolicy,FlavorPolicy,AllocPolicy> &field, const int x4d[], const int &f, Lattice &lat, const bool dagger){
//x4d is an outer site index
int nsimd = field.Nsimd();
int ndim = DimensionPolicy::EuclideanDimension;
assert(ndim == 4);
//Assemble pointers to the GF matrices for each lane
std::vector<cps::Complex*> gf_base_ptrs(nsimd);
int x4d_lane[4];
int lane_off[4];
for(int lane=0;lane<nsimd;lane++){
field.SIMDunmap(lane, lane_off);
for(int xx=0;xx<4;xx++) x4d_lane[xx] = x4d[xx] + lane_off[xx];
int gf_off = x4d_lane[0] + GJP.XnodeSites()*( x4d_lane[1] + GJP.YnodeSites()* ( x4d_lane[2] + GJP.ZnodeSites()*x4d_lane[3] ) );
gf_base_ptrs[lane] = (cps::Complex*)lat.FixGaugeMatrix(gf_off,f);
}
//Poke the GFmatrix elements into SIMD vector objects
typedef typename mf_Complex::scalar_type stype;
stype* buf = (stype*)memalign(128, nsimd*sizeof(stype)); //aligned staging buffer for vset
mf_Complex gfmat[3][3];
for(int i=0;i<3;i++){
for(int j=0;j<3;j++){
for(int lane=0;lane<nsimd;lane++)
buf[lane] = *(gf_base_ptrs[lane] + j + 3*i); //element (i,j) of each lane's matrix
vset(gfmat[i][j], buf);
}
}
free(buf);
//Do the matrix multiplication
mf_Complex* tmp = (mf_Complex*)memalign(128, 3*sizeof(mf_Complex));
mf_Complex* sc_base = field.site_ptr(x4d,f);
for(int s=0;s<4;s++){
mf_Complex* s_base = sc_base + 3 * s;
memcpy(tmp, s_base, 3 * sizeof(mf_Complex));
if(!dagger)
for(int i=0;i<3;i++)
s_base[i] = gfmat[i][0]*tmp[0] + gfmat[i][1]*tmp[1] + gfmat[i][2]*tmp[2];
else
for(int i=0;i<3;i++)
s_base[i] = conjugate(gfmat[0][i])*tmp[0] + conjugate(gfmat[1][i])*tmp[1] + conjugate(gfmat[2][i])*tmp[2]; //dagger: conjugate-transpose multiply
}
free(tmp);
}
#endif
};
//Apply gauge fixing matrices to the field
template< typename mf_Complex, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
//Delegates to _gauge_fix_site_op_impl, which is overloaded on the complex-type category
void CPSfermion<mf_Complex,DimensionPolicy,FlavorPolicy,AllocPolicy>::gauge_fix_site_op(const int x4d[], const int &f, Lattice &lat, const bool dagger){
_gauge_fix_site_op_impl::gauge_fix_site_op(*this, x4d, f, lat,dagger);
}
template< typename mf_Complex, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
//Momentum quantum units for each of the three spatial directions: 2pi/(fac*L_glb)
//where fac depends on the boundary condition (1 periodic, 2 antiperiodic, 4 G-parity)
void CPSfermion<mf_Complex,DimensionPolicy,FlavorPolicy,AllocPolicy>::getMomentumUnits(double punits[3]){
for(int i=0;i<3;i++){
int fac;
if(GJP.Bc(i) == BND_CND_PRD) fac = 1;
else if(GJP.Bc(i) == BND_CND_APRD) fac = 2;
else if(GJP.Bc(i) == BND_CND_GPARITY) fac = 4;
else{ ERR.General("CPSfermion","getMomentumUnits","Unknown boundary condition"); }
punits[i] = 6.283185308/(GJP.NodeSites(i)*GJP.Nodes(i)*fac); // 2pi/(fac*L); constant is 2*pi to ~10 significant figures
}
}
//Apply the phase exp(-ip.x) to each site of this vector, where p is a *three momentum*
//The units of the momentum are 2pi/L for periodic BCs, pi/L for antiperiodic BCs and pi/2L for G-parity BCs
//x_lcl is the site in node lattice coords. 3 or more dimensions (those after 3 are ignored)
struct _apply_phase_site_op_impl{
//Scalar complex implementation
template< typename mf_Complex, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy, typename my_enable_if<_equal<typename ComplexClassify<mf_Complex>::type,complex_double_or_float_mark>::value,int>::type = 0>
inline static void apply_phase_site_op(CPSfermion<mf_Complex,DimensionPolicy,FlavorPolicy,AllocPolicy> &field, const int x_lcl[], const int &flav, const int p[], const double punits[]){
assert(DimensionPolicy::EuclideanDimension >= 3);
int x_glb[DimensionPolicy::EuclideanDimension]; for(int i=0;i<DimensionPolicy::EuclideanDimension;i++) x_glb[i] = x_lcl[i] + GJP.NodeCoor(i)*GJP.NodeSites(i); //global coordinate
double phi = 0;
for(int i=0;i<3;i++) phi += p[i]*punits[i]*x_glb[i];
std::complex<double> phase( cos(phi), -sin(phi) ); //exp(-i phi)
mf_Complex phase_prec(phase); //convert to the field's precision
mf_Complex *base = field.site_ptr(x_lcl,flav);
for(int sc=0;sc<12;sc++){ //all 12 spin-color components
mf_Complex* v = base + sc;
(*v) *= phase_prec;
}
}
#ifdef USE_GRID
//Grid SIMD implementation: x_lcl is the outer-site root coordinate; the phase is computed per lane
template< typename mf_Complex, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy, typename my_enable_if<_equal<typename ComplexClassify<mf_Complex>::type,grid_vector_complex_mark>::value,int>::type = 0>
inline static void apply_phase_site_op(CPSfermion<mf_Complex,DimensionPolicy,FlavorPolicy,AllocPolicy> &field, const int x_lcl[], const int &flav, const int p[], const double punits[]){
assert(DimensionPolicy::EuclideanDimension >= 3);
int nsimd = field.Nsimd();
typedef typename mf_Complex::scalar_type stype;
stype* buf = (stype*)memalign(128, nsimd*sizeof(stype)); //per-lane phases, vset into a SIMD vector below
int lane_off[DimensionPolicy::EuclideanDimension];
int x_gbl_lane[DimensionPolicy::EuclideanDimension];
for(int lane = 0; lane < nsimd; lane++){
field.SIMDunmap(lane, lane_off);
for(int xx=0;xx<DimensionPolicy::EuclideanDimension;xx++) x_gbl_lane[xx] = x_lcl[xx] + lane_off[xx] + GJP.NodeCoor(xx)*GJP.NodeSites(xx);
double phi = 0;
for(int i=0;i<3;i++) phi += p[i]*punits[i]*x_gbl_lane[i];
buf[lane] = stype( cos(phi), -sin(phi) );
}
mf_Complex vphase;
vset(vphase, buf);
free(buf);
mf_Complex* base = field.site_ptr(x_lcl,flav);
for(int sc=0;sc<12;sc++){ //all 12 spin-color components
mf_Complex* v = base + sc;
*v = vphase * (*v);
}
}
#endif
};
//Apply the phase exp(-ip.x) at one site, dispatching on the complex type
//(scalar vs Grid SIMD) via _apply_phase_site_op_impl.
template< typename mf_Complex, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
void CPSfermion<mf_Complex,DimensionPolicy,FlavorPolicy,AllocPolicy>::apply_phase_site_op(const int x_lcl[], const int &flav, const int p[], const double punits[]){
_apply_phase_site_op_impl::apply_phase_site_op(*this, x_lcl, flav, p, punits);
}
//Apply gauge fixing matrices to the field
//Gauge-fix one flavored site, addressed by the flattened flavor-site index fi:
//unmap fi to (x4d,f) and delegate to the base-class site operation.
template< typename mf_Complex, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
void CPSfermion4D<mf_Complex,DimensionPolicy,FlavorPolicy,AllocPolicy>::gauge_fix_site_op(int fi, Lattice &lat,const bool dagger){
int x4d[4]; int f; this->fsiteUnmap(fi,x4d,f);
CPSfermion<mf_Complex,DimensionPolicy,FlavorPolicy,AllocPolicy>::gauge_fix_site_op(x4d,f,lat,dagger);
}
//Apply the gauge fixing matrices (optionally daggered) to every flavored site
//of the field. The site loop is threaded with OpenMP when 'parallel' is true.
template< typename mf_Complex, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
void CPSfermion4D<mf_Complex,DimensionPolicy,FlavorPolicy,AllocPolicy>::gaugeFix(Lattice &lat, const bool parallel, const bool dagger){
  const int nfs = this->nfsites();
  if(parallel){
#pragma omp parallel for
    for(int fi = 0; fi < nfs; fi++) gauge_fix_site_op(fi, lat, dagger);
  }else{
    for(int fi = 0; fi < nfs; fi++) gauge_fix_site_op(fi, lat, dagger);
  }
}
//Apply the phase exp(-ip.x) to each site of this vector, where p is a *three momentum*
//The units of the momentum are 2pi/L for periodic BCs, pi/L for antiperiodic BCs and pi/2L for G-parity BCs
//Single-site worker for applyPhase: unmap the flattened flavor-site index sf
//to (x,f) and delegate to the base-class phase operation.
template< typename mf_Complex, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
void CPSfermion4D<mf_Complex,DimensionPolicy,FlavorPolicy,AllocPolicy>::apply_phase_site_op(int sf,const int p[],double punits[]){
int x[this->EuclideanDimension]; int f; this->fsiteUnmap(sf,x,f);
CPSfermion<mf_Complex,DimensionPolicy,FlavorPolicy,AllocPolicy>::apply_phase_site_op(x,f,p,punits);
}
//Apply the phase exp(-ip.x) to every site of the field; p is a three-momentum
//in the units returned by getMomentumUnits(). Threaded over sites if 'parallel'.
//Fixes: signature restored to 'const bool &parallel' (the source had the
//mojibake '¶llel' from a mangled '&parallel'); removed unused local 'fname'.
template< typename mf_Complex, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
void CPSfermion4D<mf_Complex,DimensionPolicy,FlavorPolicy,AllocPolicy>::applyPhase(const int p[], const bool &parallel){
  double punits[3];
  CPSfermion<mf_Complex,DimensionPolicy,FlavorPolicy,AllocPolicy>::getMomentumUnits(punits);
  if(parallel){
#pragma omp parallel for
    for(int sf=0;sf<this->nfsites();sf++)
      apply_phase_site_op(sf,p,punits);
  }else{
    for(int sf=0;sf<this->nfsites();sf++)
      apply_phase_site_op(sf,p,punits);
  }
}
//Set the real and imaginary parts to uniform random numbers drawn from the appropriate local RNGs
//Fill the field with uniform random numbers in the interval [lo,hi] drawn from
//the local site RNGs: every real and imaginary component of every spin-color
//element, site and flavor is overwritten.
template< typename mf_Complex, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
void CPSfermion4D<mf_Complex,DimensionPolicy,FlavorPolicy,AllocPolicy>::setUniformRandom(const Float &hi, const Float &lo){
  typedef typename mf_Complex::value_type mf_Float;
  LRG.SetInterval(hi,lo);
  //Same traversal order as a single flattened loop: flavor-major, site-minor
  for(int flav = 0; flav < this->flavors; ++flav){
    for(int st = 0; st < this->sites; ++st){
      LRG.AssignGenerator(st,flav);
      mf_Float *comp = (mf_Float*)this->site_ptr(st,flav);
      for(int c = 0; c < 2*FieldSiteSize; ++c)
        comp[c] = LRG.Urand(FOUR_D);
    }
  }
}
//Fill the field with Gaussian random numbers drawn from the local 4D site RNGs:
//every real and imaginary component of every site and flavor is overwritten.
template< typename mf_Complex, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
void CPSfermion4D<mf_Complex,DimensionPolicy,FlavorPolicy,AllocPolicy>::setGaussianRandom(){
  typedef typename mf_Complex::value_type mf_Float;
  //Same traversal order as a single flattened loop: flavor-major, site-minor
  for(int flav = 0; flav < this->flavors; ++flav){
    for(int st = 0; st < this->sites; ++st){
      LRG.AssignGenerator(st,flav);
      mf_Float *comp = (mf_Float*)this->site_ptr(st,flav);
      for(int c = 0; c < 2*FieldSiteSize; ++c)
        comp[c] = LRG.Grand(FOUR_D);
    }
  }
}
//Fill the 5D field with Gaussian random numbers drawn from the local 5D site
//RNGs: every real and imaginary component of every site and flavor is set.
template< typename mf_Complex, typename FlavorPolicy, typename AllocPolicy>
void CPSfermion5D<mf_Complex,FlavorPolicy,AllocPolicy>::setGaussianRandom(){
  typedef typename mf_Complex::value_type mf_Float;
  //Same traversal order as a single flattened loop: flavor-major, site-minor
  for(int flav = 0; flav < this->flavors; ++flav){
    for(int st = 0; st < this->sites; ++st){
      LRG.AssignGenerator(st,flav);
      mf_Float *comp = (mf_Float*)this->site_ptr(st,flav);
      for(int c = 0; c < 2*FieldSiteSize; ++c)
        comp[c] = LRG.Grand(FIVE_D);
    }
  }
}
//Gauge fix 3D fermion field with dynamic info type
//Gauge fix a 3D fermion field. For generic flavor policies the dynamic info 't'
//is the timeslice at which the gauge fixing matrices are evaluated.
//Fix: signature restored to 'const bool &parallel' (the source had the
//mojibake '¶llel' from a mangled '&parallel').
template< typename mf_Complex, typename FlavorPolicy, typename AllocPolicy>
struct _ferm3d_gfix_impl{
  static void gaugeFix(CPSfermion3D<mf_Complex,FlavorPolicy,AllocPolicy> &field, Lattice &lat, const typename GaugeFix3DInfo<FlavorPolicy>::InfoType &t, const bool &parallel){
    //A one-flavor field under G-parity is ambiguous: we cannot tell which
    //flavor's gauge fixing matrix to apply (see the one-flavor specialization)
    if(GJP.Gparity() && field.nflavors() == 1) ERR.General("CPSfermion3D","gaugeFix(Lattice &, const int &, const bool &)","For one flavor fields with G-parity enabled, to gauge fix we need to know the flavor of this field\n");
    //Loop body shared by the threaded and serial paths: unmap the flavored
    //site index, pin the time coordinate to t, and gauge fix the 3D site
#define LOOP \
    for(int fi=0;fi<field.nfsites();fi++){ \
      int x4d[4]; int f; field.fsiteUnmap(fi,x4d,f); \
      x4d[3] = t; \
      field.CPSfermion<mf_Complex,SpatialPolicy,FlavorPolicy>::gauge_fix_site_op(x4d,f,lat); \
    }
    if(parallel){
#pragma omp parallel for
      LOOP;
    }else{
      LOOP;
    }
#undef LOOP
  }
};
//Partial specialization for one flavor. We must provide the flavor index for the gauge fixing matrix, i.e. the flavor that this field represents
//One-flavor specialization: the info is a (time, flavor) pair so the caller
//states which flavor's gauge fixing matrix this field should receive.
//Fix: signature restored to 'const bool &parallel' (the source had the
//mojibake '¶llel' from a mangled '&parallel').
template< typename mf_Complex, typename AllocPolicy>
struct _ferm3d_gfix_impl<mf_Complex,FixedFlavorPolicy<1>,AllocPolicy>{
  static void gaugeFix(CPSfermion3D<mf_Complex,FixedFlavorPolicy<1>,AllocPolicy> &field, Lattice &lat, const typename GaugeFix3DInfo<FixedFlavorPolicy<1> >::InfoType &time_flav, const bool &parallel){
    printf("_ferm3d_gfix_impl::gauge_fix with time=%d, flav=%d\n",time_flav.first,time_flav.second);
    typedef typename mf_Complex::value_type mf_Float;
    //Per-site operation: for each of the 4 spinor components (3 colors each),
    //left-multiply by the gauge fixing matrix at the matching 4D site
#define SITE_OP \
    int x4d[4]; field.siteUnmap(i,x4d); \
    x4d[3] = time_flav.first; \
    int gfmat_site = x4d[0] + GJP.XnodeSites()*( x4d[1] + GJP.YnodeSites()* ( x4d[2] + GJP.ZnodeSites()*x4d[3] )); \
    mf_Complex tmp[3]; \
    const Matrix* gfmat = lat.FixGaugeMatrix(gfmat_site,time_flav.second); \
    mf_Complex* sc_base = field.site_ptr(x4d); \
    for(int s=0;s<4;s++){ \
      memcpy(tmp, sc_base + 3 * s, 3 * sizeof(mf_Complex)); \
      colorMatrixMultiplyVector<mf_Float,Float>( (mf_Float*)(sc_base + 3*s), (Float*)gfmat, (mf_Float*)tmp); \
    }
    if(parallel){
#pragma omp parallel for
      for(int i=0;i<field.nsites();i++){
	SITE_OP;
      }
    }else{
      for(int i=0;i<field.nsites();i++){
	SITE_OP;
      }
    }
#undef SITE_OP
  }
};
//Gauge fix the 3D field, dispatching to the _ferm3d_gfix_impl specialization
//appropriate for this flavor policy.
//Fix: signature restored to 'const bool &parallel' (the source had the
//mojibake '¶llel' from a mangled '&parallel').
template< typename mf_Complex, typename FlavorPolicy, typename AllocPolicy>
void CPSfermion3D<mf_Complex,FlavorPolicy,AllocPolicy>::gaugeFix(Lattice &lat, const typename GaugeFix3DInfo<FlavorPolicy>::InfoType &t, const bool &parallel){
  _ferm3d_gfix_impl<mf_Complex,FlavorPolicy,AllocPolicy>::gaugeFix(*this,lat,t,parallel);
}
//Apply the phase exp(-ip.x) to each site of this vector, where p is a *three momentum*
//The units of the momentum are 2pi/L for periodic BCs, pi/L for antiperiodic BCs and pi/2L for G-parity BCs
//Single-site worker for applyPhase: unmap the flattened flavor-site index sf
//and delegate to the spatial base-class phase operation.
template< typename mf_Complex, typename FlavorPolicy, typename AllocPolicy>
void CPSfermion3D<mf_Complex,FlavorPolicy,AllocPolicy>::apply_phase_site_op(const int &sf,const int p[],double punits[]){
int x[this->Dimension]; int f; this->fsiteUnmap(sf,x,f);
CPSfermion<mf_Complex,SpatialPolicy,FlavorPolicy,AllocPolicy>::apply_phase_site_op(x,f,p,punits);
}
//Apply the phase exp(-ip.x) to every site; p is a three-momentum in the units
//returned by getMomentumUnits(). Threaded over sites if 'parallel'.
//Fixes: signature restored to 'const bool &parallel' (the source had the
//mojibake '¶llel' from a mangled '&parallel'); removed unused local 'fname'.
template< typename mf_Complex, typename FlavorPolicy, typename AllocPolicy>
void CPSfermion3D<mf_Complex,FlavorPolicy,AllocPolicy>::applyPhase(const int p[], const bool &parallel){
  double punits[3];
  CPSfermion<mf_Complex,SpatialPolicy,FlavorPolicy>::getMomentumUnits(punits);
  if(parallel){
#pragma omp parallel for
    for(int sf=0;sf<this->nfsites();sf++)
      apply_phase_site_op(sf,p,punits);
  }else{
    for(int sf=0;sf<this->nfsites();sf++)
      apply_phase_site_op(sf,p,punits);
  }
}
//Make a random complex scalar field of type
//Fill the complex field with random numbers of the requested distribution,
//drawn from the appropriate local 4D RNGs (interval fixed to [0,1]).
template< typename mf_Complex, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
void CPScomplex4D<mf_Complex,DimensionPolicy,FlavorPolicy,AllocPolicy>::setRandom(const RandomType &type){
  LRG.SetInterval(1, 0);
  //Same traversal order as a single flattened loop: flavor-major, site-minor
  for(int flav = 0; flav < this->flavors; ++flav){
    for(int st = 0; st < this->sites; ++st){
      LRG.AssignGenerator(st,flav);
      mf_Complex *site = this->site_ptr(st,flav);
      RandomComplex<mf_Complex>::rand(site,type,FOUR_D);
    }
  }
}
//Set the real and imaginary parts to uniform random numbers drawn from the appropriate local RNGs
//Set the real and imaginary parts of every site to uniform random numbers in
//[lo,hi] drawn from the appropriate local RNGs.
//Fix: the inner loop variable previously shadowed the outer index 'i'
//(-Wshadow); renamed to 'reim'. Behavior is unchanged.
template< typename mf_Complex, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
void CPScomplex4D<mf_Complex,DimensionPolicy,FlavorPolicy,AllocPolicy>::setUniformRandom(const Float &hi, const Float &lo){
  typedef typename mf_Complex::value_type mf_Float;
  LRG.SetInterval(hi,lo);
  for(int i = 0; i < this->sites*this->flavors; ++i) {
    int flav = i / this->sites;
    int st = i % this->sites;
    LRG.AssignGenerator(st,flav);
    mf_Float *p = (mf_Float*)this->site_ptr(st,flav);
    //One draw each for the real and imaginary components
    for(int reim=0;reim<2;reim++)
      *(p++) = LRG.Urand(FOUR_D);
  }
}
//Perform the FFT
//Perform an in-place 3D FFT of this globally-complete spatial field, one
//transform per flavor, using FFTW.
template< typename mf_Complex, typename FlavorPolicy, typename AllocPolicy>
void CPSglobalComplexSpatial<mf_Complex,FlavorPolicy,AllocPolicy>::fft(){
typedef typename mf_Complex::value_type mf_Float;
//FFTW wants the fastest-changing dimension last, hence the reversed order
const int fft_dim[3] = {this->glb_size[2], this->glb_size[1], this->glb_size[0]};
const int size_3d_glb = fft_dim[0] * fft_dim[1] * fft_dim[2];
size_t this_floatsize = this->size() * 2;
//NOTE(review): alloc_complex is given a count in float units (size()*2) but
//allocates complex elements — apparently a 2x over-allocation; confirm intent
typename FFTWwrapper<mf_Float>::complexType* fft_mem = FFTWwrapper<mf_Float>::alloc_complex(this_floatsize);
memcpy((void *)fft_mem, this->ptr(), this_floatsize*sizeof(mf_Float));
//Plan creation is expensive, so make it static
//NOTE(review): the plan is created once and never refreshed — assumes the
//global lattice dimensions do not change over the program run
static typename FFTWwrapper<mf_Float>::planType plan_src;
static bool init = false;
if(!init){
plan_src = FFTWwrapper<mf_Float>::plan_many_dft(3, fft_dim, 1,
fft_mem, NULL, 1, size_3d_glb,
fft_mem, NULL, 1, size_3d_glb,
FFTW_FORWARD, FFTW_ESTIMATE);
init = true;
}
//One in-place transform per flavor, each offset by a full 3D volume
for(int f = 0; f < this->nflavors(); f++) {
int off = f * size_3d_glb;
FFTWwrapper<mf_Float>::execute_dft(plan_src, fft_mem + off, fft_mem + off);
}
memcpy((void *)this->ptr(), (void*)fft_mem, this_floatsize*sizeof(mf_Float));
FFTWwrapper<mf_Float>::free(fft_mem);
//FFTWwrapper<mf_Float>::cleanup(); //Don't need to cleanup, it doesn't have the function I initially thought
}
//Scatter to a local field
//Scatter dispatch: primary template, specialized below on the destination
//complex-type category (scalar vs Grid SIMD) and its Euclidean dimension.
template< typename mf_Complex, typename FlavorPolicy, typename AllocPolicy,
typename extComplex, typename extDimPolicy, typename extAllocPolicy,
typename complex_class, int extEuclDim>
struct _CPSglobalComplexSpatial_scatter_impl{};
//Standard implementation for std::complex
//Scatter the globally-complete 3D field into a node-local field: std::complex
//destination. Each local site is filled from the matching global site.
template< typename mf_Complex, typename FlavorPolicy, typename AllocPolicy,
	  typename extComplex, typename extDimPolicy, typename extAllocPolicy>
struct _CPSglobalComplexSpatial_scatter_impl<mf_Complex,FlavorPolicy,AllocPolicy, extComplex, extDimPolicy, extAllocPolicy, complex_double_or_float_mark, 3>{
  static void doit(CPSfield<extComplex,1,extDimPolicy,FlavorPolicy,extAllocPolicy> &to, const CPSglobalComplexSpatial<mf_Complex,FlavorPolicy,AllocPolicy> &from){
    const char *fname = "scatter(...)";
    //Global coordinate of this node's origin
    int node_orig[3];
    for(int d=0;d<3;d++) node_orig[d] = GJP.NodeSites(d)*GJP.NodeCoor(d);
#pragma omp parallel for
    for(int s=0;s<to.nfsites();s++){
      int xglb[3]; int flav;
      to.fsiteUnmap(s,xglb,flav);                    //local coordinate of target site
      for(int d=0;d<3;d++) xglb[d] += node_orig[d];  //promote to global coordinate
      *to.fsite_ptr(s) = *from.site_ptr(xglb,flav);
    }
  }
};
#ifdef USE_GRID
//Implementation for Grid vector complex types
//Scatter the globally-complete 3D field into a node-local SIMD-vectorized
//field: for each root site, gather pointers to the sites mapped onto each SIMD
//lane and pack them into the destination vector element.
//Fix: the inner lane loop previously re-used 'i', shadowing the OpenMP loop
//index (-Wshadow); renamed to 'lane'. Behavior is unchanged.
template< typename mf_Complex, typename FlavorPolicy, typename AllocPolicy,
	  typename extComplex, typename extDimPolicy, typename extAllocPolicy>
struct _CPSglobalComplexSpatial_scatter_impl<mf_Complex,FlavorPolicy,AllocPolicy, extComplex, extDimPolicy, extAllocPolicy, grid_vector_complex_mark, 3>{
  static void doit(CPSfield<extComplex,1,extDimPolicy,FlavorPolicy,extAllocPolicy> &to, const CPSglobalComplexSpatial<mf_Complex,FlavorPolicy,AllocPolicy> &from){
    const char *fname = "scatter(...)";
    int orig[3]; for(int i=0;i<3;i++) orig[i] = GJP.NodeSites(i)*GJP.NodeCoor(i);
    const int ndim = 3;
    int nsimd = extComplex::Nsimd();
    //Coordinate offsets of the sites packed into each SIMD lane
    std::vector<std::vector<int> > packed_offsets(nsimd,std::vector<int>(ndim));
    for(int lane=0;lane<nsimd;lane++) to.SIMDunmap(lane,&packed_offsets[lane][0]);
#pragma omp parallel for
    for(int i=0;i<to.nfsites();i++){
      int x[3]; int flavor; to.fsiteUnmap(i,x,flavor); //unmap the target coordinate: the root coordinate for SIMD lane 0
      for(int j=0;j<3;j++) x[j] += orig[j]; //global coord
      extComplex* toptr = to.fsite_ptr(i);
      //Source pointer for each lane: root site plus the lane's offset
      std::vector<mf_Complex const*> ptrs(nsimd);
      ptrs[0] = from.site_ptr(x,flavor);
      int xx[ndim];
      for(int lane=1;lane<nsimd;lane++){
	for(int d=0;d<ndim;d++)
	  xx[d] = x[d] + packed_offsets[lane][d]; //xx = x + offset
	ptrs[lane] = from.site_ptr(xx,flavor);
      }
      to.SIMDpack(toptr, ptrs, 1);
    }
  }
};
#endif
//Scatter this globally-complete 3D field into a node-local field 'to',
//dispatching on the destination's complex-type category and dimension.
template< typename mf_Complex, typename FlavorPolicy, typename AllocPolicy>
template<typename extComplex, typename extDimPolicy, typename extAllocPolicy>
void CPSglobalComplexSpatial<mf_Complex,FlavorPolicy,AllocPolicy>::scatter(CPSfield<extComplex,1,extDimPolicy,FlavorPolicy,extAllocPolicy> &to) const{
_CPSglobalComplexSpatial_scatter_impl<mf_Complex,FlavorPolicy,AllocPolicy,
extComplex,extDimPolicy,extAllocPolicy,
typename ComplexClassify<extComplex>::type,
extDimPolicy::EuclideanDimension>::doit(to,*this);
}
//Generic gather/scatter between a CPSfieldGlobalInOneDir and an external field
//whose site type or policies differ: convert through a temporary field with
//the equivalent local dimension policy, then recurse into the matched
//specialization below which does the actual work.
template< typename SiteType, int SiteSize, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy,
typename extSiteType, typename extDimPol, typename extAllocPol,
typename my_enable_if<intEq<DimensionPolicy::EuclideanDimension,extDimPol::EuclideanDimension>::val, int>::type = 0>
struct _gather_scatter_impl{
typedef typename DimensionPolicy::EquivalentLocalPolicy EquivalentLocalPolicy;
//Gather: import the external field into a local-policy temporary, then gather
static void gather(CPSfieldGlobalInOneDir<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> &into, const CPSfield<extSiteType,SiteSize,extDimPol,FlavorPolicy,extAllocPol> &from){
NullObject n;
CPSfield<SiteType,SiteSize,EquivalentLocalPolicy,FlavorPolicy,AllocPolicy> tmp(n);
tmp.importField(from);
_gather_scatter_impl<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy,
SiteType, EquivalentLocalPolicy, AllocPolicy>::gather(into, tmp);
}
//Scatter: scatter into a local-policy temporary, then export to the target
static void scatter(CPSfield<extSiteType,SiteSize,extDimPol,FlavorPolicy,extAllocPol> &to, const CPSfieldGlobalInOneDir<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> &from){
NullObject n;
CPSfield<SiteType,SiteSize,EquivalentLocalPolicy,FlavorPolicy,AllocPolicy> tmp(n);
_gather_scatter_impl<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy,
SiteType, EquivalentLocalPolicy, AllocPolicy>::scatter(tmp, from);
to.importField(tmp);
}
};
//Matched specialization: source/destination already use the equivalent local
//policy, so gather/scatter operate directly. gather() performs the internode
//communication: data is cyclically shifted through all nodes along 'dir' and
//each pass copies the currently-held slab into its global position.
template< typename SiteType, int SiteSize, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy,
typename my_enable_if<intEq<DimensionPolicy::EuclideanDimension,DimensionPolicy::EquivalentLocalPolicy::EuclideanDimension>::val, int>::type test>
struct _gather_scatter_impl<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy,
SiteType, typename DimensionPolicy::EquivalentLocalPolicy, AllocPolicy, test>{
typedef typename DimensionPolicy::EquivalentLocalPolicy LocalDimensionPolicy;
static void gather(CPSfieldGlobalInOneDir<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> &into, const CPSfield<SiteType,SiteSize,LocalDimensionPolicy,FlavorPolicy,AllocPolicy> &from){
assert(LocalDimensionPolicy::EuclideanDimension == DimensionPolicy::EuclideanDimension);
const int &dir = into.getDir();
const char *fname = "gather(...)";
//Double-buffering: 'send' starts as the input field (const_cast is safe: the
//initial buffer is only read before being replaced by tmp2 on the first swap)
NullObject nullobj;
CPSfield<SiteType,SiteSize,LocalDimensionPolicy,FlavorPolicy,AllocPolicy> tmp1(nullobj);
CPSfield<SiteType,SiteSize,LocalDimensionPolicy,FlavorPolicy,AllocPolicy> tmp2(nullobj);
CPSfield<SiteType,SiteSize,LocalDimensionPolicy,FlavorPolicy,AllocPolicy>* send = const_cast<CPSfield<SiteType,SiteSize,LocalDimensionPolicy,FlavorPolicy,AllocPolicy>* >(&from);
CPSfield<SiteType,SiteSize,LocalDimensionPolicy,FlavorPolicy,AllocPolicy>* recv = &tmp2;
//Global origin (along dir) of the slab currently held in 'send'
int cur_dir_origin = GJP.NodeSites(dir)*GJP.NodeCoor(dir);
int size_in_Float = from.size() * sizeof(SiteType) / sizeof(IFloat); //getPlusData measures the send/recv size in units of sizeof(IFloat)
int nshift = GJP.Nodes(dir);
for(int shift = 0; shift < nshift; shift++){
//Copy the currently-held slab into its place in the global field
#pragma omp parallel for
for(int i=0;i<send->nfsites();i++){
int x[DimensionPolicy::EuclideanDimension]; int flavor; send->fsiteUnmap(i,x,flavor); //unmap the buffer coordinate
x[dir] += cur_dir_origin; //now a global coordinate in the dir direction
SiteType* tosite = into.site_ptr(x,flavor);
SiteType* fromsite = send->fsite_ptr(i);
memcpy((void*)tosite, (void*)fromsite, into.siteSize()*sizeof(SiteType));
}
if(shift != nshift-1){
//Pull the next slab from the neighboring node and advance the origin
getPlusData((IFloat*)recv->ptr(), (IFloat*)send->ptr(), size_in_Float, dir);
cur_dir_origin += GJP.NodeSites(dir);
cur_dir_origin %= (GJP.NodeSites(dir)*GJP.Nodes(dir));
if(shift == 0){
//After the first receive, stop aliasing 'from' and ping-pong tmp1/tmp2
recv = &tmp1;
send = &tmp2;
}else std::swap(send,recv);
}
}
}
//Scatter: purely local — each node copies its own slab out of the global field
static void scatter(CPSfield<SiteType,SiteSize,LocalDimensionPolicy,FlavorPolicy,AllocPolicy> &to, const CPSfieldGlobalInOneDir<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> &from){
assert(LocalDimensionPolicy::EuclideanDimension == DimensionPolicy::EuclideanDimension);
const int &dir = from.getDir();
const char *fname = "scatter(...)";
int cur_dir_origin = GJP.NodeSites(dir)*GJP.NodeCoor(dir);
#pragma omp parallel for
for(int i=0;i<to.nfsites();i++){
int x[DimensionPolicy::EuclideanDimension]; int flavor; to.fsiteUnmap(i,x, flavor); //unmap the target coordinate
x[dir] += cur_dir_origin; //now a global coordinate in the dir direction
SiteType* tosite = to.fsite_ptr(i);
SiteType const* fromsite = from.site_ptr(x,flavor);
memcpy((void*)tosite, (void*)fromsite, from.siteSize()*sizeof(SiteType));
}
}
};
//Gather up the row. Involves internode communication
//Gather the full line of sites along the principal direction from all nodes
//into this field. Involves internode communication (in _gather_scatter_impl).
template< typename SiteType, int SiteSize, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
template<typename extSiteType, typename extDimPol, typename extAllocPol>
void CPSfieldGlobalInOneDir<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy>::gather(const CPSfield<extSiteType,SiteSize,extDimPol,FlavorPolicy,extAllocPol> &from){
_gather_scatter_impl<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy,
extSiteType, extDimPol, extAllocPol>::gather(*this, from);
}
//Scatter back out. Involves no communication
//Scatter this field back out to a node-local field. No communication: each
//node simply extracts its own slab.
template< typename SiteType, int SiteSize, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
template<typename extSiteType, typename extDimPol, typename extAllocPol>
void CPSfieldGlobalInOneDir<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy>::scatter(CPSfield<extSiteType,SiteSize,extDimPol,FlavorPolicy,extAllocPol> &to) const{
_gather_scatter_impl<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy,
extSiteType, extDimPol, extAllocPol>::scatter(to, *this);
}
#define FFT_MULTI
#ifndef FFT_MULTI
//Perform a fast Fourier transform along the principal direction
//NOTE: This won't work correctly if the DimensionPolicy does not use canonical ordering: FIXME
//Assumes SiteType is a std::complex type
//Perform a fast Fourier transform along the principal direction
//NOTE: This won't work correctly if the DimensionPolicy does not use canonical ordering: FIXME
//Assumes SiteType is a std::complex type. One strided 1D FFT is issued per
//(site-component, 3d-block, flavor) combination.
template< typename SiteType, int SiteSize, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
void CPSfieldGlobalInOneDir<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy>::fft(const bool inverse_transform){
const int dir = this->getDir();
const char* fname = "fft()";
//We do a large number of simple linear FFTs. This field has its principal direction as the fastest changing index so this is nice and easy
int sc_size = this->siteSize(); //we have to assume the sites comprise complex numbers
int size_1d_glb = GJP.NodeSites(dir) * GJP.Nodes(dir);
const int n_fft = this->nsites() / GJP.NodeSites(dir) * sc_size * this->nflavors();
//Plan creation is expensive, so make it static and only re-create if the field size changes
//Create a plan for each direction because we can have non-cubic spatial volumes
static FFTplanContainer<typename SiteType::value_type> plan_f[4];
static bool plan_init = false;
static int plan_sc_size;
static bool plan_inv_trans;
if(!plan_init || sc_size != plan_sc_size || inverse_transform != plan_inv_trans){ //recreate/create
//NOTE(review): tmp_f is passed to the planner uninitialized; assumed safe
//because FFTW_ESTIMATE plans do not touch the arrays — confirm
typename FFTWwrapper<typename SiteType::value_type>::complexType *tmp_f; //I don't think it actually does anything with this
for(int i=0;i<4;i++){
int size_i = GJP.NodeSites(i) * GJP.Nodes(i);
plan_f[i].setPlan(1, &size_i, 1,
tmp_f, NULL, sc_size, size_i * sc_size,
tmp_f, NULL, sc_size, size_i * sc_size,
inverse_transform ? FFTW_BACKWARD : FFTW_FORWARD, FFTW_ESTIMATE);
}
plan_sc_size = sc_size;
plan_inv_trans = inverse_transform;
plan_init = true;
}
//Copy the field into FFTW-aligned memory, transform in place, copy back
typename FFTWwrapper<typename SiteType::value_type>::complexType *fftw_mem = FFTWwrapper<typename SiteType::value_type>::alloc_complex(size_1d_glb * n_fft);
memcpy((void *)fftw_mem, this->ptr(), this->size()*sizeof(SiteType));
#pragma omp parallel for
for(int n = 0; n < n_fft; n++) {
int sc_id = n % sc_size;
int chunk_id = n / sc_size; //3d block index
int off = size_1d_glb * sc_size * chunk_id + sc_id;
FFTWwrapper<typename SiteType::value_type>::execute_dft(plan_f[dir].getPlan(), fftw_mem + off, fftw_mem + off);
}
//FFTWwrapper<SiteType>::cleanup(); //I think this actually destroys existing plans!
//Inverse transforms are normalized by 1/L here (FFTW leaves them unnormalized)
if(!inverse_transform) memcpy(this->ptr(), (void *)fftw_mem, this->size()*sizeof(SiteType));
else for(int i=0;i<this->size();i++) this->ptr()[i] = *( (SiteType*)fftw_mem+i )/double(size_1d_glb);
FFTWwrapper<typename SiteType::value_type>::free(fftw_mem);
}
#else
//FFT_MULTI variant: uses FFTW's 'howmany' interface so that all sc_size site
//components of a 3d block are transformed by a single plan execution.
template< typename SiteType, int SiteSize, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
void CPSfieldGlobalInOneDir<SiteType,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy>::fft(const bool inverse_transform){
const int dir = this->getDir();
const char* fname = "fft()";
//We do a large number of simple linear FFTs. This field has its principal direction as the fastest changing index so this is nice and easy
int sc_size = this->siteSize(); //we have to assume the sites comprise complex numbers
int size_1d_glb = GJP.NodeSites(dir) * GJP.Nodes(dir);
const int n_fft = this->nsites() / GJP.NodeSites(dir) * this->nflavors();
//Plan creation is expensive, so make it static and only re-create if the field size changes
//Create a plan for each direction because we can have non-cubic spatial volumes
static FFTplanContainer<typename SiteType::value_type> plan_f[4];
static bool plan_init = false;
static int plan_sc_size;
static bool plan_inv_trans;
if(!plan_init || sc_size != plan_sc_size || inverse_transform != plan_inv_trans){ //recreate/create
//NOTE(review): tmp_f is passed to the planner uninitialized; assumed safe
//because FFTW_ESTIMATE plans do not touch the arrays — confirm
typename FFTWwrapper<typename SiteType::value_type>::complexType *tmp_f; //I don't think it actually does anything with this
for(int i=0;i<4;i++){
int size_i = GJP.NodeSites(i) * GJP.Nodes(i);
plan_f[i].setPlan(1, &size_i, sc_size,
tmp_f, NULL, sc_size, 1,
tmp_f, NULL, sc_size, 1,
inverse_transform ? FFTW_BACKWARD : FFTW_FORWARD, FFTW_ESTIMATE);
}
plan_sc_size = sc_size;
plan_inv_trans = inverse_transform;
plan_init = true;
}
//Copy the field into FFTW-aligned memory, transform in place, copy back
typename FFTWwrapper<typename SiteType::value_type>::complexType *fftw_mem = FFTWwrapper<typename SiteType::value_type>::alloc_complex(size_1d_glb * n_fft * sc_size);
memcpy((void *)fftw_mem, this->ptr(), this->size()*sizeof(SiteType));
#pragma omp parallel for
for(int n = 0; n < n_fft; n++) {
int chunk_id = n; //3d block index
int off = size_1d_glb * sc_size * chunk_id;
FFTWwrapper<typename SiteType::value_type>::execute_dft(plan_f[dir].getPlan(), fftw_mem + off, fftw_mem + off);
}
//Inverse transforms are normalized by 1/L here (FFTW leaves them unnormalized)
if(!inverse_transform) memcpy(this->ptr(), (void *)fftw_mem, this->size()*sizeof(SiteType));
else for(int i=0;i<this->size();i++) this->ptr()[i] = *( (SiteType*)fftw_mem+i )/double(size_1d_glb);
FFTWwrapper<typename SiteType::value_type>::free(fftw_mem);
}
#endif
#endif
|
TopologicalSimplification.h | /// \ingroup base
/// \class ttk::TopologicalSimplification
/// \author Julien Tierny <julien.tierny@lip6.fr>
/// \author Guillaume Favelier <guillaume.favelier@lip6.fr>
/// \date February 2016
///
/// \brief TTK processing package for the topological simplification of scalar
/// data.
///
/// Given an input scalar field and a list of critical points to remove, this
/// class minimally edits the scalar field such that the listed critical points
/// disappear. This procedure is useful to speedup subsequent topological data
/// analysis when outlier critical points can be easily identified. It is
/// also useful for data simplification.
///
/// \b Related \b publication \n
/// "Generalized Topological Simplification of Scalar Fields on Surfaces" \n
/// Julien Tierny, Valerio Pascucci \n
/// Proc. of IEEE VIS 2012.\n
/// IEEE Transactions on Visualization and Computer Graphics, 2012.
///
/// \sa ttkTopologicalSimplification.cpp %for a usage example.
#ifndef _TOPOLOGICALSIMPLIFICATION_H
#define _TOPOLOGICALSIMPLIFICATION_H
// base code includes
#include <Wrapper.h>
#include <Triangulation.h>
#include <algorithm>
#include <cfloat>
#include <cmath>
#include <set>
#include <tuple>
#include <type_traits>
namespace ttk{
//Comparison functor for the simplification sweep front: orders
//(scalar value, offset, vertex id) tuples by value, breaking ties on offset,
//in either increasing or decreasing order.
struct SweepCmp{
private :
bool isIncreasingOrder_;
public:
SweepCmp() : isIncreasingOrder_{} {}
SweepCmp(bool isIncreasingOrder) : isIncreasingOrder_{isIncreasingOrder} {}
int setIsIncreasingOrder(bool isIncreasingOrder){
isIncreasingOrder_ = isIncreasingOrder;
return 0;
}
//Strict weak ordering on (value, offset); the vertex id (slot 2) is ignored.
template <typename dataType>
bool operator() (const std::tuple<dataType,SimplexId,SimplexId> &v0,
const std::tuple<dataType,SimplexId,SimplexId> &v1) const{
const dataType &a = std::get<0>(v0);
const dataType &b = std::get<0>(v1);
if(isIncreasingOrder_)
return a < b or (a == b and std::get<1>(v0) < std::get<1>(v1));
return a > b or (a == b and std::get<1>(v0) > std::get<1>(v1));
}
};
//Topological simplification of a scalar field: minimally edits the field so
//that a user-provided list of critical points disappears. See the class-level
//Doxygen block above for the related publication.
class TopologicalSimplification : public Debug{
public:
TopologicalSimplification();
~TopologicalSimplification();
//True if vertex a precedes vertex b in the (scalar, offset) total order.
template <typename dataType>
bool isLowerThan(SimplexId a, SimplexId b, dataType* scalars, SimplexId* offsets) const;
//True if vertex a follows vertex b in the (scalar, offset) total order.
template <typename dataType>
bool isHigherThan(SimplexId a, SimplexId b, dataType* scalars, SimplexId* offsets) const;
//Classify a vertex: -1 minimum, +1 maximum, 0 regular.
template <typename dataType>
int getCriticalType(SimplexId vertexId, dataType* scalars, SimplexId* offsets) const;
//Collect all minima and maxima of the field.
template <typename dataType>
int getCriticalPoints(dataType* scalars,
SimplexId* offsets,
std::vector<SimplexId>& minList,
std::vector<SimplexId>& maxList) const;
//Collect minima/maxima restricted (or complementary, depending on
//considerIdentifierAsBlackList_) to the vertices flagged in blackList.
template <typename dataType>
int getCriticalPoints(dataType* scalars,
SimplexId* offsets,
std::vector<SimplexId>& minList,
std::vector<SimplexId>& maxList,
std::vector<bool>& blackList) const;
//Add a minimal monotone perturbation so scalar values become pairwise distinct.
template <typename dataType>
int addPerturbation(dataType* scalars, SimplexId* offsets) const;
//Run the simplification using the pointers configured via the setters below.
template <typename dataType, typename idType>
int execute() const;
//Attach the triangulation and trigger the preprocessing it requires.
inline int setupTriangulation(Triangulation* triangulation){
triangulation_=triangulation;
if(triangulation_){
vertexNumber_ = triangulation_->getNumberOfVertices();
triangulation_->preprocessVertexNeighbors();
}
return 0;
}
inline int setVertexNumber(SimplexId vertexNumber){
vertexNumber_=vertexNumber;
return 0;
}
inline int setConstraintNumber(SimplexId constraintNumber){
constraintNumber_=constraintNumber;
return 0;
}
inline int setInputScalarFieldPointer(void *data){
inputScalarFieldPointer_=data;
return 0;
}
inline int setVertexIdentifierScalarFieldPointer(void* data){
vertexIdentifierScalarFieldPointer_=data;
return 0;
}
inline int setInputOffsetScalarFieldPointer(void* data){
inputOffsetScalarFieldPointer_=data;
return 0;
}
inline int setConsiderIdentifierAsBlackList(bool onOff){
considerIdentifierAsBlackList_=onOff;
return 0;
}
inline int setAddPerturbation(bool onOff){
addPerturbation_=onOff;
return 0;
}
inline int setOutputScalarFieldPointer(void *data){
outputScalarFieldPointer_=data;
return 0;
}
inline int setOutputOffsetScalarFieldPointer(void *data){
outputOffsetScalarFieldPointer_=data;
return 0;
}
protected:
Triangulation* triangulation_;      // not owned
SimplexId vertexNumber_;            // number of vertices in the triangulation
SimplexId constraintNumber_;        // number of user-constrained extrema
void* inputScalarFieldPointer_;     // typed at execute<dataType,idType>() time
void* vertexIdentifierScalarFieldPointer_;
void* inputOffsetScalarFieldPointer_;
bool considerIdentifierAsBlackList_;
bool addPerturbation_;
void* outputScalarFieldPointer_;
void* outputOffsetScalarFieldPointer_;
};
}
// if the package is a pure template typename, uncomment the following line
// #include <TopologicalSimplification.cpp>
//Total order on vertices: compare scalar values, breaking ties with offsets.
template <typename dataType>
bool ttk::TopologicalSimplification::isLowerThan(SimplexId a, SimplexId b, dataType*
  scalars, SimplexId* offsets) const{
  if(scalars[a] != scalars[b])
    return scalars[a] < scalars[b];
  return offsets[a] < offsets[b];
}
//Total order on vertices (reversed): compare scalar values, ties by offsets.
template <typename dataType>
bool ttk::TopologicalSimplification::isHigherThan(SimplexId a, SimplexId b, dataType*
  scalars, SimplexId* offsets) const{
  if(scalars[a] != scalars[b])
    return scalars[a] > scalars[b];
  return offsets[a] > offsets[b];
}
//Classify a vertex by scanning its triangulation neighborhood:
//returns -1 for a minimum, +1 for a maximum, 0 for a regular vertex.
template <typename dataType>
int ttk::TopologicalSimplification::getCriticalType(SimplexId vertex, dataType*
  scalars, SimplexId* offsets) const{
  bool couldBeMin = true;
  bool couldBeMax = true;
  const SimplexId nNeigh = triangulation_->getVertexNeighborNumber(vertex);
  for(SimplexId n = 0; n < nNeigh; ++n){
    SimplexId neigh;
    triangulation_->getVertexNeighbor(vertex, n, neigh);
    //A lower neighbor rules out "minimum"; a higher one rules out "maximum"
    if(isLowerThan<dataType>(neigh, vertex, scalars, offsets)) couldBeMin = false;
    if(isHigherThan<dataType>(neigh, vertex, scalars, offsets)) couldBeMax = false;
    if(not couldBeMin and not couldBeMax) return 0; //regular: bail out early
  }
  if(couldBeMin) return -1;
  if(couldBeMax) return 1;
  return 0;
}
template <typename dataType>
int ttk::TopologicalSimplification::getCriticalPoints(dataType* scalars,
SimplexId* offsets,
std::vector<SimplexId>& minima,
std::vector<SimplexId>& maxima) const{
std::vector<int> type(vertexNumber_, 0);
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for
#endif
for(SimplexId k=0; k<vertexNumber_; ++k)
type[k]=getCriticalType<dataType>(k,scalars,offsets);
for(SimplexId k=0; k<vertexNumber_; ++k){
if(type[k]<0) minima.push_back(k);
else if(type[k]>0) maxima.push_back(k);
}
return 0;
}
template <typename dataType>
int ttk::TopologicalSimplification::getCriticalPoints(dataType* scalars,
SimplexId* offsets,
std::vector<SimplexId>& minima,
std::vector<SimplexId>& maxima,
std::vector<bool>& extrema) const{
std::vector<int> type(vertexNumber_);
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
for(SimplexId k=0; k<vertexNumber_; ++k){
if(considerIdentifierAsBlackList_ xor extrema[k]){
type[k]=getCriticalType<dataType>(k,scalars,offsets);
}
}
for(SimplexId k=0; k<vertexNumber_; ++k){
if(type[k]<0) minima.push_back(k);
else if(type[k]>0) maxima.push_back(k);
}
return 0;
}
//Break scalar-value ties by adding a minimal monotone perturbation: vertices
//are sorted by (value, offset) and any non-increasing value is bumped by
//epsilon, so scalar values become pairwise distinct while preserving the
//(value, offset) order. Only float and double fields are supported;
//returns -1 for any other dataType.
//Fix: replaced the non-standard pow10() (GNU extension, removed in
//glibc >= 2.27) with std::pow, and qualified sort as std::sort.
template <typename dataType>
int ttk::TopologicalSimplification::addPerturbation(dataType* scalars, SimplexId*
  offsets) const{
  dataType epsilon{};
  //Smallest decade below the type's decimal precision
  if(std::is_same<dataType,double>::value) epsilon=std::pow(10.0, 1-DBL_DIG);
  else if(std::is_same<dataType,float>::value) epsilon=std::pow(10.0, 1-FLT_DIG);
  else return -1;
  std::vector<std::tuple<dataType,SimplexId,SimplexId>> perturbation(vertexNumber_);
  for(SimplexId i=0; i<vertexNumber_; ++i){
    std::get<0>(perturbation[i])=scalars[i];
    std::get<1>(perturbation[i])=offsets[i];
    std::get<2>(perturbation[i])=i;
  }
  SweepCmp cmp(true);
  std::sort(perturbation.begin(), perturbation.end(), cmp);
  for(SimplexId i=0; i<vertexNumber_; ++i){
    if(i){
      //Force strict increase along the sorted order
      if(std::get<0>(perturbation[i]) <= std::get<0>(perturbation[i-1]))
        std::get<0>(perturbation[i])=std::get<0>(perturbation[i-1]) + epsilon;
    }
    scalars[std::get<2>(perturbation[i])]=std::get<0>(perturbation[i]);
  }
  return 0;
}
// Simplify the scalar field so that, at convergence, only the user-authorized
// critical points (the vertex identifiers supplied as constraints) remain as
// minima/maxima. dataType is the scalar type, idType the identifier/offset
// type of the input buffers. Returns 0 on success, -1 if a sweep front
// unexpectedly runs empty (no authorized seed of the required kind).
template <typename dataType, typename idType>
int ttk::TopologicalSimplification::execute() const{
// get input data
dataType* inputScalars=static_cast<dataType*>(inputScalarFieldPointer_);
dataType* scalars=static_cast<dataType*>(outputScalarFieldPointer_);
idType* identifiers=static_cast<idType*>(vertexIdentifierScalarFieldPointer_);
idType* inputOffsets=static_cast<idType*>(inputOffsetScalarFieldPointer_);
SimplexId* offsets=static_cast<SimplexId*>(outputOffsetScalarFieldPointer_);
Timer t;
// pre-processing: copy the input scalars into the output buffer (mapping
// NaN to 0 so comparisons stay total) and seed the output offsets.
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
for(SimplexId k=0; k<vertexNumber_; ++k){
scalars[k]=inputScalars[k];
if(std::isnan((double) scalars[k]))
scalars[k] = 0;
offsets[k]=inputOffsets[k];
}
// get the user extremum list: flag each constrained vertex identifier.
std::vector<bool> extrema(vertexNumber_, false);
for(SimplexId k=0; k<constraintNumber_; ++k){
const SimplexId identifierId=identifiers[k];
#ifndef TTK_ENABLE_KAMIKAZE
// bounds check skipped in kamikaze builds
if(identifierId>=0 and identifierId<vertexNumber_)
#endif
extrema[identifierId]=true;
}
// classify the authorized (to-be-kept) minima and maxima among the flagged
// vertices.
std::vector<SimplexId> authorizedMinima;
std::vector<SimplexId> authorizedMaxima;
std::vector<bool> authorizedExtrema(vertexNumber_, false);
getCriticalPoints<dataType>(
scalars, offsets,
authorizedMinima,
authorizedMaxima,
extrema);
{
std::stringstream msg;
msg << "[TopologicalSimplification] Maintaining "
<< constraintNumber_
<< " constraints ("
<< authorizedMinima.size() << " minima and "
<< authorizedMaxima.size() << " maxima)." << std::endl;
dMsg(std::cout, msg.str(), advancedInfoMsg);
}
// declare the tuple-comparison functor
SweepCmp cmp;
// processing: repeat ascending+descending sweeps until no unauthorized
// extremum survives; the loop bound vertexNumber_ caps the iteration count.
int iteration{};
for(SimplexId i=0; i<vertexNumber_; ++i){
{
std::stringstream msg;
msg << "[TopologicalSimplification] Starting simplifying iteration #"
<< i << "..." << std::endl;
dMsg(std::cout, msg.str(), advancedInfoMsg);
}
// j==0: ascending sweep seeded at authorized minima;
// j==1: descending sweep seeded at authorized maxima.
for(int j=0; j<2; ++j){
bool isIncreasingOrder=!j;
cmp.setIsIncreasingOrder(isIncreasingOrder);
std::set<std::tuple<dataType,SimplexId,SimplexId>, decltype(cmp)> sweepFront(cmp);
std::vector<bool> visitedVertices(vertexNumber_, false);
std::vector<SimplexId> adjustmentSequence(vertexNumber_);
// add the seeds
if(isIncreasingOrder){
for(SimplexId k : authorizedMinima){
authorizedExtrema[k]=true;
sweepFront.emplace(scalars[k],offsets[k],k);
visitedVertices[k]=true;
}
}
else{
for(SimplexId k : authorizedMaxima){
authorizedExtrema[k]=true;
sweepFront.emplace(scalars[k],offsets[k],k);
visitedVertices[k]=true;
}
}
// growth by neighborhood of the seeds: repeatedly pop the front's best
// vertex, enqueue its unvisited neighbors, and record the visit order.
SimplexId adjustmentPos = 0;
do{
auto front=sweepFront.begin();
// empty front here means no seed was available: abort.
if(front==sweepFront.end()) return -1;
SimplexId vertexId=std::get<2>(*front);
sweepFront.erase(front);
SimplexId neighborNumber=triangulation_->getVertexNeighborNumber(vertexId);
for(SimplexId k=0; k<neighborNumber; ++k){
SimplexId neighbor;
triangulation_->getVertexNeighbor(vertexId,k,neighbor);
if(!visitedVertices[neighbor]){
sweepFront.emplace(scalars[neighbor],offsets[neighbor],neighbor);
visitedVertices[neighbor]=true;
}
}
adjustmentSequence[adjustmentPos]=vertexId;
++adjustmentPos;
}while(!sweepFront.empty());
// save offsets and rearrange scalars: flatten any inversion along the
// visit order and assign monotone offsets (ascending from 0, or
// descending from vertexNumber_+1).
SimplexId offset = (isIncreasingOrder ? 0 : vertexNumber_ + 1);
for(SimplexId k=0; k<vertexNumber_; ++k){
if(isIncreasingOrder){
if(k and scalars[adjustmentSequence[k]] <= scalars[adjustmentSequence[k-1]])
scalars[adjustmentSequence[k]]=scalars[adjustmentSequence[k-1]];
++offset;
}
else{
if(k and scalars[adjustmentSequence[k]] >= scalars[adjustmentSequence[k-1]])
scalars[adjustmentSequence[k]]=scalars[adjustmentSequence[k-1]];
--offset;
}
offsets[adjustmentSequence[k]]=offset;
}
}
// test convergence: another iteration is needed while extra extrema exist
// or while any surviving extremum is not an authorized one.
bool needForMoreIterations{false};
std::vector<SimplexId> minima;
std::vector<SimplexId> maxima;
getCriticalPoints<dataType>(scalars,offsets,minima,maxima);
if(maxima.size() > authorizedMaxima.size()) needForMoreIterations=true;
if(minima.size() > authorizedMinima.size()) needForMoreIterations=true;
{
std::stringstream msg;
msg << "[TopologicalSimplification] Current status: "
<< minima.size() << " minima, "
<< maxima.size() << " maxima." << std::endl;
dMsg(std::cout, msg.str(), advancedInfoMsg);
}
if(!needForMoreIterations){
for(SimplexId k : minima){
if(!authorizedExtrema[k]){
needForMoreIterations=true;
break;
}
}
}
if(!needForMoreIterations){
for(SimplexId k : maxima){
if(!authorizedExtrema[k]){
needForMoreIterations=true;
break;
}
}
}
// optional adding of perturbation (tie-breaking for flat plateaus)
if(addPerturbation_) addPerturbation<dataType>(scalars, offsets);
++iteration;
if(!needForMoreIterations) break;
}
{
std::stringstream msg;
msg << "[TopologicalSimplification] Scalar field simplified"
<< " in " << t.getElapsedTime() << " s. (" << threadNumber_
<< " threads(s), " << iteration << " ite.)."
<< std::endl;
dMsg(std::cout,msg.str(),timeMsg);
}
return 0;
}
#endif // TOPOLOGICALSIMPLIFICATION_H
|
transform.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M %
% T R R A A NN N SS F O O R R MM MM %
% T RRRR AAAAA N N N SSS FFF O O RRRR M M M %
% T R R A A N NN SS F O O R R M M %
% T R R A A N N SSSSS F OOO R R M M %
% %
% %
% MagickCore Image Transform Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/effect.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/layer.h"
#include "magick/list.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/resource_.h"
#include "magick/resize.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o O r i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoOrientImage() adjusts an image so that its orientation is suitable for
% viewing (i.e. top-left orientation).
%
% The format of the AutoOrientImage method is:
%
% Image *AutoOrientImage(const Image *image,
% const OrientationType orientation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o orientation: Current image orientation.
%
% o exception: Return any errors or warnings in this structure.
%
*/
MagickExport Image *AutoOrientImage(const Image *image,
  const OrientationType orientation,ExceptionInfo *exception)
{
  Image
    *orient_image = (Image *) NULL;

  /*
    Return a copy of the image transformed so that it displays with a
    top-left orientation; the transform applied is determined solely by the
    caller-supplied orientation tag.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  switch (orientation)
  {
    case TopRightOrientation:
      orient_image=FlopImage(image,exception);  /* mirror horizontally */
      break;
    case BottomRightOrientation:
      orient_image=RotateImage(image,180.0,exception);
      break;
    case BottomLeftOrientation:
      orient_image=FlipImage(image,exception);  /* mirror vertically */
      break;
    case LeftTopOrientation:
      orient_image=TransposeImage(image,exception);
      break;
    case RightTopOrientation:
      orient_image=RotateImage(image,90.0,exception);
      break;
    case RightBottomOrientation:
      orient_image=TransverseImage(image,exception);
      break;
    case LeftBottomOrientation:
      orient_image=RotateImage(image,270.0,exception);
      break;
    case UndefinedOrientation:
    case TopLeftOrientation:
    default:
      orient_image=CloneImage(image,0,0,MagickTrue,exception);  /* upright */
      break;
  }
  if (orient_image != (Image *) NULL)
    orient_image->orientation=TopLeftOrientation;
  return(orient_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChopImage() removes a region of an image and collapses the image to occupy
% the removed portion.
%
% The format of the ChopImage method is:
%
% Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o chop_info: Define the region of the image to chop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
ExceptionInfo *exception)
{
#define ChopImageTag "Chop/Image"
CacheView
*chop_view,
*image_view;
Image
*chop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
extent;
ssize_t
y;
/*
Check chop geometry: reject regions entirely outside the image, then clamp
the requested extent to the image bounds.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
assert(chop_info != (RectangleInfo *) NULL);
if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
((chop_info->y+(ssize_t) chop_info->height) < 0) ||
(chop_info->x > (ssize_t) image->columns) ||
(chop_info->y > (ssize_t) image->rows))
ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
extent=(*chop_info);
if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
extent.width=(size_t) ((ssize_t) image->columns-extent.x);
if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
extent.height=(size_t) ((ssize_t) image->rows-extent.y);
if (extent.x < 0)
{
extent.width-=(size_t) (-extent.x);
extent.x=0;
}
if (extent.y < 0)
{
extent.height-=(size_t) (-extent.y);
extent.y=0;
}
/* the result image shrinks by the chopped extent in both dimensions */
chop_image=CloneImage(image,image->columns-extent.width,image->rows-
extent.height,MagickTrue,exception);
if (chop_image == (Image *) NULL)
return((Image *) NULL);
/*
Copy the rows above the chopped region, skipping the chopped columns.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,chop_image,extent.y,1)
#endif
for (y=0; y < (ssize_t) extent.y; y++)
{
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict chop_indexes,
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
/* NOTE(review): authentic index queue requested from a *virtual* view;
looks suspect but matches the call used throughout this function --
confirm against the cache-view API before changing */
indexes=GetCacheViewAuthenticIndexQueue(image_view);
chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
/* keep only columns outside [extent.x, extent.x+extent.width) */
if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
{
*q=(*p);
if (indexes != (IndexPacket *) NULL)
{
if (chop_indexes != (IndexPacket *) NULL)
*chop_indexes++=GetPixelIndex(indexes+x);
}
q++;
}
p++;
}
if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* NOTE(review): progress++ is not synchronized across OpenMP threads;
the count is approximate -- confirm this is acceptable */
proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
/*
Copy the rows below the chopped region, again skipping chopped columns.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
{
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict chop_indexes,
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
/* source row sits below the chopped band; destination row continues
directly after the rows written by the first loop */
p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
1,exception);
if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
{
*q=(*p);
if (indexes != (IndexPacket *) NULL)
{
if (chop_indexes != (IndexPacket *) NULL)
*chop_indexes++=GetPixelIndex(indexes+x);
}
q++;
}
p++;
}
if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
chop_view=DestroyCacheView(chop_view);
image_view=DestroyCacheView(image_view);
chop_image->type=image->type;
/* on any row failure, discard the partial result */
if (status == MagickFalse)
chop_image=DestroyImage(chop_image);
return(chop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C M Y K I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCMYKImages() consolidates separate C, M, Y, and K planes into a
% single image.
%
% The format of the ConsolidateCMYKImages method is:
%
% Image *ConsolidateCMYKImages(const Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
ExceptionInfo *exception)
{
CacheView
*cmyk_view,
*image_view;
Image
*cmyk_image,
*cmyk_images;
register ssize_t
i;
ssize_t
y;
/*
Consolidate separate C, M, Y, and K planes into a single image: each group
of four consecutive images in the list becomes one CMYK image. Plane
intensities are inverted (QuantumRange-intensity) before being stored.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
cmyk_images=NewImageList();
for (i=0; i < (ssize_t) GetImageListLength(images); i+=4)
{
cmyk_image=CloneImage(images,0,0,MagickTrue,exception);
if (cmyk_image == (Image *) NULL)
break;
/* NOTE(review): the breaks below (storage-class failure, or the list
ending mid-group) leave cmyk_image unreleased -- confirm intended */
if (SetImageStorageClass(cmyk_image,DirectClass) == MagickFalse)
break;
(void) SetImageColorspace(cmyk_image,CMYKColorspace);
/*
Plane 1: cyan, stored in the red channel.
*/
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) images->columns; x++)
{
SetPixelRed(q,ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
/*
Plane 2: magenta, stored in the green channel.
*/
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) images->columns; x++)
{
q->green=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
/*
Plane 3: yellow, stored in the blue channel.
*/
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) images->columns; x++)
{
q->blue=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
/*
Plane 4: black, stored in the index channel.
*/
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
indexes=GetCacheViewAuthenticIndexQueue(cmyk_view);
for (x=0; x < (ssize_t) images->columns; x++)
{
SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange-
GetPixelIntensity(images,p)));
p++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
AppendImageToList(&cmyk_images,cmyk_image);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
}
return(cmyk_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImage() extracts a region of the image starting at the offset defined
% by geometry. Region must be fully defined, and no special handling of
% geometry flags is performed.
%
% The format of the CropImage method is:
%
% Image *CropImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to crop with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
ExceptionInfo *exception)
{
#define CropImageTag "Crop/Image"
CacheView
*crop_view,
*image_view;
Image
*crop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
bounding_box,
page;
ssize_t
y;
/*
Check crop geometry. The crop offset is interpreted on the virtual canvas
(image->page), not on the raw pixel array.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
bounding_box=image->page;
if ((bounding_box.width == 0) || (bounding_box.height == 0))
{
bounding_box.width=image->columns;
bounding_box.height=image->rows;
}
page=(*geometry);
if (page.width == 0)
page.width=bounding_box.width;
if (page.height == 0)
page.height=bounding_box.height;
if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
((bounding_box.y-page.y) >= (ssize_t) page.height) ||
((page.x-bounding_box.x) > (ssize_t) image->columns) ||
((page.y-bounding_box.y) > (ssize_t) image->rows))
{
/*
Crop is not within virtual canvas, return 1 pixel transparent image.
*/
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
crop_image=CloneImage(image,1,1,MagickTrue,exception);
if (crop_image == (Image *) NULL)
return((Image *) NULL);
crop_image->background_color.opacity=(Quantum) TransparentOpacity;
(void) SetImageBackgroundColor(crop_image);
crop_image->page=bounding_box;
crop_image->page.x=(-1);
crop_image->page.y=(-1);
if (crop_image->dispose == BackgroundDispose)
crop_image->dispose=NoneDispose;
return(crop_image);
}
/*
Translate the virtual-canvas crop request into raw pixel coordinates,
clamping to the actual pixel array on each axis.
*/
if ((page.x < 0) && (bounding_box.x >= 0))
{
page.width+=page.x-bounding_box.x;
page.x=0;
}
else
{
page.width-=bounding_box.x-page.x;
page.x-=bounding_box.x;
if (page.x < 0)
page.x=0;
}
if ((page.y < 0) && (bounding_box.y >= 0))
{
page.height+=page.y-bounding_box.y;
page.y=0;
}
else
{
page.height-=bounding_box.y-page.y;
page.y-=bounding_box.y;
if (page.y < 0)
page.y=0;
}
if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
page.width=image->columns-page.x;
if ((geometry->width != 0) && (page.width > geometry->width))
page.width=geometry->width;
if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
page.height=image->rows-page.y;
if ((geometry->height != 0) && (page.height > geometry->height))
page.height=geometry->height;
bounding_box.x+=page.x;
bounding_box.y+=page.y;
if ((page.width == 0) || (page.height == 0))
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
return((Image *) NULL);
}
/*
Initialize crop image attributes; the result keeps the page geometry so
the cropped tile stays anchored on the virtual canvas.
*/
crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
if (crop_image == (Image *) NULL)
return((Image *) NULL);
crop_image->page.width=image->page.width;
crop_image->page.height=image->page.height;
if (((ssize_t) (bounding_box.x+bounding_box.width) > (ssize_t) image->page.width) ||
((ssize_t) (bounding_box.y+bounding_box.height) > (ssize_t) image->page.height))
{
crop_image->page.width=bounding_box.width;
crop_image->page.height=bounding_box.height;
}
crop_image->page.x=bounding_box.x;
crop_image->page.y=bounding_box.y;
/*
Crop image: copy each destination row (and its colormap indexes, when
present) straight from the source region.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,crop_image,crop_image->rows,1)
#endif
for (y=0; y < (ssize_t) crop_image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict crop_indexes;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
1,exception);
q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
crop_indexes=GetCacheViewAuthenticIndexQueue(crop_view);
(void) memcpy(q,p,(size_t) crop_image->columns*sizeof(*p));
if ((indexes != (IndexPacket *) NULL) &&
(crop_indexes != (IndexPacket *) NULL))
(void) memcpy(crop_indexes,indexes,(size_t) crop_image->columns*
sizeof(*crop_indexes));
if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* NOTE(review): progress++ is not synchronized across OpenMP
threads; the reported count is approximate -- confirm acceptable */
proceed=SetImageProgress(image,CropImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
crop_view=DestroyCacheView(crop_view);
image_view=DestroyCacheView(image_view);
crop_image->type=image->type;
/* on any row failure, discard the partial result */
if (status == MagickFalse)
crop_image=DestroyImage(crop_image);
return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e T o T i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImageToTiles() crops a single image, into a possible list of tiles.
% This may include a single sub-region of the image. This basically applies
% all the normal geometry flags for Crop.
%
% Image *CropImageToTiles(const Image *image,
% const char *crop_geometry, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport Image *CropImageToTiles(const Image *image,
const char *crop_geometry,ExceptionInfo *exception)
{
Image
*next,
*crop_image;
MagickStatusType
flags;
RectangleInfo
geometry;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
crop_image=NewImageList();
next=NewImageList();
flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
if ((flags & AreaValue) != 0)
{
PointInfo
delta,
offset;
RectangleInfo
crop;
size_t
height,
width;
/*
Crop into NxM tiles (@ flag): geometry.width x geometry.height gives the
tile grid, with x/y shrinking (default) or growing (! flag) the area.
*/
width=image->columns;
height=image->rows;
if (geometry.width == 0)
geometry.width=1;
if (geometry.height == 0)
geometry.height=1;
if ((flags & AspectValue) == 0)
{
width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
}
else
{
width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
}
/* fractional tile size; never smaller than one pixel */
delta.x=(double) width/geometry.width;
delta.y=(double) height/geometry.height;
if (delta.x < 1.0)
delta.x=1.0;
if (delta.y < 1.0)
delta.y=1.0;
for (offset.y=0; offset.y < (double) height; )
{
if ((flags & AspectValue) == 0)
{
crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
(geometry.y > 0 ? 0 : geometry.y)));
offset.y+=delta.y; /* increment now to find width */
crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
(geometry.y < 0 ? 0 : geometry.y)));
}
else
{
crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
(geometry.y > 0 ? geometry.y : 0)));
offset.y+=delta.y; /* increment now to find width */
crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
(geometry.y < 0 ? geometry.y : 0)));
}
crop.height-=crop.y;
crop.y+=image->page.y;
for (offset.x=0; offset.x < (double) width; )
{
if ((flags & AspectValue) == 0)
{
crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
(geometry.x > 0 ? 0 : geometry.x)));
offset.x+=delta.x; /* increment now to find height */
crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
(geometry.x < 0 ? 0 : geometry.x)));
}
else
{
crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
(geometry.x > 0 ? geometry.x : 0)));
offset.x+=delta.x; /* increment now to find height */
crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
(geometry.x < 0 ? geometry.x : 0)));
}
crop.width-=crop.x;
crop.x+=image->page.x;
next=CropImage(image,&crop,exception);
if (next != (Image *) NULL)
AppendImageToList(&crop_image,next);
}
}
/* per-tile crop warnings are expected here; do not propagate them */
ClearMagickException(exception);
return(crop_image);
}
if (((geometry.width == 0) && (geometry.height == 0)) ||
((flags & XValue) != 0) || ((flags & YValue) != 0))
{
/*
Crop a single region at +X+Y.
*/
crop_image=CropImage(image,&geometry,exception);
if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
{
crop_image->page.width=geometry.width;
crop_image->page.height=geometry.height;
crop_image->page.x-=geometry.x;
crop_image->page.y-=geometry.y;
}
return(crop_image);
}
if ((image->columns > geometry.width) || (image->rows > geometry.height))
{
RectangleInfo
page;
size_t
height,
width;
ssize_t
x,
y;
/*
Crop into tiles of fixed size WxH, marching across the virtual canvas.
*/
page=image->page;
if (page.width == 0)
page.width=image->columns;
if (page.height == 0)
page.height=image->rows;
width=geometry.width;
if (width == 0)
width=page.width;
height=geometry.height;
if (height == 0)
height=page.height;
next=NewImageList();
for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
{
for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
{
geometry.width=width;
geometry.height=height;
geometry.x=x;
geometry.y=y;
next=CropImage(image,&geometry,exception);
if (next == (Image *) NULL)
break;
AppendImageToList(&crop_image,next);
}
if (next == (Image *) NULL)
break;
}
return(crop_image);
}
/* geometry covers the whole image: return an unmodified copy */
return(CloneImage(image,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x c e r p t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExcerptImage() returns a excerpt of the image as defined by the geometry.
%
% The format of the ExcerptImage method is:
%
% Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to excerpt with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag "Excerpt/Image"
CacheView
*excerpt_view,
*image_view;
Image
*excerpt_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Allocate excerpt image: a geometry-sized copy; unlike CropImage, no
virtual-canvas adjustment is performed here.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
exception);
if (excerpt_image == (Image *) NULL)
return((Image *) NULL);
/*
Excerpt each row: straight memcpy of pixels (and colormap indexes, when
present) from the source region.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
for (y=0; y < (ssize_t) excerpt_image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict excerpt_indexes,
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
geometry->width,1,exception);
q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
(void) memcpy(q,p,(size_t) excerpt_image->columns*sizeof(*q));
/* NOTE(review): authentic index queue requested from the *virtual*
image_view (CropImage uses GetCacheViewVirtualIndexQueue here) --
confirm against the cache-view API before changing */
indexes=GetCacheViewAuthenticIndexQueue(image_view);
if (indexes != (IndexPacket *) NULL)
{
excerpt_indexes=GetCacheViewAuthenticIndexQueue(excerpt_view);
if (excerpt_indexes != (IndexPacket *) NULL)
(void) memcpy(excerpt_indexes,indexes,(size_t)
excerpt_image->columns*sizeof(*excerpt_indexes));
}
if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,ExcerptImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
excerpt_view=DestroyCacheView(excerpt_view);
image_view=DestroyCacheView(image_view);
excerpt_image->type=image->type;
/* on any row failure, discard the partial result */
if (status == MagickFalse)
excerpt_image=DestroyImage(excerpt_image);
return(excerpt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtentImage() extends the image as defined by the geometry, gravity, and
% image background color. Set the (x,y) offset of the geometry to move the
% original image relative to the extended image.
%
% The format of the ExtentImage method is:
%
% Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
  Image
    *extent_image;

  MagickBooleanType
    status;

  /*
    Allocate the extended canvas at the geometry's width/height.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (extent_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Fill the canvas with the background color, then composite the source at
    the negated geometry offset (a positive offset moves the source left/up
    relative to the extended canvas).  Both steps share one failure path.
  */
  status=SetImageBackgroundColor(extent_image);
  if (status != MagickFalse)
    status=CompositeImage(extent_image,image->compose,image,-geometry->x,
      -geometry->y);
  if (status == MagickFalse)
    {
      InheritException(exception,&extent_image->exception);
      extent_image=DestroyImage(extent_image);
      return((Image *) NULL);
    }
  return(extent_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlipImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis.
%
% The format of the FlipImage method is:
%
% Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag "Flip/Image"

  CacheView
    *flip_view,
    *image_view;

  Image
    *flip_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flip_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image: copy source row y into destination row (rows-y-1); pixels
    within a row keep their order, so this mirrors about the central x-axis.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flip_image,flip_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict flip_indexes;

    register PixelPacket
      *magick_restrict q;

    /* once any row fails, remaining iterations become no-ops */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* whole rows are copied verbatim; only the row order changes */
    (void) memcpy(q,p,(size_t) image->columns*sizeof(*q));
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    if (indexes != (const IndexPacket *) NULL)
      {
        flip_indexes=GetCacheViewAuthenticIndexQueue(flip_view);
        if (flip_indexes != (IndexPacket *) NULL)
          (void) memcpy(flip_indexes,indexes,(size_t) image->columns*
            sizeof(*flip_indexes));
      }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,FlipImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /*
    Mirror the page (virtual canvas) offset so the flipped image occupies
    the corresponding position on the canvas.
  */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlopImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis.
%
% The format of the FlopImage method is:
%
% Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag "Flop/Image"

  CacheView
    *flop_view,
    *image_view;

  Image
    *flop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flop_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row: reverse the pixel order within every scanline, which
    mirrors the image about the central y-axis.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flop_image,flop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict flop_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /* once any row fails, remaining iterations become no-ops */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      q is advanced one past the row's end so the pre-decrement writes fill
      the destination right-to-left while p reads left-to-right.
    */
    q+=flop_image->columns;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    flop_indexes=GetCacheViewAuthenticIndexQueue(flop_view);
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      (*--q)=(*p++);
      if ((indexes != (const IndexPacket *) NULL) &&
          (flop_indexes != (IndexPacket *) NULL))
        SetPixelIndex(flop_indexes+flop_image->columns-x-1,
          GetPixelIndex(indexes+x));
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,FlopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /*
    Mirror the page (virtual canvas) offset so the flopped image occupies
    the corresponding position on the canvas.
  */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RollImage() offsets an image as defined by x_offset and y_offset.
%
% The format of the RollImage method is:
%
% Image *RollImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset: the number of columns to roll in the horizontal direction.
%
% o y_offset: the number of rows to roll in the vertical direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CopyImageRegion() copies a columns x rows block of pixels (and, when
  present, the associated index channel) from (sx,sy) in source to (dx,dy)
  in destination.  Returns MagickTrue on success, MagickFalse if any
  scanline could not be read or written.  Helper for RollImage(); a zero
  rows count simply skips the loop and succeeds.
*/
static MagickBooleanType CopyImageRegion(Image *destination,const Image *source, const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy,
  const ssize_t dx,const ssize_t dy,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /* nothing to copy for an empty horizontal extent */
  if (columns == 0)
    return(MagickTrue);
  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source,exception);
  destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,destination,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict destination_indexes;

    register PixelPacket
      *magick_restrict q;

    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source_view);
    (void) memcpy(q,p,(size_t) columns*sizeof(*p));
    /* copy the index channel only when both views expose one */
    if (indexes != (IndexPacket *) NULL)
      {
        destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view);
        if (destination_indexes != (IndexPacket *) NULL)
          (void) memcpy(destination_indexes,indexes,(size_t)
            columns*sizeof(*indexes));
      }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}
MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag "Roll/Image"

  Image
    *roll_image;

  MagickStatusType
    status;

  RectangleInfo
    offset;

  /*
    Initialize roll image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  roll_image=CloneImage(image,0,0,MagickTrue,exception);
  if (roll_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Normalize the requested offsets into [0,columns) x [0,rows) so the
    quadrant arithmetic below never goes negative or out of range.
  */
  offset.x=x_offset;
  offset.y=y_offset;
  while (offset.x < 0)
    offset.x+=(ssize_t) image->columns;
  while (offset.x >= (ssize_t) image->columns)
    offset.x-=(ssize_t) image->columns;
  while (offset.y < 0)
    offset.y+=(ssize_t) image->rows;
  while (offset.y >= (ssize_t) image->rows)
    offset.y-=(ssize_t) image->rows;
  /*
    Roll image: a circular shift moves four rectangular quadrants of the
    source to their wrapped positions in the destination.  status is a
    MagickStatusType so the four results can be and-ed together; progress
    is reported between quadrants.
  */
  status=CopyImageRegion(roll_image,image,(size_t) offset.x,
    (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
    offset.y,0,0,exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
    (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
    exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
    offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
    offset.y,0,0,offset.x,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  roll_image->type=image->type;
  /* if any quadrant copy failed, release the partial result */
  if (status == MagickFalse)
    roll_image=DestroyImage(roll_image);
  return(roll_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShaveImage() shaves pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the ShaveImage method is:
%
% Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o shave_image: Method ShaveImage returns a pointer to the shaved
% image. A null image is returned if there is a memory shortage or
% if the image width or height is zero.
%
% o image: the image.
%
% o shave_info: Specifies a pointer to a RectangleInfo which defines the
% region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
  const RectangleInfo *shave_info,ExceptionInfo *exception)
{
  Image
    *shave_image;

  RectangleInfo
    geometry;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Refuse to shave more than the image contains; ThrowImageException()
    records the warning and returns NULL from this function.
  */
  if (((2*shave_info->width) >= image->columns) ||
      ((2*shave_info->height) >= image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /*
    Shave is a centered crop: seed geometry from the image, then shrink it
    by the shave margins (the guard above keeps the unsigned subtractions
    from wrapping) and offset into the page coordinate space.
  */
  SetGeometry(image,&geometry);
  geometry.width-=2*shave_info->width;
  geometry.height-=2*shave_info->height;
  geometry.x=(ssize_t) shave_info->width+image->page.x;
  geometry.y=(ssize_t) shave_info->height+image->page.y;
  shave_image=CropImage(image,&geometry,exception);
  if (shave_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Shrink the virtual canvas to match.  NOTE(review): page.width/height
    are unsigned and are not guaranteed to be as large as the image
    dimensions validated above; these subtractions could wrap for a small
    page -- confirm callers' page invariants.
  */
  shave_image->page.width-=2*shave_info->width;
  shave_image->page.height-=2*shave_info->height;
  shave_image->page.x-=(ssize_t) shave_info->width;
  shave_image->page.y-=(ssize_t) shave_info->height;
  return(shave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImage() splices a solid color into the image as defined by the
% geometry.
%
% The format of the SpliceImage method is:
%
% Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to splice with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpliceImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define SpliceImageTag "Splice/Image"

  CacheView
    *image_view,
    *splice_view;

  Image
    *splice_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    splice_geometry;

  ssize_t
    columns,
    y;

  /*
    Allocate splice image: the canvas grows by the geometry's width and
    height, and the inserted band is filled with the background color.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  splice_geometry=(*geometry);
  splice_image=CloneImage(image,image->columns+splice_geometry.width,
    image->rows+splice_geometry.height,MagickTrue,exception);
  if (splice_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(splice_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&splice_image->exception);
      splice_image=DestroyImage(splice_image);
      return((Image *) NULL);
    }
  (void) SetImageBackgroundColor(splice_image);
  /*
    Respect image geometry: shift the splice origin according to the
    image's gravity.  Horizontal centering/right-alignment adjusts x by
    width/2 or width; vertical centering/bottom-alignment adjusts y by
    height/2 or height.
  */
  switch (image->gravity)
  {
    default:
    case UndefinedGravity:
    case NorthWestGravity:
      break;
    case NorthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      break;
    }
    case NorthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      break;
    }
    case WestGravity:
    {
      /*
        Bug fix: vertical centering must use height/2 (the original code
        added width/2 to y, inconsistent with the EastGravity and
        CenterGravity cases below).
      */
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case StaticGravity:
    case CenterGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case EastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case SouthWestGravity:
    {
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
  }
  /*
    Splice image.  Two row-parallel passes copy the source around the
    spliced band: the first handles rows above the band, the second rows
    below it (shifted down by the band height).  Within each row, columns
    left of the band are copied, the band itself is skipped (leaving the
    background color), then the remaining source columns follow.
  */
  status=MagickTrue;
  progress=0;
  columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  splice_view=AcquireAuthenticCacheView(splice_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,splice_image,splice_geometry.y,1)
#endif
  for (y=0; y < (ssize_t) splice_geometry.y; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict indexes,
      *magick_restrict splice_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view);
    for (x=0; x < columns; x++)
    {
      SetPixelRed(q,GetPixelRed(p));
      SetPixelGreen(q,GetPixelGreen(p));
      SetPixelBlue(q,GetPixelBlue(p));
      SetPixelOpacity(q,OpaqueOpacity);
      if (image->matte != MagickFalse)
        SetPixelOpacity(q,GetPixelOpacity(p));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
      indexes++;
      p++;
      q++;
    }
    /* skip the spliced band; it keeps the background color */
    for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
      q++;
    for ( ; x < (ssize_t) splice_image->columns; x++)
    {
      SetPixelRed(q,GetPixelRed(p));
      SetPixelGreen(q,GetPixelGreen(p));
      SetPixelBlue(q,GetPixelBlue(p));
      SetPixelOpacity(q,OpaqueOpacity);
      if (image->matte != MagickFalse)
        SetPixelOpacity(q,GetPixelOpacity(p));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
      indexes++;
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SpliceImageTag,progress++,
          splice_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,splice_image,splice_image->rows,1)
#endif
  for (y=(ssize_t) (splice_geometry.y+splice_geometry.height);
       y < (ssize_t) splice_image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict indexes,
      *magick_restrict splice_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    /* defend against a splice geometry that puts y outside the canvas */
    if ((y < 0) || (y >= (ssize_t)splice_image->rows))
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height,
      splice_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view);
    for (x=0; x < columns; x++)
    {
      SetPixelRed(q,GetPixelRed(p));
      SetPixelGreen(q,GetPixelGreen(p));
      SetPixelBlue(q,GetPixelBlue(p));
      SetPixelOpacity(q,OpaqueOpacity);
      if (image->matte != MagickFalse)
        SetPixelOpacity(q,GetPixelOpacity(p));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
      indexes++;
      p++;
      q++;
    }
    /* skip the spliced band; it keeps the background color */
    for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
      q++;
    for ( ; x < (ssize_t) splice_image->columns; x++)
    {
      SetPixelRed(q,GetPixelRed(p));
      SetPixelGreen(q,GetPixelGreen(p));
      SetPixelBlue(q,GetPixelBlue(p));
      SetPixelOpacity(q,OpaqueOpacity);
      if (image->matte != MagickFalse)
        SetPixelOpacity(q,GetPixelOpacity(p));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
      indexes++;
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SpliceImageTag,progress++,
          splice_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  splice_view=DestroyCacheView(splice_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    splice_image=DestroyImage(splice_image);
  return(splice_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImage() is a convenience method that behaves like ResizeImage() or
% CropImage() but accepts scaling and/or cropping information as a region
% geometry specification. If the operation fails, the original image handle
% is left as is.
%
% This should only be used for single images.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
% const char *image_geometry)
%
% A description of each parameter follows:
%
% o image: the image The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
*/
/*
DANGER: This function destroys what it assumes to be a single image list.
If the input image is part of a larger list, all other images in that list
will be simply 'lost', not destroyed.
Also if the crop generates a list of images only the first image is resized.
And finally if the crop succeeds and the resize failed, you will get a
cropped image, as well as a 'false' or 'failed' report.
  This function should probably be deprecated in favor of direct calls
to CropImageToTiles() or ResizeImage(), as appropriate.
*/
MagickExport MagickBooleanType TransformImage(Image **image,
  const char *crop_geometry,const char *image_geometry)
{
  Image
    *resize_image,
    *transform_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image **) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  transform_image=(*image);
  if (crop_geometry != (const char *) NULL)
    {
      Image
        *crop_image;

      /*
        Crop image to a user specified size.  On crop failure, fall back
        to a clone of the original so *image still owns a fresh handle.
      */
      crop_image=CropImageToTiles(*image,crop_geometry,&(*image)->exception);
      if (crop_image == (Image *) NULL)
        transform_image=CloneImage(*image,0,0,MagickTrue,&(*image)->exception);
      else
        {
          transform_image=DestroyImage(transform_image);
          transform_image=GetFirstImageInList(crop_image);
        }
      *image=transform_image;
    }
  /*
    Bug fix: if both the crop and the fallback clone failed,
    transform_image (and *image) are NULL; report failure rather than
    dereferencing the NULL handle below.
  */
  if (transform_image == (Image *) NULL)
    return(MagickFalse);
  if (image_geometry == (const char *) NULL)
    return(MagickTrue);
  /*
    Scale image to a user specified size.
  */
  flags=ParseRegionGeometry(transform_image,image_geometry,&geometry,
    &(*image)->exception);
  (void) flags;
  if ((transform_image->columns == geometry.width) &&
      (transform_image->rows == geometry.height))
    return(MagickTrue);
  resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
    transform_image->filter,transform_image->blur,&(*image)->exception);
  if (resize_image == (Image *) NULL)
    return(MagickFalse);
  transform_image=DestroyImage(transform_image);
  transform_image=resize_image;
  *image=transform_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImages() calls TransformImage() on each image of a sequence.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImages(Image **image,
% const char *crop_geometry,const char *image_geometry)
%
% A description of each parameter follows:
%
% o image: the image The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
*/
MagickExport MagickBooleanType TransformImages(Image **images,
  const char *crop_geometry,const char *image_geometry)
{
  Image
    **image_list,
    *transform_images;

  MagickStatusType
    status;

  register ssize_t
    i;

  /*
    Transform every image in the sequence independently, then rebuild the
    list from the (possibly replaced) image handles.
  */
  assert(images != (Image **) NULL);
  assert((*images)->signature == MagickCoreSignature);
  if ((*images)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      (*images)->filename);
  image_list=ImageListToArray(*images,&(*images)->exception);
  if (image_list == (Image **) NULL)
    return(MagickFalse);
  transform_images=NewImageList();
  status=MagickTrue;
  for (i=0; image_list[i] != (Image *) NULL; i++)
  {
    Image
      *next;

    /* TransformImage() may replace the handle; append whatever it left */
    next=image_list[i];
    status&=TransformImage(&next,crop_geometry,image_geometry);
    AppendImageToList(&transform_images,next);
  }
  *images=transform_images;
  image_list=(Image **) RelinquishMagickMemory(image_list);
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p o s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransposeImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis while rotating them by 90 degrees.
%
% The format of the TransposeImage method is:
%
% Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag "Transpose/Image"

  CacheView
    *image_view,
    *transpose_view;

  Image
    *transpose_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* the transpose swaps dimensions: rows x columns becomes columns x rows */
  transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transpose_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transpose image: each source row r is written as destination column r
    (a 1-pixel-wide, image->columns-tall region), preserving pixel order,
    so destination (r,x) = source (x,r).
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,transpose_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict transpose_indexes,
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    /* once any row fails, remaining iterations become no-ops */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
      0,1,transpose_image->rows,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memcpy(q,p,(size_t) image->columns*sizeof(*q));
    /*
      NOTE(review): image_view is a virtual view but its index channel is
      fetched with the Authentic variant; sibling routines use
      GetCacheViewVirtualIndexQueue() for the source -- confirm intent.
    */
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (indexes != (IndexPacket *) NULL)
      {
        transpose_indexes=GetCacheViewAuthenticIndexQueue(transpose_view);
        if (transpose_indexes != (IndexPacket *) NULL)
          (void) memcpy(transpose_indexes,indexes,(size_t)
            image->columns*sizeof(*transpose_indexes));
      }
    if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,TransposeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transpose_view=DestroyCacheView(transpose_view);
  image_view=DestroyCacheView(image_view);
  transpose_image->type=image->type;
  /* swap the page geometry to match the transposed axes */
  page=transpose_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  transpose_image->page=page;
  if (status == MagickFalse)
    transpose_image=DestroyImage(transpose_image);
  return(transpose_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s v e r s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransverseImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis while rotating them by 270 degrees.
%
% The format of the TransverseImage method is:
%
% Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag  "Transverse/Image"

  CacheView
    *image_view,
    *transverse_view;

  Image
    *transverse_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Rows and columns swap in the result, so clone with transposed dimensions.
  */
  transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transverse_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transverse image: source row y becomes destination column (rows-y-1),
    with the pixels of the row written in reverse order.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,transverse_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict transverse_indexes,
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-
      1),0,1,transverse_image->rows,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Copy the source row, reversed, into the destination column.
    */
    q+=image->columns;
    for (x=0; x < (ssize_t) image->columns; x++)
      *--q=(*p++);
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (indexes != (IndexPacket *) NULL)
      {
        transverse_indexes=GetCacheViewAuthenticIndexQueue(transverse_view);
        if (transverse_indexes != (IndexPacket *) NULL)
          for (x=0; x < (ssize_t) image->columns; x++)
            SetPixelIndex(transverse_indexes+image->columns-x-1,
              GetPixelIndex(indexes+x));
      }
    sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          FIX: progress is shared by all OpenMP threads; serialize the
          read-modify-write of progress++ to avoid a data race (matches the
          critical-section idiom used elsewhere in MagickCore).
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransverseImage)
#endif
        proceed=SetImageProgress(image,TransverseImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transverse_view=DestroyCacheView(transverse_view);
  image_view=DestroyCacheView(image_view);
  transverse_image->type=image->type;
  /*
    Swap the page geometry and mirror the offsets to the new orientation.
  */
  page=transverse_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  if (page.width != 0)
    page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
  if (page.height != 0)
    page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
  transverse_image->page=page;
  if (status == MagickFalse)
    transverse_image=DestroyImage(transverse_image);
  return(transverse_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r i m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TrimImage() trims pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the TrimImage method is:
%
% Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  TrimImage() removes the edges that match the background color from the
  image.  Returns a newly allocated image (caller frees) or NULL on failure.
  When the bounding box is degenerate, a 1x1 transparent image is returned
  with page offsets of -1, mirroring the original behavior.
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    bounds;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  bounds=GetImageBoundingBox(image,exception);
  if ((bounds.width != 0) && (bounds.height != 0))
    {
      /*
        Normal case: crop to the bounding box, offset by the page origin.
      */
      bounds.x+=image->page.x;
      bounds.y+=image->page.y;
      return(CropImage(image,&bounds,exception));
    }
  /*
    Degenerate bounding box: the whole image trims away; hand back a 1x1
    transparent placeholder.
  */
  {
    Image
      *empty_image;

    empty_image=CloneImage(image,1,1,MagickTrue,exception);
    if (empty_image == (Image *) NULL)
      return((Image *) NULL);
    empty_image->background_color.opacity=(Quantum) TransparentOpacity;
    (void) SetImageBackgroundColor(empty_image);
    empty_image->page=image->page;
    empty_image->page.x=(-1);
    empty_image->page.y=(-1);
    return(empty_image);
  }
}
|
shallow_water_residual_based_bdf_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Miguel Maso Sotomayor
//
#ifndef KRATOS_SHALLOW_WATER_RESIDUAL_BASED_BDF_SCHEME_H_INCLUDED
#define KRATOS_SHALLOW_WATER_RESIDUAL_BASED_BDF_SCHEME_H_INCLUDED
// System includes
// External includes
// Project includes
#include "shallow_water_application_variables.h"
#include "custom_utilities/flow_rate_slip_utility.h"
#include "solving_strategies/schemes/residual_based_bdf_scheme.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ShallowWaterResidualBasedBDFScheme
* @ingroup KratosShallowWaterApplication
* @brief BDF integration scheme (for dynamic problems)
* @details The \f$n\f$ order Backward Differentiation Formula (BDF) method is a two step \f$n\f$ order accurate method.
* This scheme is designed to solve a system of the type:
* \f[
* \mathbf{M} \frac{du_{n0}}{dt} + \mathbf{K} u_{n0} = \mathbf{f}_{ext}
* \f]
* @author Miguel Maso Sotomayor
*/
template<class TSparseSpace, class TDenseSpace>
class ShallowWaterResidualBasedBDFScheme
: public ResidualBasedBDFScheme<TSparseSpace, TDenseSpace>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION( ShallowWaterResidualBasedBDFScheme );
typedef Scheme<TSparseSpace,TDenseSpace> BaseType;
typedef typename BaseType::Pointer BaseTypePointer;
typedef ResidualBasedBDFScheme<TSparseSpace,TDenseSpace> BDFBaseType;
typedef typename BDFBaseType::DofsArrayType DofsArrayType;
typedef typename BDFBaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BDFBaseType::TSystemVectorType TSystemVectorType;
typedef typename BDFBaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BDFBaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef ModelPart::NodesContainerType NodesArrayType;
typedef typename ModelPart::NodeType NodeType;
// Utility used to rotate local contributions and nodal velocities so that
// slip (wall) boundary conditions can be imposed on the flow rate (see the
// Rotate/ApplySlipCondition calls in the system-contribution methods below).
typedef FlowRateSlipUtility<LocalSystemMatrixType,LocalSystemVectorType,double>FlowRateSlipToolType;
///@}
///@name Life Cycle
///@{
// Constructor
// Order: BDF order (default 2). UpdateVelocities: if true, VELOCITY is
// recomputed as MOMENTUM / HEIGHT after each solution update.
explicit ShallowWaterResidualBasedBDFScheme(const std::size_t Order = 2, bool UpdateVelocities = false)
: BDFBaseType(Order)
, mRotationTool()
, mUpdateVelocities(UpdateVelocities)
{}
// Copy Constructor
explicit ShallowWaterResidualBasedBDFScheme(ShallowWaterResidualBasedBDFScheme& rOther)
: BDFBaseType(rOther)
, mRotationTool()
, mUpdateVelocities(rOther.mUpdateVelocities)
{}
/**
* Clone
*/
BaseTypePointer Clone() override
{
return BaseTypePointer( new ShallowWaterResidualBasedBDFScheme(*this) );
}
// Destructor
~ShallowWaterResidualBasedBDFScheme() override {}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Performing the update of the solution within newton iteration
* @param rModelPart The model of the problem to solve
* @param rDofSet Set of all primary variables
* @param rA LHS matrix
* @param rDx incremental update of primary variables
* @param rb RHS Vector
*/
void Update(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
// Rotate nodal values to the local (normal/tangential) frame, apply the
// DOF increments, then rotate back to the global frame.
mRotationTool.RotateVelocities(rModelPart);
mpDofUpdater->UpdateDofs(rDofSet, rDx);
mRotationTool.RecoverVelocities(rModelPart);
BDFBaseType::UpdateDerivatives(rModelPart, rDofSet, rA, rDx, rb);
// Optionally derive VELOCITY from the updated MOMENTUM and HEIGHT.
if (mUpdateVelocities) UpdateVelocities(rModelPart);
KRATOS_CATCH("ShallowWaterResidualBasedBDFScheme.Update");
}
/**
* @brief Performing the prediction of the solution
* @details It predicts the solution for the current step
* @param rModelPart The model of the problem to solve
* @param rDofSet set of all primary variables
* @param rA LHS matrix
* @param rDx Incremental update of primary variables
* @param rb RHS Vector
*/
void Predict(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
const double delta_time = rModelPart.GetProcessInfo()[DELTA_TIME];
const int num_nodes = static_cast<int>( rModelPart.Nodes().size() );
const auto it_node_begin = rModelPart.Nodes().begin();
// The unknowns (MOMENTUM_X, MOMENTUM_Y, HEIGHT) are paired with their
// first time derivatives (ACCELERATION_X, ACCELERATION_Y,
// VERTICAL_VELOCITY) component by component.
const std::array<const Variable<double>*, 3> var_components = {&MOMENTUM_X, &MOMENTUM_Y, &HEIGHT};
const std::array<const Variable<double>*, 3> accel_components = {&ACCELERATION_X, &ACCELERATION_Y, &VERTICAL_VELOCITY};
#pragma omp parallel for
for (int i = 0; i < num_nodes; ++i) {
auto it_node = it_node_begin + i;
for (std::size_t j = 0; j < 3; ++j)
{
// Explicit (forward-Euler) predictor from the previous step:
// u_n0 = u_n1 + dt * du_n1/dt, applied only to free DOFs.
if (!it_node->IsFixed(*var_components[j])) {
double& un0 = it_node->FastGetSolutionStepValue(*var_components[j]);
double un1 = it_node->FastGetSolutionStepValue(*var_components[j], 1);
double dot_un1 = it_node->FastGetSolutionStepValue(*accel_components[j], 1);
un0 = un1 + delta_time * dot_un1;
}
}
UpdateFirstDerivative(it_node);
}
KRATOS_CATCH("ShallowWaterResidualBasedBDFScheme.Predict");
}
/**
* @brief This function is designed to be called in the builder and solver to introduce the selected time integration scheme.
* @param rCurrentElement The element to compute
* @param rLHS_Contribution The LHS matrix contribution
* @param rRHS_Contribution The RHS vector contribution
* @param rEquationId The ID's of the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void CalculateSystemContributions(
Element& rCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
Element::EquationIdVectorType& rEquationId,
const ProcessInfo& rCurrentProcessInfo
) override
{
BDFBaseType::CalculateSystemContributions(
rCurrentElement,
rLHS_Contribution,
rRHS_Contribution,
rEquationId,
rCurrentProcessInfo);
// Rotate the local system to the slip frame and impose the slip condition.
mRotationTool.Rotate(rLHS_Contribution,rRHS_Contribution,rCurrentElement.GetGeometry());
mRotationTool.ApplySlipCondition(rLHS_Contribution,rRHS_Contribution,rCurrentElement.GetGeometry());
}
/**
* @brief This function is designed to calculate just the RHS contribution
* @param rCurrentElement The element to compute
* @param rRHS_Contribution The RHS vector contribution
* @param rEquationId The ID's of the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void CalculateRHSContribution(
Element& rCurrentElement,
LocalSystemVectorType& rRHS_Contribution,
Element::EquationIdVectorType& rEquationId,
const ProcessInfo& rCurrentProcessInfo
) override
{
BDFBaseType::CalculateRHSContribution(
rCurrentElement,
rRHS_Contribution,
rEquationId,
rCurrentProcessInfo);
mRotationTool.Rotate(rRHS_Contribution,rCurrentElement.GetGeometry());
mRotationTool.ApplySlipCondition(rRHS_Contribution,rCurrentElement.GetGeometry());
}
/**
* @brief This function is designed to be called in the builder and solver to introduce the selected time integration scheme.
* @param rCurrentCondition The condition to compute
* @param rLHS_Contribution The LHS matrix contribution
* @param rRHS_Contribution The RHS vector contribution
* @param rEquationId The ID's of the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void CalculateSystemContributions(
Condition& rCurrentCondition,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
Element::EquationIdVectorType& rEquationId,
const ProcessInfo& rCurrentProcessInfo
) override
{
BDFBaseType::CalculateSystemContributions(
rCurrentCondition,
rLHS_Contribution,
rRHS_Contribution,
rEquationId,
rCurrentProcessInfo);
mRotationTool.Rotate(rLHS_Contribution,rRHS_Contribution,rCurrentCondition.GetGeometry());
mRotationTool.ApplySlipCondition(rLHS_Contribution,rRHS_Contribution,rCurrentCondition.GetGeometry());
}
/**
* @brief This function is designed to calculate just the RHS contribution
* @param rCurrentCondition The condition to compute
* @param rRHS_Contribution The RHS vector contribution
* @param rEquationId The ID's of the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void CalculateRHSContribution(
Condition& rCurrentCondition,
LocalSystemVectorType& rRHS_Contribution,
Element::EquationIdVectorType& rEquationId,
const ProcessInfo& rCurrentProcessInfo
) override
{
BDFBaseType::CalculateRHSContribution(
rCurrentCondition,
rRHS_Contribution,
rEquationId,
rCurrentProcessInfo);
mRotationTool.Rotate(rRHS_Contribution,rCurrentCondition.GetGeometry());
mRotationTool.ApplySlipCondition(rRHS_Contribution,rCurrentCondition.GetGeometry());
}
/*
* @brief Free memory allocated by this class.
*/
void Clear() override
{
this->mpDofUpdater->Clear();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "ShallowWaterResidualBasedBDFScheme";
}
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
// Sparse-space-specific helper that applies rDx increments to the DOF set.
typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater();
// Rotation/slip utility; stateless with respect to copy (re-default-constructed on copy).
FlowRateSlipToolType mRotationTool;
// Whether Update() recomputes VELOCITY = MOMENTUM / HEIGHT at each iteration.
bool mUpdateVelocities;
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief Updating first time derivative
* @param itNode the node interator
*/
void UpdateFirstDerivative(NodesArrayType::iterator itNode) override
{
// BDF formula: du/dt = sum_k mBDF[k] * u(t - k*dt), evaluated for the
// vector unknown (MOMENTUM -> ACCELERATION) and the scalar unknown
// (HEIGHT -> VERTICAL_VELOCITY).
array_1d<double, 3>& dot_un0 = itNode->FastGetSolutionStepValue(ACCELERATION);
double& dot_hn0 = itNode->FastGetSolutionStepValue(VERTICAL_VELOCITY);
noalias(dot_un0) = BDFBaseType::mBDF[0] * itNode->FastGetSolutionStepValue(MOMENTUM);
dot_hn0 = BDFBaseType::mBDF[0] * itNode->FastGetSolutionStepValue(HEIGHT);
for (std::size_t i_order = 1; i_order < BDFBaseType::mOrder + 1; ++i_order)
{
noalias(dot_un0) += BDFBaseType::mBDF[i_order] * itNode->FastGetSolutionStepValue(MOMENTUM, i_order);
dot_hn0 += BDFBaseType::mBDF[i_order] * itNode->FastGetSolutionStepValue(HEIGHT, i_order);
}
}
/**
* @brief Updating second time derivative
* @param itNode the node interator
* @note Intentionally empty: this scheme integrates first derivatives only.
*/
void UpdateSecondDerivative(NodesArrayType::iterator itNode) override {}
/**
* @brief Updating the velocities
* @param rModelPart The model part to compute
* @note No guard against h == 0; assumes HEIGHT is non-zero at every node
* when mUpdateVelocities is enabled — TODO confirm with callers.
*/
void UpdateVelocities(ModelPart& rModelPart)
{
block_for_each(rModelPart.Nodes(), [&](NodeType& r_node){
auto& vel = r_node.FastGetSolutionStepValue(VELOCITY);
const auto& q = r_node.FastGetSolutionStepValue(MOMENTUM);
const auto& h = r_node.FastGetSolutionStepValue(HEIGHT);
vel = q / h;
});
}
/**
* @brief It adds the dynamic LHS contribution of the elements
* @param rLHS_Contribution The dynamic contribution for the LHS
* @param rD The damping matrix
* @param rM The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
void AddDynamicsToLHS(
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
const ProcessInfo& rCurrentProcessInfo
) override
{
// Adding mass contribution to the dynamic stiffness
if (rM.size1() != 0) { // if M matrix declared
noalias(rLHS_Contribution) += rM * BDFBaseType::mBDF[0];
}
}
/**
* @brief It adds the dynamic RHS contribution of the elements
* @param rElement The element to compute
* @param RHS_Contribution The dynamic contribution for the RHS
* @param D The damping matrix
* @param M The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
void AddDynamicsToRHS(
Element& rElement,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
const ProcessInfo& rCurrentProcessInfo
) override
{
const auto& r_const_element = rElement;
const std::size_t this_thread = OpenMPUtils::ThisThread();
// Adding inertia contribution
if (rM.size1() != 0) {
r_const_element.GetFirstDerivativesVector(BDFBaseType::mVector.dotun0[this_thread], 0);
noalias(rRHS_Contribution) -= prod(rM, BDFBaseType::mVector.dotun0[this_thread]);
}
}
/**
* @brief It adds the dynamic RHS contribution of the condition
* @param rCondition The condition to compute
* @param RHS_Contribution The dynamic contribution for the RHS
* @param D The damping matrix
* @param M The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
void AddDynamicsToRHS(
Condition& rCondition,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
const ProcessInfo& rCurrentProcessInfo
) override
{
const auto& r_const_condition = rCondition;
const std::size_t this_thread = OpenMPUtils::ThisThread();
// Adding inertia contribution
if (rM.size1() != 0) {
r_const_condition.GetFirstDerivativesVector(BDFBaseType::mVector.dotun0[this_thread], 0);
noalias(rRHS_Contribution) -= prod(rM, BDFBaseType::mVector.dotun0[this_thread]);
}
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
}; // Class ShallowWaterResidualBasedBDFScheme
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} // Namespace Kratos
#endif // KRATOS_SHALLOW_WATER_RESIDUAL_BASED_BDF_SCHEME_H_INCLUDED defined
|
GB_unop__tanh_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__tanh_fp64_fp64)
// op(A') function: GB (_unop_tran__tanh_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = tanh (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = tanh (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = tanh (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TANH || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = tanh (Ax): apply the unary operator entrywise; no typecast is needed
// since both C and A are double.  Returns GrB_NO_VALUE when the operator is
// disabled at compile time, otherwise GrB_SUCCESS.
GrB_Info GB (_unop_apply__tanh_fp64_fp64)
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    if (Ab == NULL)
    {
        // all entries present: dense sweep over Ax
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            Cx [k] = tanh (Ax [k]) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                Cx [k] = tanh (Ax [k]) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = tanh (A'): transpose and apply the unary operator.  The whole body is
// generated by the shared transpose template, specialized via the GB_* macros
// defined above in this file.
GrB_Info GB (_unop_tran__tanh_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__div_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__div_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__div_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__div_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_int64)
// A*D function (colscale): GB (_AxD__div_int64)
// D*A function (rowscale): GB (_DxB__div_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__div_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__div_int64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_int64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_int64)
// C=scalar+B GB (_bind1st__div_int64)
// C=scalar+B' GB (_bind1st_tran__div_int64)
// C=A+scalar GB (_bind2nd__div_int64)
// C=A'+scalar GB (_bind2nd_tran__div_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = GB_IDIV_SIGNED (aij, bij, 64)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_SIGNED (x, y, 64) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_INT64 || GxB_NO_DIV_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; body supplied by the shared
// template, specialized to z = GB_IDIV_SIGNED (x, y, 64) via the macros above.
void GB (_Cdense_ewise3_accum__div_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulation); template body.
GrB_Info GB (_Cdense_ewise3_noaccum__div_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C; template body
// parallelized over the B_ek_slicing task decomposition.
GrB_Info GB (_Cdense_accumB__div_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C; template body.
// Returns GrB_NO_VALUE when the operator is disabled, else GrB_SUCCESS.
GrB_Info GB (_Cdense_accumb__div_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): an unreachable duplicate "return (GrB_SUCCESS) ;" followed
// the block above (the block always returns first); removed as dead code.
// If this file is regenerated, the fix belongs in the Generator/ source.
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale of A by a diagonal matrix D; template body writes
// directly into the int64_t array C->x.
GrB_Info GB (_AxD__div_int64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale of B by a diagonal matrix D; template body.
GrB_Info GB (_DxB__div_int64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B; template body.  The GB_WERK
// slicing workspaces are declared here and released by GB_FREE_WORK, which
// the template relies on.
GrB_Info GB (_AaddB__div_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse/hypersparse; template body.
GrB_Info GB (_AemultB_08__div_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for DIV (the flip is handled upstream by
// rewriting div as rdiv), so only the non-flipped template branch compiles.
GrB_Info GB (_AemultB_02__div_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full; template body.
GrB_Info GB (_AemultB_04__div_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap; template body.
GrB_Info GB (_AemultB_bitmap__div_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = x / Bx: bind the scalar x as the first operand and apply the binary
// operator to every entry of B.  GBB skips absent bitmap entries; GBX reads
// Bx handling the iso case.
GrB_Info GB (_bind1st__div_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IDIV_SIGNED (x, bij, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = Ax / y: bind the scalar y as the second operand and apply the binary
// operator to every entry of A.
GrB_Info GB (_bind2nd__div_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IDIV_SIGNED (aij, y, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the GB_unop_transpose.c template included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (x, aij, 64) ; \
}
// Transposes A and applies z = div(x,aij); all traversal work is done by
// the included template, driven by Workspaces/A_slice.
GrB_Info GB (_bind1st_tran__div_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (generated-file convention)
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the GB_unop_transpose.c template included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (aij, y, 64) ; \
}
// Transposes A and applies z = div(aij,y); traversal is done by the template.
GrB_Info GB (_bind2nd_tran__div_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
declare_variant_mixed_codegen.c | // RUN: %clang_cc1 -verify -fopenmp -x c -triple x86_64-unknown-linux -emit-llvm %s -o - | FileCheck %s --check-prefix HOST
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-unknown-linux -emit-pch -o %t -fopenmp-version=45 %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-unknown-linux -include-pch %t -verify %s -emit-llvm -o - -fopenmp-version=45 | FileCheck %s --check-prefix HOST
// RUN: %clang_cc1 -verify -fopenmp -x c -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc -fopenmp-version=45
// RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx64-unknown-unknown -aux-triple powerpc64le-unknown-unknown -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -fopenmp-version=45 | FileCheck %s --check-prefix GPU
// RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx64-unknown-unknown -aux-triple powerpc64le-unknown-unknown -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -emit-pch -o %t -fopenmp-version=45
// RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx64-unknown-unknown -aux-triple powerpc64le-unknown-unknown -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -o - -fopenmp-version=45 | FileCheck %s --check-prefix GPU
// RUN: %clang_cc1 -verify -fopenmp -x c -triple x86_64-unknown-linux -emit-llvm %s -o - | FileCheck %s --check-prefix HOST
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-unknown-linux -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-unknown-linux -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix HOST
// RUN: %clang_cc1 -verify -fopenmp -x c -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx64-unknown-unknown -aux-triple powerpc64le-unknown-unknown -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix GPU
// RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx64-unknown-unknown -aux-triple powerpc64le-unknown-unknown -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -emit-pch -o %t
// RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx64-unknown-unknown -aux-triple powerpc64le-unknown-unknown -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -o - | FileCheck %s --check-prefix GPU
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
int dev(double i) { return 0; } // GPU variant of base(); returns 0
int hst(double i) { return 1; } // host variant of base(); returns 1
#pragma omp declare variant(hst) match(device = {kind(host)})
#pragma omp declare variant(dev) match(device = {kind(gpu)})
int base(); // unprototyped (empty parens), so base(-1) below is accepted and dispatched to the selected variant
// HOST-LABEL: define void @foo()
// HOST: call i32 @hst(double -1.000000e+00)
// HOST: call i32 @hst(double -2.000000e+00)
// HOST: call void [[OFFL:@.+_foo_l36]]()
void foo() {
  base(-1); // on the host this resolves to hst(-1) via declare variant (see HOST checks)
  hst(-2); // direct call, no variant substitution
#pragma omp target
  {
    base(-3); // inside the target region: hst in host-emitted code, dev on the GPU
    dev(-4); // direct call to the device variant
  }
}
// HOST: define {{.*}}void [[OFFL]]()
// HOST: call i32 @hst(double -3.000000e+00)
// HOST: call i32 @dev(double -4.000000e+00)
// GPU: define {{.*}}void @__omp_offloading_{{.+}}_foo_l36()
// GPU: call i32 @dev(double -3.000000e+00)
// GPU: call i32 @dev(double -4.000000e+00)
// GPU-NOT: @base
// GPU: define {{.*}}i32 @dev(double
// GPU: ret i32 0
#endif // HEADER
|
GB_binop__ge_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__ge_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__ge_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_int8)
// A*D function (colscale): GB (_AxD__ge_int8)
// D*A function (rowscale): GB (_DxB__ge_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_int8)
// C=scalar+B GB (_bind1st__ge_int8)
// C=scalar+B' GB (_bind1st_tran__ge_int8)
// C=A+scalar GB (_bind2nd__ge_int8)
// C=A'+scalar GB (_bind2nd_tran__ge_int8)
// C type: bool
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
//------------------------------------------------------------------------------
// types and expressions specializing the templates for C = (A >= B), int8
//------------------------------------------------------------------------------
#define GB_ATYPE \
    int8_t
#define GB_BTYPE \
    int8_t
#define GB_CTYPE \
    bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (fix: removed a stray trailing '\' after the 0; line splicing was
// absorbing the following comment line into this macro definition)
#define GB_A_IS_PATTERN \
    0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
// (fix: removed the same stray trailing '\' here)
#define GB_B_IS_PATTERN \
    0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0
// op is second
#define GB_OP_IS_SECOND \
    0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_GE || GxB_NO_INT8 || GxB_NO_GE_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Compiled out (#if 0): GE is not a supported accumulator for this kernel,
// so the generator emitted no function here.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// All work is done by the included template, specialized via the GB_* macros
// above (GB_BINOP computes cij = (aij >= bij)).
void GB (_Cdense_ewise3_noaccum__ge_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// NOTE: the worker template is compiled out (#if 0) for this operator, so
// this function does no numeric work; it only reports GrB_SUCCESS (or
// GrB_NO_VALUE when the operator is disabled), and the generic method runs.
GrB_Info GB (_Cdense_accumB__ge_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// NOTE: the worker template is compiled out (#if 0) for this operator, so
// this function does no numeric work; it only reports GrB_SUCCESS (or
// GrB_NO_VALUE when the operator is disabled).
GrB_Info GB (_Cdense_accumb__ge_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ge_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has the operator's output type (bool for GE), not A's type
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ge_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has the operator's output type (bool for GE), not B's type
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// All traversal logic lives in GB_add_template.c; this wrapper declares the
// workspaces the template expects and unpacks the eWiseUnion scalars.
GrB_Info GB (_AaddB__ge_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are only read (and only initialized) for eWiseUnion
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// All work is done by the included meta-template, specialized via the GB_*
// macros defined above.
GrB_Info GB (_AemultB_08__ge_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ge_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is 0 for GE (see macros above), so only the #else
// branch below is compiled for this operator.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// All work is done by the included template.
GrB_Info GB (_AemultB_04__ge_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// All work is done by the included template.
GrB_Info GB (_AemultB_bitmap__ge_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ge_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B, or NULL if B is full
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped (GB_void) inputs; Cx and Bx may alias,
    // so neither is declared restrict
    bool *Cx = (bool *) Cx_output ;
    int8_t *Bx = (int8_t *) Bx_input ;
    const int8_t xscalar = (*((int8_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // skip positions not present in the bitmap Bb
        if (!GBB (Bb, k)) continue ;
        int8_t bkj = GBX (Bx, k, false) ;
        Cx [k] = (xscalar >= bkj) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ge_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A, or NULL if A is full
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped (GB_void) inputs; Cx and Ax may alias,
    // so neither is declared restrict
    bool *Cx = (bool *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    const int8_t yscalar = (*((int8_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip positions not present in the bitmap Ab
        if (!GBB (Ab, k)) continue ;
        int8_t akj = GBX (Ax, k, false) ;
        Cx [k] = (akj >= yscalar) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the GB_unop_transpose.c template included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
// Transposes A and applies z = (x >= aij); traversal is done by the template.
GrB_Info GB (_bind1st_tran__ge_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (generated-file convention)
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the GB_unop_transpose.c template included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
// Transposes A and applies z = (aij >= y); traversal is done by the template.
GrB_Info GB (_bind2nd_tran__ge_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/property.h"
#include "magick/animate.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/image-private.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/timer.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies a value to the image with an arithmetic, relational,
% or logical operator to an image. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the EvaluateImageChannel method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImageChannel(Image *image,
% const ChannelType channel,const MagickEvaluateOperator op,
% const double value,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o op: A channel op.
%
% o value: A value value.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Release a per-thread scratch-pixel array set created by
  AcquirePixelThreadSet().  Frees every per-thread buffer, then the
  table itself; returns NULL for convenient assignment.
*/
static MagickPixelPacket **DestroyPixelThreadSet(MagickPixelPacket **pixels)
{
  register ssize_t
    id;

  assert(pixels != (MagickPixelPacket **) NULL);
  for (id=0; id < (ssize_t) GetOpenMPMaximumThreads(); id++)
  {
    if (pixels[id] != (MagickPixelPacket *) NULL)
      pixels[id]=(MagickPixelPacket *) RelinquishMagickMemory(pixels[id]);
  }
  pixels=(MagickPixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}
/*
  Allocate one scratch row of MagickPixelPackets per OpenMP thread.
  Each row holds max(image->columns, number_images) initialized pixels.
  Returns NULL on allocation failure (partially-built sets are released
  via DestroyPixelThreadSet).  Caller owns the result.
*/
static MagickPixelPacket **AcquirePixelThreadSet(const Image *image,
  const size_t number_images)
{
  MagickPixelPacket
    **pixels;

  register ssize_t
    n,
    thread;

  size_t
    length,
    number_threads;

  number_threads=GetOpenMPMaximumThreads();
  pixels=(MagickPixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (MagickPixelPacket **) NULL)
    return((MagickPixelPacket **) NULL);
  (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
  /*
    The row length is loop-invariant: hoisted out of the thread loop.
  */
  length=image->columns;
  if (length < number_images)
    length=number_images;
  for (thread=0; thread < (ssize_t) number_threads; thread++)
  {
    pixels[thread]=(MagickPixelPacket *) AcquireQuantumMemory(length,
      sizeof(**pixels));
    if (pixels[thread] == (MagickPixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));
    for (n=0; n < (ssize_t) length; n++)
      GetMagickPixelPacket(image,&pixels[thread][n]);
  }
  return(pixels);
}
/* Return the larger of two doubles. */
static inline double MagickMax(const double x,const double y)
{
  return(x > y ? x : y);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
const MagickPixelPacket
*color_1,
*color_2;
int
intensity;
color_1=(const MagickPixelPacket *) x;
color_2=(const MagickPixelPacket *) y;
intensity=(int) MagickPixelIntensity(color_2)-
(int) MagickPixelIntensity(color_1);
return(intensity);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/* Return the smaller of two doubles. */
static inline double MagickMin(const double x,const double y)
{
  return(x < y ? x : y);
}
/*
  ApplyEvaluateOperator(): apply one MagickEvaluateOperator to a single
  quantum `pixel` with operand `value`, returning the unclamped real
  result (callers clamp with ClampToQuantum).  `random_info` is consulted
  only by the *NoiseEvaluateOperator cases.  Unrecognized/Undefined ops
  return 0.0.
*/
static MagickRealType ApplyEvaluateOperator(RandomInfo *random_info,
Quantum pixel,const MagickEvaluateOperator op,const MagickRealType value)
{
MagickRealType
result;

result=0.0;
switch (op)
{
case UndefinedEvaluateOperator:
break;
case AbsEvaluateOperator:
{
result=(MagickRealType) fabs((double) (pixel+value));
break;
}
case AddEvaluateOperator:
{
result=(MagickRealType) (pixel+value);
break;
}
case AddModulusEvaluateOperator:
{
/*
This returns a 'floored modulus' of the addition which is a
positive result. It differs from % or fmod() which returns a
'truncated modulus' result, where floor() is replaced by trunc()
and could return a negative result (which is clipped).
*/
result=pixel+value;
result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
break;
}
case AndEvaluateOperator:
{
/* bitwise ops round value to the nearest integer via +0.5 */
result=(MagickRealType) ((size_t) pixel & (size_t) (value+0.5));
break;
}
case CosineEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case DivideEvaluateOperator:
{
/* a divisor of exactly 0.0 is treated as 1.0 to avoid division by zero */
result=pixel/(value == 0.0 ? 1.0 : value);
break;
}
case ExponentialEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*exp((double) (value*QuantumScale*
pixel)));
break;
}
case GaussianNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
GaussianNoise,value);
break;
}
case ImpulseNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
ImpulseNoise,value);
break;
}
case LaplacianNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
LaplacianNoise,value);
break;
}
case LeftShiftEvaluateOperator:
{
result=(MagickRealType) ((size_t) pixel << (size_t) (value+0.5));
break;
}
case LogEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*log((double) (QuantumScale*value*
pixel+1.0))/log((double) (value+1.0)));
break;
}
case MaxEvaluateOperator:
{
result=(MagickRealType) MagickMax((double) pixel,value);
break;
}
case MeanEvaluateOperator:
{
/* accumulates the sum only; the caller derives the mean */
result=(MagickRealType) (pixel+value);
break;
}
case MedianEvaluateOperator:
{
/* accumulates only; the caller selects the median (see EvaluateImages) */
result=(MagickRealType) (pixel+value);
break;
}
case MinEvaluateOperator:
{
result=(MagickRealType) MagickMin((double) pixel,value);
break;
}
case MultiplicativeNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
MultiplicativeGaussianNoise,value);
break;
}
case MultiplyEvaluateOperator:
{
result=(MagickRealType) (value*pixel);
break;
}
case OrEvaluateOperator:
{
result=(MagickRealType) ((size_t) pixel | (size_t) (value+0.5));
break;
}
case PoissonNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
PoissonNoise,value);
break;
}
case PowEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*pow((double) (QuantumScale*pixel),
(double) value));
break;
}
case RightShiftEvaluateOperator:
{
result=(MagickRealType) ((size_t) pixel >> (size_t) (value+0.5));
break;
}
case SetEvaluateOperator:
{
result=value;
break;
}
case SineEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case SubtractEvaluateOperator:
{
result=(MagickRealType) (pixel-value);
break;
}
case ThresholdEvaluateOperator:
{
result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 :
QuantumRange);
break;
}
case ThresholdBlackEvaluateOperator:
{
result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 : pixel);
break;
}
case ThresholdWhiteEvaluateOperator:
{
result=(MagickRealType) (((MagickRealType) pixel > value) ? QuantumRange :
pixel);
break;
}
case UniformNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
UniformNoise,value);
break;
}
case XorEvaluateOperator:
{
result=(MagickRealType) ((size_t) pixel ^ (size_t) (value+0.5));
break;
}
}
return(result);
}
/*
  EvaluateImage(): apply the evaluate operator `op` with operand `value`
  to every pixel of `image`.  Thin wrapper that forwards to
  EvaluateImageChannel() with AllChannels.
*/
MagickExport MagickBooleanType EvaluateImage(Image *image,
  const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
  return(EvaluateImageChannel(image,AllChannels,op,value,exception));
}
MagickExport Image *EvaluateImages(const Image *images,
const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag "Evaluate/Image"
CacheView
*evaluate_view;
const Image
*next;
Image
*evaluate_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
**restrict evaluate_pixels,
zero;
RandomInfo
**restrict random_info;
size_t
number_images;
ssize_t
y;
/*
Ensure the image are the same size.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
for (next=images; next != (Image *) NULL; next=GetNextImageInList(next))
if ((next->columns != images->columns) || (next->rows != images->rows))
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"ImageWidthsOrHeightsDiffer","`%s'",images->filename);
return((Image *) NULL);
}
/*
Initialize evaluate next attributes.
*/
evaluate_image=CloneImage(images,images->columns,images->rows,MagickTrue,
exception);
if (evaluate_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(evaluate_image,DirectClass) == MagickFalse)
{
InheritException(exception,&evaluate_image->exception);
evaluate_image=DestroyImage(evaluate_image);
return((Image *) NULL);
}
number_images=GetImageListLength(images);
evaluate_pixels=AcquirePixelThreadSet(images,number_images);
if (evaluate_pixels == (MagickPixelPacket **) NULL)
{
evaluate_image=DestroyImage(evaluate_image);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return((Image *) NULL);
}
/*
Evaluate image pixels.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(images,&zero);
random_info=AcquireRandomInfoThreadSet();
evaluate_view=AcquireCacheView(evaluate_image);
if (op == MedianEvaluateOperator)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic) shared(progress,status)
#endif
for (y=0; y < (ssize_t) evaluate_image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register IndexPacket
*restrict evaluate_indexes;
register MagickPixelPacket
*evaluate_pixel;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,evaluate_image->columns,
1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
evaluate_pixel=evaluate_pixels[id];
for (x=0; x < (ssize_t) evaluate_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) number_images; i++)
evaluate_pixel[i]=zero;
next=images;
for (i=0; i < (ssize_t) number_images; i++)
{
register const IndexPacket
*indexes;
register const PixelPacket
*p;
image_view=AcquireCacheView(next);
p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
evaluate_pixel[i].red=ApplyEvaluateOperator(random_info[id],
p->red,op,evaluate_pixel[i].red);
evaluate_pixel[i].green=ApplyEvaluateOperator(random_info[id],
p->green,op,evaluate_pixel[i].green);
evaluate_pixel[i].blue=ApplyEvaluateOperator(random_info[id],
p->blue,op,evaluate_pixel[i].blue);
evaluate_pixel[i].opacity=ApplyEvaluateOperator(random_info[id],
p->opacity,op,evaluate_pixel[i].opacity);
if (evaluate_image->colorspace == CMYKColorspace)
evaluate_pixel[i].index=ApplyEvaluateOperator(random_info[id],
*indexes,op,evaluate_pixel[i].index);
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
IntensityCompare);
q->red=ClampToQuantum(evaluate_pixel[i/2].red);
q->green=ClampToQuantum(evaluate_pixel[i/2].green);
q->blue=ClampToQuantum(evaluate_pixel[i/2].blue);
if (evaluate_image->matte == MagickFalse)
q->opacity=ClampToQuantum(evaluate_pixel[i/2].opacity);
else
q->opacity=ClampToQuantum(QuantumRange-evaluate_pixel[i/2].opacity);
if (evaluate_image->colorspace == CMYKColorspace)
evaluate_indexes[i]=ClampToQuantum(evaluate_pixel[i/2].index);
q++;
}
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_EvaluateImages)
#endif
proceed=SetImageProgress(images,EvaluateImageTag,progress++,
evaluate_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
else
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic) shared(progress,status)
#endif
for (y=0; y < (ssize_t) evaluate_image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register IndexPacket
*restrict evaluate_indexes;
register ssize_t
i,
x;
register MagickPixelPacket
*evaluate_pixel;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,evaluate_image->columns,
1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
evaluate_pixel=evaluate_pixels[id];
for (x=0; x < (ssize_t) evaluate_image->columns; x++)
evaluate_pixel[x]=zero;
next=images;
for (i=0; i < (ssize_t) number_images; i++)
{
register const IndexPacket
*indexes;
register const PixelPacket
*p;
image_view=AcquireCacheView(next);
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) next->columns; x++)
{
evaluate_pixel[x].red=ApplyEvaluateOperator(random_info[id],
p->red,i == 0 ? AddEvaluateOperator : op,evaluate_pixel[x].red);
evaluate_pixel[x].green=ApplyEvaluateOperator(random_info[id],
p->green,i == 0 ? AddEvaluateOperator : op,evaluate_pixel[x].green);
evaluate_pixel[x].blue=ApplyEvaluateOperator(random_info[id],
p->blue,i == 0 ? AddEvaluateOperator : op,evaluate_pixel[x].blue);
evaluate_pixel[x].opacity=ApplyEvaluateOperator(random_info[id],
p->opacity,i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].opacity);
if (evaluate_image->colorspace == CMYKColorspace)
evaluate_pixel[x].index=ApplyEvaluateOperator(random_info[id],
indexes[x],i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].index);
p++;
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
if (op == MeanEvaluateOperator)
for (x=0; x < (ssize_t) evaluate_image->columns; x++)
{
evaluate_pixel[x].red/=number_images;
evaluate_pixel[x].green/=number_images;
evaluate_pixel[x].blue/=number_images;
evaluate_pixel[x].opacity/=number_images;
evaluate_pixel[x].index/=number_images;
}
for (x=0; x < (ssize_t) evaluate_image->columns; x++)
{
q->red=ClampToQuantum(evaluate_pixel[x].red);
q->green=ClampToQuantum(evaluate_pixel[x].green);
q->blue=ClampToQuantum(evaluate_pixel[x].blue);
if (evaluate_image->matte == MagickFalse)
q->opacity=ClampToQuantum(evaluate_pixel[x].opacity);
else
q->opacity=ClampToQuantum(QuantumRange-evaluate_pixel[x].opacity);
if (evaluate_image->colorspace == CMYKColorspace)
evaluate_indexes[x]=ClampToQuantum(evaluate_pixel[x].index);
q++;
}
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_EvaluateImages)
#endif
proceed=SetImageProgress(images,EvaluateImageTag,progress++,
evaluate_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
evaluate_view=DestroyCacheView(evaluate_view);
evaluate_pixels=DestroyPixelThreadSet(evaluate_pixels);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
evaluate_image=DestroyImage(evaluate_image);
return(evaluate_image);
}
/*
  EvaluateImageChannel() applies the arithmetic, relational, or logical
  operator `op`, with the constant operand `value`, to every pixel of
  `image`, restricted to the channels selected by the `channel` mask.  The
  image is modified in place and is promoted to DirectClass first.

  Returns MagickTrue on success; MagickFalse if the storage class could not
  be set, a pixel row could not be fetched or synced, or the progress
  monitor cancelled the operation.
*/
MagickExport MagickBooleanType EvaluateImageChannel(Image *image,
  const ChannelType channel,const MagickEvaluateOperator op,const double value,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **restrict random_info;  /* one RNG per thread, indexed by thread id */

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Pixels are modified directly, so a colormapped image must become
    DirectClass before the loop.
  */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  /*
    Per-thread RNG state handed to ApplyEvaluateOperator(); presumably used
    by randomized operators — per-thread so no locking is needed.
  */
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining rows */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        q->red=ClampToQuantum(ApplyEvaluateOperator(random_info[id],q->red,op,
          value));
      if ((channel & GreenChannel) != 0)
        q->green=ClampToQuantum(ApplyEvaluateOperator(random_info[id],q->green,
          op,value));
      if ((channel & BlueChannel) != 0)
        q->blue=ClampToQuantum(ApplyEvaluateOperator(random_info[id],q->blue,op,
          value));
      if ((channel & OpacityChannel) != 0)
        {
          if (image->matte == MagickFalse)
            q->opacity=ClampToQuantum(ApplyEvaluateOperator(random_info[id],
              q->opacity,op,value));
          else
            /*
              With matte enabled, operate on alpha (QuantumRange-opacity)
              and convert the result back to opacity.
            */
            q->opacity=ClampToQuantum(QuantumRange-ApplyEvaluateOperator(
              random_info[id],(Quantum) GetAlphaPixelComponent(q),op,value));
        }
      if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
        indexes[x]=(IndexPacket) ClampToQuantum(ApplyEvaluateOperator(
          random_info[id],indexes[x],op,value));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress counter is shared; serialize the update */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_EvaluateImageChannel)
#endif
        proceed=SetImageProgress(image,EvaluateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() applies an arithmetic, relational, or logical function to
% an image.  Use these operations to lighten or darken an image, to increase
% or decrease contrast in an image, or to produce the "negative" of an
% image.
%
% The format of the FunctionImageChannel method is:
%
% MagickBooleanType FunctionImage(Image *image,
% const MagickFunction function,const ssize_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
% MagickBooleanType FunctionImageChannel(Image *image,
% const ChannelType channel,const MagickFunction function,
% const ssize_t number_parameters,const double *argument,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o function: A channel function.
%
% o parameters: one or more parameters.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Evaluate the selected channel function at `pixel`, reading up to four
  user parameters (missing parameters fall back to documented defaults),
  and clamp the result to the quantum range.  `exception` is currently
  unused.
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
  MagickRealType
    value;

  (void) exception;
  value=0.0;
  switch (function)
  {
    case PolynomialFunction:
    {
      /*
        Polynomial: coefficients are given highest order first, evaluated
        by Horner's rule on x = QuantumScale*pixel, e.g.
        c0*x^3 + c1*x^2 + c2*x + c3.
      */
      register ssize_t
        j;

      value=0.0;
      for (j=0; j < (ssize_t) number_parameters; j++)
        value=value*QuantumScale*pixel+parameters[j];
      value*=QuantumRange;
      break;
    }
    case SinusoidFunction:
    {
      /*
        Sinusoid: parameters are frequency, phase (degrees), amplitude,
        bias; defaults 1.0, 0.0, 0.5, 0.5.
      */
      double
        amplitude,
        bias,
        frequency,
        phase;

      frequency=(number_parameters >= 1) ? parameters[0] : 1.0;
      phase=(number_parameters >= 2) ? parameters[1] : 0.0;
      amplitude=(number_parameters >= 3) ? parameters[2] : 0.5;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      value=(MagickRealType) (QuantumRange*(amplitude*sin((double) (2.0*
        MagickPI*(frequency*QuantumScale*pixel+phase/360.0)))+bias));
      break;
    }
    case ArcsinFunction:
    {
      /*
        Arcsin (pegged at the range limits for out-of-domain results):
        parameters are width, center, range, bias; defaults 1.0, 0.5,
        1.0, 0.5.
      */
      double
        bias,
        center,
        range,
        width;

      width=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      value=2.0/width*(QuantumScale*pixel-center);
      if (value <= -1.0)
        value=bias-range/2.0;
      else
        if (value >= 1.0)
          value=bias+range/2.0;
        else
          value=(MagickRealType) (range/MagickPI*asin((double) value)+bias);
      value*=QuantumRange;
      break;
    }
    case ArctanFunction:
    {
      /*
        Arctan: parameters are slope, center, range, bias; defaults 1.0,
        0.5, 1.0, 0.5.
      */
      double
        bias,
        center,
        range,
        slope;

      slope=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      value=(MagickRealType) (MagickPI*slope*(QuantumScale*pixel-center));
      value=(MagickRealType) (QuantumRange*(range/MagickPI*atan((double)
        value)+bias));
      break;
    }
    case UndefinedFunction:
      break;
  }
  return(ClampToQuantum(value));
}
/*
  FunctionImage() applies the channel function to all channels of the image;
  it is a thin convenience wrapper around FunctionImageChannel().
*/
MagickExport MagickBooleanType FunctionImage(Image *image,
  const MagickFunction function,const size_t number_parameters,
  const double *parameters,ExceptionInfo *exception)
{
  return(FunctionImageChannel(image,AllChannels,function,number_parameters,
    parameters,exception));
}
/*
  FunctionImageChannel() applies the channel function `function` with its
  `parameters` to every pixel of `image`, restricted to the channels
  selected by `channel`.  The image is modified in place and is promoted
  to DirectClass first.

  Returns MagickTrue on success; MagickFalse if the storage class could not
  be set, a pixel row could not be fetched or synced, or the progress
  monitor cancelled the operation.
*/
MagickExport MagickBooleanType FunctionImageChannel(Image *image,
  const ChannelType channel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
#define FunctionImageTag "Function/Image "

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* pixels are modified directly: require DirectClass */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;  /* a previous row failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        q->red=ApplyFunction(q->red,function,number_parameters,parameters,
          exception);
      if ((channel & GreenChannel) != 0)
        q->green=ApplyFunction(q->green,function,number_parameters,parameters,
          exception);
      if ((channel & BlueChannel) != 0)
        q->blue=ApplyFunction(q->blue,function,number_parameters,parameters,
          exception);
      if ((channel & OpacityChannel) != 0)
        {
          if (image->matte == MagickFalse)
            q->opacity=ApplyFunction(q->opacity,function,number_parameters,
              parameters,exception);
          else
            /* matte images: transform alpha, then convert back to opacity */
            q->opacity=(Quantum) QuantumRange-ApplyFunction((Quantum)
              GetAlphaPixelComponent(q),function,number_parameters,parameters,
              exception);
        }
      if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
        indexes[x]=(IndexPacket) ApplyFunction(indexes[x],function,
          number_parameters,parameters,exception);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress counter is shared across threads; serialize the update */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_FunctionImageChannel)
#endif
        proceed=SetImageProgress(image,FunctionImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e C h a n n e l E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageChannelExtrema method is:
%
% MagickBooleanType GetImageChannelExtrema(const Image *image,
% const ChannelType channel,size_t *minima,size_t *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageExtrema() returns the extrema over all channels; a convenience
  wrapper around GetImageChannelExtrema().
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
  size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=GetImageChannelExtrema(image,AllChannels,minima,maxima,exception);
  return(status);
}
/*
  GetImageChannelExtrema() computes the floating-point channel range and
  rounds it to the nearest integral quantum values: the minimum is rounded
  to nearest via ceil(min-0.5), the maximum via floor(max+0.5).
*/
MagickExport MagickBooleanType GetImageChannelExtrema(const Image *image,
  const ChannelType channel,size_t *minima,size_t *maxima,
  ExceptionInfo *exception)
{
  double
    maximum,
    minimum;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=GetImageChannelRange(image,channel,&minimum,&maximum,exception);
  *minima=(size_t) ceil(minimum-0.5);
  *maxima=(size_t) floor(maximum+0.5);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelMean() returns the mean and standard deviation of one or more
% image channels.
%
% The format of the GetImageChannelMean method is:
%
% MagickBooleanType GetImageChannelMean(const Image *image,
% const ChannelType channel,double *mean,double *standard_deviation,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageMean() returns the mean and standard deviation over all channels;
  a convenience wrapper around GetImageChannelMean().
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
  double *standard_deviation,ExceptionInfo *exception)
{
  return(GetImageChannelMean(image,AllChannels,mean,standard_deviation,
    exception));
}
MagickExport MagickBooleanType GetImageChannelMean(const Image *image,
const ChannelType channel,double *mean,double *standard_deviation,
ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
size_t
channels;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_statistics=GetImageChannelStatistics(image,exception);
if (channel_statistics == (ChannelStatistics *) NULL)
return(MagickFalse);
channels=0;
channel_statistics[AllChannels].mean=0.0;
channel_statistics[AllChannels].standard_deviation=0.0;
if ((channel & RedChannel) != 0)
{
channel_statistics[AllChannels].mean+=
channel_statistics[RedChannel].mean;
channel_statistics[AllChannels].standard_deviation+=
channel_statistics[RedChannel].variance-
channel_statistics[RedChannel].mean*
channel_statistics[RedChannel].mean;
channels++;
}
if ((channel & GreenChannel) != 0)
{
channel_statistics[AllChannels].mean+=
channel_statistics[GreenChannel].mean;
channel_statistics[AllChannels].standard_deviation+=
channel_statistics[GreenChannel].variance-
channel_statistics[GreenChannel].mean*
channel_statistics[GreenChannel].mean;
channels++;
}
if ((channel & BlueChannel) != 0)
{
channel_statistics[AllChannels].mean+=
channel_statistics[BlueChannel].mean;
channel_statistics[AllChannels].standard_deviation+=
channel_statistics[BlueChannel].variance-
channel_statistics[BlueChannel].mean*
channel_statistics[BlueChannel].mean;
channels++;
}
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
{
channel_statistics[AllChannels].mean+=
channel_statistics[OpacityChannel].mean;
channel_statistics[AllChannels].standard_deviation+=
channel_statistics[OpacityChannel].variance-
channel_statistics[OpacityChannel].mean*
channel_statistics[OpacityChannel].mean;
channels++;
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
channel_statistics[AllChannels].mean+=
channel_statistics[BlackChannel].mean;
channel_statistics[AllChannels].standard_deviation+=
channel_statistics[BlackChannel].variance-
channel_statistics[BlackChannel].mean*
channel_statistics[BlackChannel].mean;
channels++;
}
channel_statistics[AllChannels].mean/=channels;
channel_statistics[AllChannels].standard_deviation=
sqrt(channel_statistics[AllChannels].standard_deviation/channels);
*mean=channel_statistics[AllChannels].mean;
*standard_deviation=channel_statistics[AllChannels].standard_deviation;
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelKurtosis() returns the kurtosis and skewness of one or more
% image channels.
%
% The format of the GetImageChannelKurtosis method is:
%
% MagickBooleanType GetImageChannelKurtosis(const Image *image,
% const ChannelType channel,double *kurtosis,double *skewness,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageKurtosis() returns the kurtosis and skewness over all channels;
  a convenience wrapper around GetImageChannelKurtosis().
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
  double *kurtosis,double *skewness,ExceptionInfo *exception)
{
  return(GetImageChannelKurtosis(image,AllChannels,kurtosis,skewness,
    exception));
}
/*
  GetImageChannelKurtosis() returns the excess kurtosis and skewness of the
  selected channels, pooled over every selected sample of every pixel.  Raw
  power sums E[x], E[x^2], E[x^3], E[x^4] are accumulated in a single pass
  and then converted to the central-moment formulas.

  Returns MagickFalse if any pixel row could not be read.
*/
MagickExport MagickBooleanType GetImageChannelKurtosis(const Image *image,
  const ChannelType channel,double *kurtosis,double *skewness,
  ExceptionInfo *exception)
{
  double
    area,                /* count of accumulated samples */
    mean,                /* raw sum of x, normalized to E[x] later */
    standard_deviation,
    sum_squares,
    sum_cubes,
    sum_fourth_power;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *kurtosis=0.0;
  *skewness=0.0;
  area=0.0;
  mean=0.0;
  standard_deviation=0.0;
  sum_squares=0.0;
  sum_cubes=0.0;
  sum_fourth_power=0.0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        {
          /* accumulate raw power sums for this sample */
          mean+=GetRedPixelComponent(p);
          sum_squares+=(double) p->red*GetRedPixelComponent(p);
          sum_cubes+=(double) p->red*p->red*GetRedPixelComponent(p);
          sum_fourth_power+=(double) p->red*p->red*p->red*
            GetRedPixelComponent(p);
          area++;
        }
      if ((channel & GreenChannel) != 0)
        {
          mean+=GetGreenPixelComponent(p);
          sum_squares+=(double) p->green*GetGreenPixelComponent(p);
          sum_cubes+=(double) p->green*p->green*GetGreenPixelComponent(p);
          sum_fourth_power+=(double) p->green*p->green*p->green*
            GetGreenPixelComponent(p);
          area++;
        }
      if ((channel & BlueChannel) != 0)
        {
          mean+=GetBluePixelComponent(p);
          sum_squares+=(double) p->blue*GetBluePixelComponent(p);
          sum_cubes+=(double) p->blue*p->blue*GetBluePixelComponent(p);
          sum_fourth_power+=(double) p->blue*p->blue*p->blue*
            GetBluePixelComponent(p);
          area++;
        }
      if ((channel & OpacityChannel) != 0)
        {
          mean+=GetOpacityPixelComponent(p);
          sum_squares+=(double) p->opacity*GetOpacityPixelComponent(p);
          sum_cubes+=(double) p->opacity*p->opacity*GetOpacityPixelComponent(p);
          sum_fourth_power+=(double) p->opacity*p->opacity*p->opacity*
            GetOpacityPixelComponent(p);
          area++;
        }
      /* the index channel holds black only in CMYK colorspace */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          mean+=indexes[x];
          sum_squares+=(double) indexes[x]*indexes[x];
          sum_cubes+=(double) indexes[x]*indexes[x]*indexes[x];
          sum_fourth_power+=(double) indexes[x]*indexes[x]*indexes[x]*
            indexes[x];
          area++;
        }
      p++;
    }
  }
  if (y < (ssize_t) image->rows)
    return(MagickFalse);  /* a row fetch failed; statistics are incomplete */
  if (area != 0.0)
    {
      /* normalize raw sums to averages */
      mean/=area;
      sum_squares/=area;
      sum_cubes/=area;
      sum_fourth_power/=area;
    }
  standard_deviation=sqrt(sum_squares-(mean*mean));
  if (standard_deviation != 0.0)
    {
      /* central-moment formulas from raw moments */
      *kurtosis=sum_fourth_power-4.0*mean*sum_cubes+6.0*mean*mean*sum_squares-
        3.0*mean*mean*mean*mean;
      *kurtosis/=standard_deviation*standard_deviation*standard_deviation*
        standard_deviation;
      *kurtosis-=3.0;  /* excess kurtosis: 0 for a normal distribution */
      *skewness=sum_cubes-3.0*mean*sum_squares+2.0*mean*mean*mean;
      *skewness/=standard_deviation*standard_deviation*standard_deviation;
    }
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelRange() returns the range of one or more image channels.
%
% The format of the GetImageChannelRange method is:
%
% MagickBooleanType GetImageChannelRange(const Image *image,
% const ChannelType channel,double *minima,double *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageRange() returns the range over all channels; a convenience wrapper
  around GetImageChannelRange().
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,
  double *minima,double *maxima,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=GetImageChannelRange(image,AllChannels,minima,maxima,exception);
  return(status);
}
/*
  GetImageChannelRange() returns the minimum and maximum sample values found
  in the selected channels, as doubles.  The outputs are seeded with
  sentinels (+1e37 / -1e37) so any real sample replaces them.

  Returns MagickFalse if any pixel row could not be read.
*/
MagickExport MagickBooleanType GetImageChannelRange(const Image *image,
  const ChannelType channel,double *minima,double *maxima,
  ExceptionInfo *exception)
{
  MagickPixelPacket
    pixel;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *maxima=(-1.0E-37);
  *minima=1.0E+37;
  GetMagickPixelPacket(image,&pixel);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* normalize the packed pixel into MagickPixelPacket form */
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if ((channel & RedChannel) != 0)
        {
          if (pixel.red < *minima)
            *minima=(double) pixel.red;
          if (pixel.red > *maxima)
            *maxima=(double) pixel.red;
        }
      if ((channel & GreenChannel) != 0)
        {
          if (pixel.green < *minima)
            *minima=(double) pixel.green;
          if (pixel.green > *maxima)
            *maxima=(double) pixel.green;
        }
      if ((channel & BlueChannel) != 0)
        {
          if (pixel.blue < *minima)
            *minima=(double) pixel.blue;
          if (pixel.blue > *maxima)
            *maxima=(double) pixel.blue;
        }
      if ((channel & OpacityChannel) != 0)
        {
          if (pixel.opacity < *minima)
            *minima=(double) pixel.opacity;
          if (pixel.opacity > *maxima)
            *maxima=(double) pixel.opacity;
        }
      /* the index channel holds black only in CMYK colorspace */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if ((double) indexes[x] < *minima)
            *minima=(double) indexes[x];
          if ((double) indexes[x] > *maxima)
            *maxima=(double) indexes[x];
        }
      p++;
    }
  }
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelStatistics() returns statistics for each channel in the
% image. The statistics include the channel depth, its minima, maxima, mean,
% standard deviation, kurtosis and skewness. You can access the red channel
% mean, for example, like this:
%
% channel_statistics=GetImageChannelStatistics(image,exception);
% red_mean=channel_statistics[RedChannel].mean;
%
% Use MagickRelinquishMemory() to free the statistics buffer.
%
% The format of the GetImageChannelStatistics method is:
%
% ChannelStatistics *GetImageChannelStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageChannelStatistics() computes, for each channel and for the
  AllChannels aggregate, the effective bit depth, minima, maxima, mean,
  variance, standard deviation, skewness and kurtosis.  The result is a
  heap array indexed by channel constant (0..AllChannels); the caller frees
  it with RelinquishMagickMemory().
*/
MagickExport ChannelStatistics *GetImageChannelStatistics(const Image *image,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  double
    area;

  MagickStatusType
    status;

  QuantumAny
    range;

  register ssize_t
    i;

  size_t
    channels,
    depth,
    length;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* one slot per channel constant plus the AllChannels aggregate slot */
  length=AllChannels+1UL;
  channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(length,
    sizeof(*channel_statistics));
  if (channel_statistics == (ChannelStatistics *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_statistics,0,length*
    sizeof(*channel_statistics));
  for (i=0; i <= AllChannels; i++)
  {
    /* sentinels: any real sample replaces minima/maxima; depth starts at 1
       and is grown by the round-trip probes below */
    channel_statistics[i].depth=1;
    channel_statistics[i].maxima=(-1.0E-37);
    channel_statistics[i].minima=1.0E+37;
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    /*
      NOTE: x is deliberately NOT incremented in the for-statement.  When a
      depth probe below fails, `continue` retries the SAME pixel with the
      channel's depth bumped by one; x/p advance only at the loop bottom.
    */
    for (x=0; x < (ssize_t) image->columns; )
    {
      if (channel_statistics[RedChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
        {
          /* does the sample survive a round-trip through the candidate
             depth?  If not, deepen and re-test this pixel. */
          depth=channel_statistics[RedChannel].depth;
          range=GetQuantumRange(depth);
          status=p->red != ScaleAnyToQuantum(ScaleQuantumToAny(p->red,range),
            range) ? MagickTrue : MagickFalse;
          if (status != MagickFalse)
            {
              channel_statistics[RedChannel].depth++;
              continue;
            }
        }
      if (channel_statistics[GreenChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
        {
          depth=channel_statistics[GreenChannel].depth;
          range=GetQuantumRange(depth);
          status=p->green != ScaleAnyToQuantum(ScaleQuantumToAny(p->green,
            range),range) ? MagickTrue : MagickFalse;
          if (status != MagickFalse)
            {
              channel_statistics[GreenChannel].depth++;
              continue;
            }
        }
      if (channel_statistics[BlueChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
        {
          depth=channel_statistics[BlueChannel].depth;
          range=GetQuantumRange(depth);
          status=p->blue != ScaleAnyToQuantum(ScaleQuantumToAny(p->blue,
            range),range) ? MagickTrue : MagickFalse;
          if (status != MagickFalse)
            {
              channel_statistics[BlueChannel].depth++;
              continue;
            }
        }
      if (image->matte != MagickFalse)
        {
          if (channel_statistics[OpacityChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
            {
              depth=channel_statistics[OpacityChannel].depth;
              range=GetQuantumRange(depth);
              status=p->opacity != ScaleAnyToQuantum(ScaleQuantumToAny(
                p->opacity,range),range) ? MagickTrue : MagickFalse;
              if (status != MagickFalse)
                {
                  channel_statistics[OpacityChannel].depth++;
                  continue;
                }
            }
        }
      if (image->colorspace == CMYKColorspace)
        {
          if (channel_statistics[BlackChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
            {
              depth=channel_statistics[BlackChannel].depth;
              range=GetQuantumRange(depth);
              status=indexes[x] != ScaleAnyToQuantum(ScaleQuantumToAny(
                indexes[x],range),range) ? MagickTrue : MagickFalse;
              if (status != MagickFalse)
                {
                  channel_statistics[BlackChannel].depth++;
                  continue;
                }
            }
        }
      /*
        Depth probes passed: accumulate extrema and raw power sums
        (x, x^2, x^3, x^4) for each channel.
      */
      if ((double) p->red < channel_statistics[RedChannel].minima)
        channel_statistics[RedChannel].minima=(double) GetRedPixelComponent(p);
      if ((double) p->red > channel_statistics[RedChannel].maxima)
        channel_statistics[RedChannel].maxima=(double) GetRedPixelComponent(p);
      channel_statistics[RedChannel].sum+=GetRedPixelComponent(p);
      channel_statistics[RedChannel].sum_squared+=(double) p->red*
        GetRedPixelComponent(p);
      channel_statistics[RedChannel].sum_cubed+=(double) p->red*p->red*
        GetRedPixelComponent(p);
      channel_statistics[RedChannel].sum_fourth_power+=(double) p->red*p->red*
        p->red*GetRedPixelComponent(p);
      if ((double) p->green < channel_statistics[GreenChannel].minima)
        channel_statistics[GreenChannel].minima=(double)
          GetGreenPixelComponent(p);
      if ((double) p->green > channel_statistics[GreenChannel].maxima)
        channel_statistics[GreenChannel].maxima=(double)
          GetGreenPixelComponent(p);
      channel_statistics[GreenChannel].sum+=GetGreenPixelComponent(p);
      channel_statistics[GreenChannel].sum_squared+=(double) p->green*
        GetGreenPixelComponent(p);
      channel_statistics[GreenChannel].sum_cubed+=(double) p->green*p->green*
        GetGreenPixelComponent(p);
      channel_statistics[GreenChannel].sum_fourth_power+=(double) p->green*
        p->green*p->green*GetGreenPixelComponent(p);
      if ((double) p->blue < channel_statistics[BlueChannel].minima)
        channel_statistics[BlueChannel].minima=(double)
          GetBluePixelComponent(p);
      if ((double) p->blue > channel_statistics[BlueChannel].maxima)
        channel_statistics[BlueChannel].maxima=(double)
          GetBluePixelComponent(p);
      channel_statistics[BlueChannel].sum+=GetBluePixelComponent(p);
      channel_statistics[BlueChannel].sum_squared+=(double) p->blue*
        GetBluePixelComponent(p);
      channel_statistics[BlueChannel].sum_cubed+=(double) p->blue*p->blue*
        GetBluePixelComponent(p);
      channel_statistics[BlueChannel].sum_fourth_power+=(double) p->blue*
        p->blue*p->blue*GetBluePixelComponent(p);
      if (image->matte != MagickFalse)
        {
          if ((double) p->opacity < channel_statistics[OpacityChannel].minima)
            channel_statistics[OpacityChannel].minima=(double)
              GetOpacityPixelComponent(p);
          if ((double) p->opacity > channel_statistics[OpacityChannel].maxima)
            channel_statistics[OpacityChannel].maxima=(double)
              GetOpacityPixelComponent(p);
          channel_statistics[OpacityChannel].sum+=GetOpacityPixelComponent(p);
          channel_statistics[OpacityChannel].sum_squared+=(double)
            p->opacity*GetOpacityPixelComponent(p);
          channel_statistics[OpacityChannel].sum_cubed+=(double) p->opacity*
            p->opacity*GetOpacityPixelComponent(p);
          channel_statistics[OpacityChannel].sum_fourth_power+=(double)
            p->opacity*p->opacity*p->opacity*GetOpacityPixelComponent(p);
        }
      if (image->colorspace == CMYKColorspace)
        {
          if ((double) indexes[x] < channel_statistics[BlackChannel].minima)
            channel_statistics[BlackChannel].minima=(double) indexes[x];
          if ((double) indexes[x] > channel_statistics[BlackChannel].maxima)
            channel_statistics[BlackChannel].maxima=(double) indexes[x];
          channel_statistics[BlackChannel].sum+=indexes[x];
          channel_statistics[BlackChannel].sum_squared+=(double)
            indexes[x]*indexes[x];
          channel_statistics[BlackChannel].sum_cubed+=(double) indexes[x]*
            indexes[x]*indexes[x];
          channel_statistics[BlackChannel].sum_fourth_power+=(double)
            indexes[x]*indexes[x]*indexes[x]*indexes[x];
        }
      x++;
      p++;
    }
  }
  area=(double) image->columns*image->rows;
  for (i=0; i < AllChannels; i++)
  {
    /* normalize raw sums to per-pixel averages; derive mean/variance/sd */
    channel_statistics[i].sum/=area;
    channel_statistics[i].sum_squared/=area;
    channel_statistics[i].sum_cubed/=area;
    channel_statistics[i].sum_fourth_power/=area;
    channel_statistics[i].mean=channel_statistics[i].sum;
    channel_statistics[i].variance=channel_statistics[i].sum_squared;
    channel_statistics[i].standard_deviation=sqrt(
      channel_statistics[i].variance-(channel_statistics[i].mean*
      channel_statistics[i].mean));
  }
  for (i=0; i < AllChannels; i++)
  {
    /* fold every per-channel slot into the AllChannels aggregate */
    channel_statistics[AllChannels].depth=(size_t) MagickMax((double)
      channel_statistics[AllChannels].depth,(double)
      channel_statistics[i].depth);
    channel_statistics[AllChannels].minima=MagickMin(
      channel_statistics[AllChannels].minima,channel_statistics[i].minima);
    channel_statistics[AllChannels].maxima=MagickMax(
      channel_statistics[AllChannels].maxima,channel_statistics[i].maxima);
    channel_statistics[AllChannels].sum+=channel_statistics[i].sum;
    channel_statistics[AllChannels].sum_squared+=
      channel_statistics[i].sum_squared;
    channel_statistics[AllChannels].sum_cubed+=channel_statistics[i].sum_cubed;
    channel_statistics[AllChannels].sum_fourth_power+=
      channel_statistics[i].sum_fourth_power;
    channel_statistics[AllChannels].mean+=channel_statistics[i].mean;
    channel_statistics[AllChannels].variance+=channel_statistics[i].variance-
      channel_statistics[i].mean*channel_statistics[i].mean;
    channel_statistics[AllChannels].standard_deviation+=
      channel_statistics[i].variance-channel_statistics[i].mean*
      channel_statistics[i].mean;
  }
  /* count only channels that are actually populated for this image */
  channels=3;
  if (image->matte != MagickFalse)
    channels++;
  if (image->colorspace == CMYKColorspace)
    channels++;
  channel_statistics[AllChannels].sum/=channels;
  channel_statistics[AllChannels].sum_squared/=channels;
  channel_statistics[AllChannels].sum_cubed/=channels;
  channel_statistics[AllChannels].sum_fourth_power/=channels;
  channel_statistics[AllChannels].mean/=channels;
  channel_statistics[AllChannels].variance/=channels;
  channel_statistics[AllChannels].standard_deviation=
    sqrt(channel_statistics[AllChannels].standard_deviation/channels);
  channel_statistics[AllChannels].kurtosis/=channels;
  channel_statistics[AllChannels].skewness/=channels;
  for (i=0; i <= AllChannels; i++)
  {
    /* skewness/kurtosis from raw moments; skip flat channels (sd == 0) */
    if (channel_statistics[i].standard_deviation == 0.0)
      continue;
    channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-
      3.0*channel_statistics[i].mean*channel_statistics[i].sum_squared+
      2.0*channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].mean)/(channel_statistics[i].standard_deviation*
      channel_statistics[i].standard_deviation*
      channel_statistics[i].standard_deviation);
    channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-
      4.0*channel_statistics[i].mean*channel_statistics[i].sum_cubed+
      6.0*channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
      channel_statistics[i].mean*1.0*channel_statistics[i].mean*
      channel_statistics[i].mean)/(channel_statistics[i].standard_deviation*
      channel_statistics[i].standard_deviation*
      channel_statistics[i].standard_deviation*
      channel_statistics[i].standard_deviation)-3.0;
  }
  return(channel_statistics);
}
|
GrB_Matrix_exportSize.c | //------------------------------------------------------------------------------
// GrB_Matrix_exportSize: determine sizes of arrays for GrB_Matrix_export
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB_transpose.h"
#define GB_FREE_ALL ;
GrB_Info GrB_Matrix_exportSize  // determine sizes of user arrays for export
(
    GrB_Index *Ap_len,      // # of entries required for Ap (not # of bytes)
    GrB_Index *Ai_len,      // # of entries required for Ai (not # of bytes)
    GrB_Index *Ax_len,      // # of entries required for Ax (not # of bytes)
    GrB_Format format,      // export format
    GrB_Matrix A            // matrix to export
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GB_WHERE1 ("GrB_Matrix_exportSize (&Ap_len, &Ai_len, &Ax_len, format, A)") ;
    GB_BURBLE_START ("GrB_Matrix_exportSize") ;
    GB_RETURN_IF_NULL_OR_FAULTY (A) ;
    GB_RETURN_IF_NULL (Ap_len) ;
    GB_RETURN_IF_NULL (Ai_len) ;
    GB_RETURN_IF_NULL (Ax_len) ;

    // Ax always needs one entry per stored value, regardless of the format.
    GrB_Info info ;
    GrB_Index nvals ;
    GB_OK (GB_nvals (&nvals, A, Context)) ;
    (*Ax_len) = nvals ;

    //--------------------------------------------------------------------------
    // determine the sizes of Ap and Ai for each format
    //--------------------------------------------------------------------------

    if (format == GrB_CSR_FORMAT)
    {
        // CSR: Ap holds nrows+1 row pointers, Ai holds one column index per entry
        (*Ap_len) = GB_NROWS (A) + 1 ;
        (*Ai_len) = nvals ;
    }
    else if (format == GrB_CSC_FORMAT)
    {
        // CSC: Ap holds ncols+1 column pointers, Ai holds one row index per entry
        (*Ap_len) = GB_NCOLS (A) + 1 ;
        (*Ai_len) = nvals ;
    }
    else if (format == GrB_COO_FORMAT)
    {
        // COO (tuples): one row index and one column index per entry
        (*Ap_len) = nvals ;
        (*Ai_len) = nvals ;
    }
    else
    {
        // unknown format (dense row/col formats are not supported here)
        return (GrB_INVALID_VALUE) ;
    }

    #pragma omp flush
    return (GrB_SUCCESS) ;
}
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include "omp.h"
#include "functions.h"
int main (int argc, char **argv) {

  // Number of OpenMP threads used for the brute-force key search below.
  int Nthreads = 1; // change this every time you run this
  omp_set_num_threads(Nthreads);

  // Seed value for the randomizer.
  double seed = clock(); // this will make your program run differently everytime
  //double seed = 0;     // uncomment this and your program will behave the same everytime it's run
  srand((unsigned int) seed);

  // Declare storage for an ElGamal cryptosystem: public key (p, g, h), secret key x.
  unsigned int p, g, h, x;

  // Begin by getting the user's input.
  unsigned int n;
  printf("Enter a number of bits: "); fflush(stdout);
  // BUG FIX: scanf returns an int (# of items converted), and must be checked;
  // previously the result was stored in a char and ignored, so a failed read
  // left n uninitialized (undefined behavior on the bounds check below).
  int status = scanf("%u", &n);

  // Make sure the input makes sense. 8 is the minimum (need to encode chars),
  // and 31 keeps the arithmetic within unsigned int range.
  if ((status != 1) || (n < 8) || (n > 31)) {
    printf("Unsupported bit size.\n");
    return 0;
  }
  printf("\n");

  // Set up an ElGamal cryptosystem.
  setupElGamal(n, &p, &g, &h, &x);

  int bufferSize = 1024;
  unsigned char *message = (unsigned char *) malloc(bufferSize * sizeof(unsigned char));
  if (message == NULL) {
    printf("Out of memory.\n");
    return 1;
  }

  // Populate the string with a message (cast: str* APIs take char*).
  strcpy((char *) message, "Hello, this is the message as a string.");
  printf("Message = \"%s\"\n", message);

  /* Q1.1 Finish this line */
  unsigned int charsPerInt = n / 8;

  padString(message, charsPerInt);
  printf("Padded Message = \"%s\"\n", message);

  unsigned int Nchars = strlen((char *) message);
  unsigned int Nints  = Nchars / charsPerInt;

  // Storage for message as elements of Z_p.
  unsigned int *Zmessage =
    (unsigned int *) malloc(Nints * sizeof(unsigned int));
  // Storage for extra encryption coefficient.
  unsigned int *a =
    (unsigned int *) malloc(Nints * sizeof(unsigned int));
  if (Zmessage == NULL || a == NULL) {
    printf("Out of memory.\n");
    free(a); free(Zmessage); free(message);   // free(NULL) is a no-op
    return 1;
  }

  // Cast the string into an unsigned int array.
  convertStringToZ(message, Nchars, Zmessage, Nints);

  // Encrypt the Zmessage with the ElGamal cryptographic system.
  ElGamalEncrypt(Zmessage, a, Nints, p, g, h);
  // BUG FIX: the list was closed with ']' but never opened with '['.
  printf("The encrypted text is: [");
  for (unsigned int i = 0; i < Nints; i++) {
    printf("(%u,%u) ", Zmessage[i], a[i]);
  }
  printf("]\n");

  // Decrypt the Zmessage with the ElGamal cryptographic system.
  ElGamalDecrypt(Zmessage, a, Nints, p, x);
  convertZToString(Zmessage, Nints, message, Nchars);
  printf("Decrypted Message = \"%s\"\n", message);
  printf("\n");

  // Suppose we don't know the secret key. Use OpenMP threads to try and find it in parallel.
  printf("Using %d OpenMP threads to find the secret key...\n", Nthreads);

  /* Q2.3 Parallelize this loop with OpenMP */
  double startTime = omp_get_wtime();
  unsigned int found = 0;
  #pragma omp parallel for shared(found)
  for (unsigned int i = 0; i < p - 1; i++) {
    // Tests exponent i+1:  g^(i+1) mod p == h  <=>  secret key x = i+1.
    if (found == 0 && modExp(g, i + 1, p) == h) {
      // BUG FIX: the key is i+1, not i (the loop tests modExp(g, i+1, p)).
      printf("Secret key found! x = %u \n", i + 1);
      // Atomic write avoids a data race on the early-exit flag.
      #pragma omp atomic write
      found = 1;
    }
  }
  double endTime = omp_get_wtime();

  double totalTime = endTime - startTime;
  double work = (double) p;
  double throughput = work / totalTime;
  printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n",
         totalTime, throughput);

  free(a);
  free(Zmessage);
  free(message);
  return 0;
}
|
displacement_lagrangemultiplier_residual_contact_criteria.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "utilities/color_utilities.h"
#include "utilities/constraint_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@name Kratos Classes
///@{
/**
* @class DisplacementLagrangeMultiplierResidualContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Convergence criteria for contact problems
* This class implements a convergence control based on nodal displacement and
* lagrange multiplier values. The error is evaluated separately for each of them, and
* relative and absolute tolerances for both must be specified.
* @author Vicente Mataix Ferrandiz
*/
template< class TSparseSpace,
          class TDenseSpace >
class DisplacementLagrangeMultiplierResidualContactCriteria
    : public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of DisplacementLagrangeMultiplierResidualContactCriteria
    KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierResidualContactCriteria );

    /// Local Flags
    KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT );
    KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );
    KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED );
    KRATOS_DEFINE_LOCAL_FLAG( INITIAL_RESIDUAL_IS_SET );

    /// The base class definition (and it subclasses)
    typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
    typedef typename BaseType::TDataType                    TDataType;
    typedef typename BaseType::DofsArrayType            DofsArrayType;
    typedef typename BaseType::TSystemMatrixType    TSystemMatrixType;
    typedef typename BaseType::TSystemVectorType    TSystemVectorType;

    /// The sparse space used
    typedef TSparseSpace                              SparseSpaceType;

    /// The r_table stream definition TODO: Replace by logger
    typedef TableStreamUtility::Pointer       TablePrinterPointerType;

    /// The index type definition
    typedef std::size_t                                     IndexType;

    /// The key type definition
    typedef std::size_t                                       KeyType;

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Default constructor (parameters)
     * @param DispRatioTolerance Relative tolerance for displacement residual error
     * @param DispAbsTolerance Absolute tolerance for displacement residual error
     * @param LMRatioTolerance Relative tolerance for lagrange multiplier residual error
     * @param LMAbsTolerance Absolute tolerance for lagrange multiplier residual error
     * @param EnsureContact To check if the contact is lost
     * @param PrintingOutput If the output is going to be printed in a txt file
     */
    explicit DisplacementLagrangeMultiplierResidualContactCriteria(
        const TDataType DispRatioTolerance,
        const TDataType DispAbsTolerance,
        const TDataType LMRatioTolerance,
        const TDataType LMAbsTolerance,
        const bool EnsureContact = false,
        const bool PrintingOutput = false
        )
        : BaseType()
    {
        // Set local flags
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT, EnsureContact);
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT, PrintingOutput);
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, false);
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);

        // The residual tolerances
        mDispRatioTolerance = DispRatioTolerance;
        mDispAbsTolerance = DispAbsTolerance;
        mLMRatioTolerance = LMRatioTolerance;
        mLMAbsTolerance = LMAbsTolerance;
    }

    /**
     * @brief Default constructor (parameters)
     * @param ThisParameters The configuration parameters
     */
    explicit DisplacementLagrangeMultiplierResidualContactCriteria( Parameters ThisParameters = Parameters(R"({})"))
        : BaseType()
    {
        // The default parameters
        Parameters default_parameters = Parameters(R"(
        {
            "ensure_contact"                                     : false,
            "print_convergence_criterion"                        : false,
            "residual_relative_tolerance"                        : 1.0e-4,
            "residual_absolute_tolerance"                        : 1.0e-9,
            "contact_residual_relative_tolerance"                : 1.0e-4,
            "contact_residual_absolute_tolerance"                : 1.0e-9
        })" );
        ThisParameters.ValidateAndAssignDefaults(default_parameters);

        // The displacement residual
        mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble();
        mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble();

        // The contact residual
        // BUG FIX: the relative tolerance was read from the non-existent key
        // "contact_displacement_absolute_tolerance", which is not declared in
        // default_parameters above and therefore always fails after
        // ValidateAndAssignDefaults. The correct key is
        // "contact_residual_relative_tolerance".
        mLMRatioTolerance = ThisParameters["contact_residual_relative_tolerance"].GetDouble();
        mLMAbsTolerance = ThisParameters["contact_residual_absolute_tolerance"].GetDouble();

        // Set local flags
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool());
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, false);
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
    }

    //* Copy constructor.
    DisplacementLagrangeMultiplierResidualContactCriteria( DisplacementLagrangeMultiplierResidualContactCriteria const& rOther )
      :BaseType(rOther)
      ,mOptions(rOther.mOptions)
      ,mDispRatioTolerance(rOther.mDispRatioTolerance)
      ,mDispAbsTolerance(rOther.mDispAbsTolerance)
      ,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm)
      ,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm)
      ,mLMRatioTolerance(rOther.mLMRatioTolerance)
      ,mLMAbsTolerance(rOther.mLMAbsTolerance)
      ,mLMInitialResidualNorm(rOther.mLMInitialResidualNorm)
      ,mLMCurrentResidualNorm(rOther.mLMCurrentResidualNorm)
    {
    }

    /// Destructor.
    ~DisplacementLagrangeMultiplierResidualContactCriteria() override = default;

    ///@}
    ///@name Operators
    ///@{

    /**
     * @brief Compute relative and absolute error.
     * @param rModelPart Reference to the ModelPart containing the contact problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual)
     * @return true if convergence is achieved, false otherwise
     */
    bool PostCriteria(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something
            // Initialize
            TDataType disp_residual_solution_norm = 0.0, lm_residual_solution_norm = 0.0;
            IndexType disp_dof_num(0),lm_dof_num(0);

            // First iterator
            const auto it_dof_begin = rDofSet.begin();

            // Loop over Dofs: accumulate the squared residual norms of the
            // displacement and Lagrange-multiplier blocks separately.
            // NOTE(fix): dof_id and residual_dof_value are per-iteration
            // temporaries, so they are declared inside the loop body instead
            // of being (meaninglessly) summed in the reduction clause.
            #pragma omp parallel for reduction(+:disp_residual_solution_norm,lm_residual_solution_norm,disp_dof_num,lm_dof_num)
            for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
                auto it_dof = it_dof_begin + i;

                const std::size_t dof_id = it_dof->EquationId();
                if (mActiveDofs[dof_id]) {
                    const TDataType residual_dof_value = rb[dof_id];

                    const auto curr_var = it_dof->GetVariable();
                    if ((curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) {
                        lm_residual_solution_norm += residual_dof_value * residual_dof_value;
                        lm_dof_num++;
                    } else {
                        disp_residual_solution_norm += residual_dof_value * residual_dof_value;
                        disp_dof_num++;
                    }
                }
            }

            mDispCurrentResidualNorm = disp_residual_solution_norm;
            mLMCurrentResidualNorm = lm_residual_solution_norm;

            TDataType residual_disp_ratio = 1.0;
            TDataType residual_lm_ratio = 1.0;

            // We initialize the solution (reference norms; a zero norm is
            // replaced by 1.0 to avoid division by zero below)
            if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET)) {
                mDispInitialResidualNorm = (disp_residual_solution_norm == 0.0) ? 1.0 : disp_residual_solution_norm;
                mLMInitialResidualNorm = (lm_residual_solution_norm == 0.0) ? 1.0 : lm_residual_solution_norm;
                residual_disp_ratio = 1.0;
                residual_lm_ratio = 1.0;
                mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, true);
            }

            // We calculate the ratio of the displacements
            residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm;

            // We calculate the ratio of the LM
            residual_lm_ratio = mLMCurrentResidualNorm/mLMInitialResidualNorm;

            KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT) && residual_lm_ratio == 0.0) << "ERROR::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;

            // We calculate the absolute norms (mean squared residual per DoF)
            const TDataType residual_disp_abs = mDispCurrentResidualNorm/disp_dof_num;
            const TDataType residual_lm_abs = mLMCurrentResidualNorm/lm_dof_num;

            // The process info of the model part
            ProcessInfo& r_process_info = rModelPart.GetProcessInfo();

            // We print the results // TODO: Replace for the new log
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                if (r_process_info.Has(TABLE_UTILITY)) {
                    std::cout.precision(4);
                    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                    auto& Table = p_table->GetTable();
                    Table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_lm_ratio << mLMRatioTolerance << residual_lm_abs << mLMAbsTolerance;
                } else {
                    std::cout.precision(4);
                    if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) {
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("RESIDUAL CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tLAGRANGE MUL: RATIO = ") << residual_lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << residual_lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl;
                    } else {
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "RESIDUAL CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tLAGRANGE MUL: RATIO = " << residual_lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << residual_lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl;
                    }
                }
            }

            r_process_info[CONVERGENCE_RATIO] = (residual_disp_ratio > residual_lm_ratio) ? residual_disp_ratio : residual_lm_ratio;
            r_process_info[RESIDUAL_NORM] = (residual_lm_abs > mLMAbsTolerance) ? residual_lm_abs : mLMAbsTolerance;

            // We check if converged (either relative or absolute criterion per block)
            const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance);
            const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT) && residual_lm_ratio == 0.0) ? true : (residual_lm_ratio <= mLMRatioTolerance || residual_lm_abs <= mLMAbsTolerance);

            if (disp_converged && lm_converged ) {
                if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                    if (r_process_info.Has(TABLE_UTILITY)) {
                        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                        auto& Table = p_table->GetTable();
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
                            Table << BOLDFONT(FGRN("       Achieved"));
                        else
                            Table << "Achieved";
                    } else {
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
                            KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
                        else
                            KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tResidual convergence is achieved" << std::endl;
                    }
                }
                return true;
            } else {
                if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                    if (r_process_info.Has(TABLE_UTILITY)) {
                        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                        auto& r_table = p_table->GetTable();
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
                            r_table << BOLDFONT(FRED("   Not achieved"));
                        else
                            r_table << "Not achieved";
                    } else {
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
                            KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl;
                        else
                            KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tResidual convergence is not achieved" << std::endl;
                    }
                }
                return false;
            }
        } else // In this case all the displacements are imposed!
            return true;
    }

    /**
     * @brief This function initialize the convergence criteria
     * @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
     */
    void Initialize( ModelPart& rModelPart) override
    {
        BaseType::mConvergenceCriteriaIsInitialized = true;

        // Lazily add the output columns to the table utility, only once
        ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
        if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED)) {
            TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
            auto& r_table = p_table->GetTable();
            r_table.AddColumn("DP RATIO", 10);
            r_table.AddColumn("EXP. RAT", 10);
            r_table.AddColumn("ABS", 10);
            r_table.AddColumn("EXP. ABS", 10);
            r_table.AddColumn("LM RATIO", 10);
            r_table.AddColumn("EXP. RAT", 10);
            r_table.AddColumn("ABS", 10);
            r_table.AddColumn("EXP. ABS", 10);
            r_table.AddColumn("CONVERGENCE", 15);
            mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, true);
        }
    }

    /**
     * @brief This function initializes the solution step
     * @param rModelPart Reference to the ModelPart containing the contact problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual)
     */
    void InitializeSolutionStep(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        // Initialize flag
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);

        // Filling mActiveDofs when MPC exist
        ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet);
    }

    ///@}
    ///@name Operations
    ///@{

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Friends
    ///@{

protected:

    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:

    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    Flags mOptions; /// Local flags

    TDataType mDispRatioTolerance;      /// The ratio threshold for the norm of the displacement residual
    TDataType mDispAbsTolerance;        /// The absolute value threshold for the norm of the displacement residual
    TDataType mDispInitialResidualNorm; /// The reference norm of the displacement residual
    TDataType mDispCurrentResidualNorm; /// The current norm of the displacement residual

    TDataType mLMRatioTolerance;        /// The ratio threshold for the norm of the LM residual
    TDataType mLMAbsTolerance;          /// The absolute value threshold for the norm of the LM residual
    TDataType mLMInitialResidualNorm;   /// The reference norm of the LM residual
    TDataType mLMCurrentResidualNorm;   /// The current norm of the LM residual

    std::vector<bool> mActiveDofs;      /// This vector contains the dofs that are active

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@}
    ///@name Serialization
    ///@{

    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Unaccessible methods
    ///@{

    ///@}
}; // Kratos DisplacementLagrangeMultiplierResidualContactCriteria
///@name Local flags creation
///@{
/// Local Flags
// Out-of-class definitions of the local flags declared with
// KRATOS_DEFINE_LOCAL_FLAG inside the class (bit positions 0..3).
// NOTE(review): the NOT_* variants defined here (NOT_ENSURE_CONTACT, etc.)
// do not appear among the KRATOS_DEFINE_LOCAL_FLAG declarations in the class
// above — presumably the macro also declares the negated flag; verify against
// the Kratos flags macro definitions, otherwise these definitions will not link.
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::NOT_ENSURE_CONTACT(Kratos::Flags::Create(0, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::NOT_PRINTING_OUTPUT(Kratos::Flags::Create(1, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::NOT_TABLE_IS_INITIALIZED(Kratos::Flags::Create(2, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(3));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::NOT_INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(3, false));
}
#endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H */
|
GB_unaryop__minv_fp32_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp32_int8
// op(A') function: GB_tran__minv_fp32_int8
// C type: float
// A type: int8_t
// cast: float cij = (float) aij
// unaryop: cij = (1.0F)/aij
// A (input) matrix entry type
#define GB_ATYPE \
    int8_t

// C (output) matrix entry type
#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

// access an entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator: multiplicative inverse (note: z = inf if x == 0, per IEEE 754)
#define GB_OP(z, x) \
    z = (1.0F)/x ;

// casting: convert the int8_t input to float before applying the operator
#define GB_CASTING(z, aij) \
    float z = (float) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [k] = minv ((float) Ax [k]) for all k, in parallel.
GrB_Info GB_unop__minv_fp32_int8
(
    float *Cx,              // Cx and Ax may be aliased
    int8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // apply the typecast-then-operate macro to every entry
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        GB_CAST_OP (k, k) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv ((float) A'): transpose A, typecast each entry from int8_t to
// float, and apply the multiplicative-inverse operator. The actual loop body
// lives in the shared template GB_unaryop_transpose.c, specialized here via
// the GB_* macros defined above.
GrB_Info GB_tran__minv_fp32_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // per-slice row counts from phase 1
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice, // slice boundaries for parallelism
    int naslice                         // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 of 2: fill in the transposed pattern and values
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
declare_variant_messages.c | // RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp -x c -std=c99 -fms-extensions -Wno-pragma-pack %s
// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp-simd -x c -std=c99 -fms-extensions -Wno-pragma-pack %s
#pragma omp declare // expected-error {{expected an OpenMP directive}}
int foo(void);
#pragma omp declare variant // expected-error {{expected '(' after 'declare variant'}}
#pragma omp declare variant( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo // expected-error {{expected ')'}} expected-error {{expected 'match' clause on 'omp declare variant' directive}} expected-note {{to match this '('}}
#pragma omp declare variant(x) // expected-error {{use of undeclared identifier 'x'}} expected-error {{expected 'match' clause on}}
#pragma omp declare variant(foo) // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) xxx // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) match // expected-error {{expected '(' after 'match'}}
#pragma omp declare variant(foo) match( // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context set; set skipped}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match() // expected-warning {{expected identifier or string literal describing a context set; set skipped}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx=) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx=yyy) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx=yyy}) // expected-error {{expected ')'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(xxx={) // expected-error {{expected ')'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx={vvv, vvv}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx={vvv} xxx) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx={vvv}) xxx // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(implementation={xxx}) // expected-warning {{'xxx' is not a valid context selector for the context set 'implementation'; selector ignored}} expected-note {{context selector options are: 'vendor' 'extension' 'unified_address' 'unified_shared_memory' 'reverse_offload' 'dynamic_allocators' 'atomic_default_mem_order'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={vendor}) // expected-warning {{the context selector 'vendor' in context set 'implementation' requires a context property defined in parentheses; selector ignored}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={vendor(}) // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor()}) // expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}}
#pragma omp declare variant(foo) match(implementation={vendor(score ibm)}) // expected-error {{expected '(' after 'score'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}}
#pragma omp declare variant(foo) match(implementation={vendor(score( ibm)}) // expected-error {{use of undeclared identifier 'ibm'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor(score(2 ibm)}) // expected-error {{expected ')'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{to match this '('}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor(score(foo()) ibm)}) // expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{score expressions in the OpenMP context selector need to be constant; foo() is not and will be ignored}}
#pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm), vendor(llvm)}) // expected-warning {{the context selector 'vendor' was used already in the same 'omp declare variant' directive; selector ignored}} expected-note {{the previous context selector 'vendor' used here}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm), kind(cpu)}) // expected-warning {{the context selector 'kind' is not valid for the context set 'implementation'; selector ignored}} expected-note {{the context selector 'kind' can be nested in the context set 'device'; try 'match(device={kind(property)})'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={xxx}) // expected-warning {{'xxx' is not a valid context selector for the context set 'device'; selector ignored}} expected-note {{context selector options are: 'kind' 'arch' 'isa'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={kind}) // expected-warning {{the context selector 'kind' in context set 'device' requires a context property defined in parentheses; selector ignored}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={kind(}) // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(device={kind()}) // expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}}
#pragma omp declare variant(foo) match(device={kind(score cpu)}) // expected-error {{expected '(' after 'score'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('<invalid>'); score ignored}}
#pragma omp declare variant(foo) match(device = {kind(score(ibm) }) // expected-error {{use of undeclared identifier 'ibm'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('<recovery-expr>()'); score ignored}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(device={kind(score(2 gpu)}) // expected-error {{expected ')'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('2'); score ignored}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{to match this '('}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(device={kind(score(foo()) ibm)}) // expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('foo()'); score ignored}} expected-warning {{'ibm' is not a valid context property for the context selector 'kind' and the context set 'device'; property ignored}} expected-note {{try 'match(implementation={vendor(ibm)})'}} expected-note {{the ignored property spans until here}}
#pragma omp declare variant(foo) match(device={kind(score(5): host), kind(llvm)}) // expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('5'); score ignored}} expected-warning {{the context selector 'kind' was used already in the same 'omp declare variant' directive; selector ignored}} expected-note {{the previous context selector 'kind' used here}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={kind(score(5): nohost), vendor(llvm)}) // expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('5'); score ignored}} expected-warning {{the context selector 'vendor' is not valid for the context set 'device'; selector ignored}} expected-note {{the context selector 'vendor' can be nested in the context set 'implementation'; try 'match(implementation={vendor(property)})'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={extension("aaa")}) // expected-warning {{'aaa' is not a valid context property for the context selector 'extension' and the context set 'implementation'; property ignored}} expected-note {{context property options are: 'match_all' 'match_any' 'match_none'}} expected-note {{the ignored property spans until here}}
int bar(void);
#pragma omp declare variant(foo) match(implementation = {vendor(score(foo) :llvm)}) // expected-warning {{score expressions in the OpenMP context selector need to be constant; foo is not and will be ignored}}
#pragma omp declare variant(foo) match(implementation = {vendor(score(foo()) :llvm)}) // expected-warning {{score expressions in the OpenMP context selector need to be constant; foo() is not and will be ignored}}
#pragma omp declare variant(foo) match(implementation = {vendor(score(<expr>) :llvm)}) // expected-error {{expected expression}} expected-error {{use of undeclared identifier 'expr'}} expected-error {{expected expression}}
#pragma omp declare variant(foo) match(user = {condition(foo)}) // expected-error {{the user condition in the OpenMP context selector needs to be constant; foo is not}}
#pragma omp declare variant(foo) match(user = {condition(foo())}) // expected-error {{the user condition in the OpenMP context selector needs to be constant; foo() is not}}
#pragma omp declare variant(foo) match(user = {condition(<expr>)}) // expected-error {{expected expression}} expected-error {{use of undeclared identifier 'expr'}} expected-error {{expected expression}} expected-note {{the ignored selector spans until here}}
int score_and_cond_non_const();
#pragma omp declare variant(foo) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int a; // expected-error {{'#pragma omp declare variant' can only be applied to functions}}
#pragma omp declare variant(foo) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp threadprivate(a) // expected-error {{'#pragma omp declare variant' can only be applied to functions}}
int var;
#pragma omp threadprivate(var)
#pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare // expected-error {{expected an OpenMP directive}}
#pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma options align=packed
int main();
#pragma omp declare variant(foo) match(implementation={vendor(llvm)}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant(foo) match(implementation={vendor(llvm)}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma init_seg(compiler)
int main();
#pragma omp declare variant(foo) match(xxx={}) // expected-error {{single declaration is expected after 'declare variant' directive}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int b, c;
int no_proto();
#pragma omp declare variant(no_proto) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int no_proto_too();
int proto1(int);
#pragma omp declare variant(proto1) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int diff_proto(); // expected-note {{previous declaration is here}}
int diff_proto(double); // expected-error {{conflicting types for 'diff_proto'}}
#pragma omp declare variant(no_proto) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int diff_proto1(double);
int after_use_variant(void);
int after_use();
// Defines bar() with a call to after_use() so the declare-variant pragma
// that follows this definition can exercise the "after first usage" path.
int bar() {
return after_use();
}
#pragma omp declare variant(after_use_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{'#pragma omp declare variant' cannot be applied for function after first usage; the original function might be used}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int after_use(void);
#pragma omp declare variant(after_use_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int defined(void) { return 0; }
int defined1(void) { return 0; }
#pragma omp declare variant(after_use_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{'#pragma omp declare variant' cannot be applied to the function that was defined already; the original function might be used}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int defined1(void);
int diff_cc_variant(void);
#pragma omp declare variant(diff_cc_variant) match(xxx={}) // expected-error {{variant in '#pragma omp declare variant' with type 'int (void)' is incompatible with type 'int (void) __attribute__((vectorcall))'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
__vectorcall int diff_cc(void);
int diff_ret_variant(void);
#pragma omp declare variant(diff_ret_variant) match(xxx={}) // expected-error {{variant in '#pragma omp declare variant' with type 'int (void)' is incompatible with type 'void (void)'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
void diff_ret(void);
void marked(void);
void not_marked(void);
#pragma omp declare variant(not_marked) match(implementation={vendor(unknown)}, device={kind(cpu)}) // expected-note {{marked as 'declare variant' here}}
void marked_variant(void);
#pragma omp declare variant(marked_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{variant function in '#pragma omp declare variant' is itself marked as '#pragma omp declare variant'}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
void marked(void);
#pragma omp declare variant(foo) match(device = {isa("foo")})
int unknown_isa_trait(void);
#pragma omp declare variant(foo) match(device = {isa(foo)})
int unknown_isa_trait2(void);
#pragma omp declare variant(foo) match(device = {kind(fpga), isa(bar)})
int ignored_isa_trait(void);
// Calls the isa-guarded functions declared above; diagnostics fire at the
// call sites whose isa trait is unknown for the current target, while the
// arch/kind-guarded variant stays silent.
void caller() {
unknown_isa_trait(); // expected-warning {{isa trait 'foo' is not known to the current target; verify the spelling or consider restricting the context selector with the 'arch' selector further}}
unknown_isa_trait2(); // expected-warning {{isa trait 'foo' is not known to the current target; verify the spelling or consider restricting the context selector with the 'arch' selector further}}
ignored_isa_trait();
}
// Unknown arch
#pragma omp begin declare variant match(device={isa(sse2020)}) // expected-warning {{isa trait 'sse2020' is not known to the current target; verify the spelling or consider restricting the context selector with the 'arch' selector further}}
#pragma omp end declare variant
// Unknown arch guarded by arch.
#pragma omp begin declare variant match(device={isa(sse2020), arch(ppc)})
#pragma omp end declare variant
#pragma omp declare variant // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant // expected-error {{function declaration is expected after 'declare variant' directive}}
// FIXME: If the scores are equivalent we should detect that and allow it.
#pragma omp begin declare variant match(implementation = {vendor(score(2) \
: llvm)})
#pragma omp declare variant(foo) match(implementation = {vendor(score(2) \
: llvm)}) // expected-error@-1 {{nested OpenMP context selector contains duplicated trait 'llvm' in selector 'vendor' and set 'implementation' with different score}}
int conflicting_nested_score(void);
#pragma omp end declare variant
// FIXME: We should build the conjuction of different conditions, see also the score fixme above.
#pragma omp begin declare variant match(user = {condition(1)})
#pragma omp declare variant(foo) match(user = {condition(1)}) // expected-error {{nested user conditions in OpenMP context selector not supported (yet)}}
int conflicting_nested_condition(void);
#pragma omp end declare variant
|
shared-clause.c | /*
* shared-clause.c
*
* Created on: 02/04/2014
* Author: Carlos de la Torre
*/
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/*
 * Demonstrates the OpenMP shared() clause: every iteration of the parallel
 * loop updates a distinct element of the shared array, so no data race
 * occurs even though 'a' is visible to all threads.
 */
int main() {
const int n = 7;
int a[n];

/* Fill the array with 1..n. */
for (int idx = 0; idx < n; idx++) {
a[idx] = idx + 1;
}

/* Parallel in-place update: a[idx] becomes (idx + 1) + idx. */
#pragma omp parallel for shared(a)
for (int idx = 0; idx < n; idx++) {
a[idx] += idx;
}

printf("Después de parallel for:\n");
for (int idx = 0; idx < n; idx++) {
printf("a[%d] = %d\n", idx, a[idx]);
}
return 0;
}
|
declare_reduction_ast_print.c | // RUN: %clang_cc1 -verify -fopenmp -ast-print %s | FileCheck %s
// RUN: %clang_cc1 -fopenmp -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -include-pch %t -fsyntax-only -verify %s -ast-print | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp-simd -ast-print %s | FileCheck %s
// RUN: %clang_cc1 -fopenmp-simd -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -include-pch %t -fsyntax-only -verify %s -ast-print | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
#pragma omp declare reduction(+ : int, char : omp_out *= omp_in)
// CHECK: #pragma omp declare reduction (+ : int : omp_out *= omp_in){{$}}
// CHECK-NEXT: #pragma omp declare reduction (+ : char : omp_out *= omp_in)
#pragma omp declare reduction(fun : float : omp_out += omp_in) initializer(omp_priv = omp_orig + 15)
// CHECK: #pragma omp declare reduction (fun : float : omp_out += omp_in) initializer(omp_priv = omp_orig + 15)
// CHECK: struct SSS {
// Struct carrying a member declare-reduction; the printed AST splits the
// combined int/char reduction into two separate directives.
struct SSS {
int field;
#pragma omp declare reduction(+ : int, char : omp_out *= omp_in)
// CHECK: #pragma omp declare reduction (+ : int : omp_out *= omp_in)
// CHECK-NEXT: #pragma omp declare reduction (+ : char : omp_out *= omp_in)
};
// CHECK: };
void init(struct SSS *priv, struct SSS orig);
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: #pragma omp declare reduction (fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: int main(void) {
// Verifies that declare reduction prints correctly both at function scope
// and inside a nested compound statement.
int main(void) {
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: #pragma omp declare reduction (fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
{
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: #pragma omp declare reduction (fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
}
return 0;
}
// CHECK: }
#pragma omp declare reduction(mymin:int \
: omp_out = omp_out > omp_in ? omp_in : omp_out) \
initializer(omp_priv = 2147483647)
#pragma omp declare reduction(mymin \
: struct SSS \
: omp_out = omp_out.field > omp_in.field ? omp_in : omp_out)
// Applies the user-declared 'mymin' reduction to both a scalar and a
// struct operand in a combined parallel-for.
int foo(int argc, char **argv) {
int x;
struct SSS ss;
#pragma omp parallel for reduction(mymin : x, ss)
for (int i = 0; i < 1000; i++)
;
return 0;
}
// CHECK: #pragma omp parallel for reduction(mymin: x,ss)
#endif
|
omp_thread_attach_test_2.c | // execute in sequence
// input the number of num_user_threadsation
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/timeb.h>
#include <pthread.h>
#include <omp.h>
#include <omp_interop.h>
/* read timer in second */
/* Wall-clock timestamp in seconds, with millisecond resolution. */
double read_timer() {
struct timeb now;
ftime(&now);
return (double) now.time + now.millitm / 1000.0;
}
int user_thread_return = 0;
/* User pthread entry point: announces itself, attaches the thread to the
 * OpenMP runtime via the (non-standard) callback API, and prints again
 * when the callback returns. Always returns NULL. */
void *test_fun(void *arg){
/* NOTE(review): arg is dereferenced as an int*, but main() currently
 * passes NULL to pthread_create -- confirm callers hand in a valid
 * pointer, otherwise this dereference crashes. */
printf("thread: %d\n", *((int*)arg));
omp_thread_attach_with_callback(NULL, &user_thread_return);
printf("thread call back: %d\n", *((int*)arg));
return ((void*)0);
}
/*
 * Spawns num_user_threads pthreads that attach themselves to the OpenMP
 * runtime, warms up an OpenMP pool of 50 threads, then signals the
 * attached threads to return and joins them.
 */
int main(int argc, char * argv[]) {
/* BUG FIX: num_user_threads was assigned inside the argc branch before
 * it was declared; declare it first so the override compiles. */
int num_user_threads = 100;
if (argc >= 2){
omp_set_num_threads(atoi(argv[1]));
num_user_threads = atoi(argv[1]);
}

pthread_t pthreads[num_user_threads];
/* BUG FIX: the original passed NULL as the thread argument while
 * test_fun dereferences it as int*; hand each thread its own index. */
int thread_args[num_user_threads];

/* BUG FIX: loop index 'i' was never declared. */
for (int i = 0; i < num_user_threads; i++){
thread_args[i] = i;
pthread_create(&pthreads[i], NULL, test_fun, &thread_args[i]);
}

// omp_set_nested();
/* Create 50 threads and put them into the OpenMP thread pool.
 * BUG FIX: the original clause shared(user_thread_id, counter)
 * private(tid) named variables that were never declared; the region
 * only needs a block-local tid (private by default). */
#pragma omp parallel num_threads(50)
{
int tid = omp_get_thread_num();
(void) tid; /* value unused; the region exists only to spin up the pool */
}

/* Signal attached user threads that they may return. */
user_thread_return = 1;
for (int i = 0; i < num_user_threads; i++) {
pthread_join(pthreads[i], NULL);
}
// while(1);
return 0;
}
|
diagsv_x_sky_u.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/*
 * Diagonal solve for a skyline matrix in the unit-diagonal ("_u") variant:
 * with an implicit unit diagonal the solve collapses to the element-wise
 * scaling y[i] = alpha * x[i]; the skyline storage of A is never read,
 * only its row count.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *A, const ALPHA_Number *x, ALPHA_Number *y)
{
const ALPHA_INT m = A->rows;
int threads = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
for (ALPHA_INT i = 0; i < m; ++i)
{
alpha_mul(y[i], alpha, x[i]);
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
fpURFBase.h | #ifndef fpURF_h
#define fpURF_h
#include "../../../baseFunctions/fpForestBase.h"
#include <vector>
#include <map>
#include <algorithm>
#include <unordered_map>
#include <stdio.h>
#include <ctime>
#include <chrono>
#include <cstdlib>
#include "urfTree.h"
#include <sys/time.h>
#include <eigen3/Eigen/Dense>
#include <eigen3/Eigen/Sparse>
#include <eigen3/Eigen/Core>
using namespace Eigen;
namespace fp {
// Unsupervised random forest ("URF"): grows trees without labels and
// accumulates pairwise observation similarities instead of class votes.
// Classification entry points are stubbed out.
template <typename T>
class fpURFBase : public fpForestBase<T>
{
protected:
std::vector<urfTree<T> > trees;                 // one unsupervised tree per slot
std::map<int, std::map<int, int> > simMat;      // per-observation co-occurrence counts
std::map<std::pair<int, int>, double> pairMat;  // (i,j) -> similarity weight
typedef Eigen::SparseMatrix<int> spMat;
typedef Eigen::Triplet<int> TripType;
std::vector<TripType> tripletList;
// BUG FIX: this member (and its uses below) were spelled 'SpMat', which
// does not match the 'spMat' typedef above and fails to resolve.
spMat eigenMat;
public:
~fpURFBase(){}
fpDisplayProgress printProgress;
// Identify this forest type on stdout.
inline void printForestType(){
std::cout << "This is a urf forest.\n";
}
inline void changeForestSize(){
trees.resize(fpSingleton::getSingleton().returnNumTrees());
}
// Seed simMat with one empty row per observation.
inline void initSimMat(){
auto numObs = fpSingleton::getSingleton().returnNumObservations();
for(auto i = 0; i < numObs; ++i) {
std::map<int, int> init_map;
simMat[i] = init_map;
}
}
// Grow all trees in parallel; each tree folds its result into the shared
// similarity structures.
// NOTE(review): updateSimMat/updateSimMatOut mutate simMat and pairMat
// from multiple OpenMP threads with no synchronization visible here --
// confirm those methods serialize internally.
inline void growTrees(){
#pragma omp parallel for num_threads(fpSingleton::getSingleton().returnNumThreads())
for(int i = 0; i < (int)trees.size(); ++i){
trees[i].growTree();
trees[i].updateSimMat(simMat, pairMat);
trees[i].updateSimMatOut(simMat, pairMat);
}
}
inline void checkParameters(){
//TODO: check parameters to make sure they make sense for this forest type.
;
}
// Build a compressed sparse matrix from pairMat.
// Not in use now. TODO: Remove entirely?
inline void createSparseMat(){
auto numObs = fpSingleton::getSingleton().returnNumObservations();
spMat eigenSimMat(numObs, numObs);  // BUG FIX: was 'SpMat'
for (auto it=pairMat.begin(); it!=pairMat.end(); ++it){
int i = (it->first).first;
int j = (it->first).second;
int v_ij = it->second;  // NOTE(review): double -> int truncation; confirm intended
eigenSimMat.coeffRef(i, j) = v_ij;
}
eigenSimMat.makeCompressed();
this->eigenMat = eigenSimMat ;
}
// Dump the sparse matrix as (row, col, value) triplets.
// Not in use now. TODO: Remove entirely?
inline void printSparseMat(){
for (int k = 0; k < eigenMat.outerSize(); ++k){
// BUG FIX: iterated with SparseMatrix<double>::InnerIterator over the
// int-valued eigenMat; use the matrix's own iterator type.
for (spMat::InnerIterator it(eigenMat, k); it; ++it){
std::cout << it.row() <<"\t";
std::cout << it.col() << "\t";
std::cout << it.value() << "\n";
}
}
}
// Aggregate per-tree depth/leaf statistics. The results are currently
// computed and discarded (kept for parity with other forest types).
inline void treeStats(){
int maxDepth=0;
int totalLeafNodes=0;
int totalLeafDepth=0;
int tempMaxDepth;
for(int i = 0; i < fpSingleton::getSingleton().returnNumTrees(); ++i){
tempMaxDepth = trees[i].returnMaxDepth();
maxDepth = ((maxDepth < tempMaxDepth) ? tempMaxDepth : maxDepth);
totalLeafNodes += trees[i].returnNumLeafNodes();
totalLeafDepth += trees[i].returnLeafDepthSum();
}
}
inline std::map<int, std::map<int, int> > returnSimMat() {
return simMat;
}
inline std::map<std::pair<int, int>, double> returnPairMat(){
return pairMat;
}
void printTree0(){
trees[0].printTree();
}
// Entry point: size the forest, grow it, then collect statistics.
void growForest(){
changeForestSize();
growTrees();
treeStats();
}
// Classification queries are undefined for unsupervised forests.
inline int predictClass(std::vector<T>& observation){
std::cout<<"Not defined for unsupervised random forests. \n";
return 0;
}
inline int predictClass(const T *observation)
{
std::cout << "Not defined for unsupervised random forests. \n";
return 0;
}
inline std::vector<int> predictClassPost(std::vector<T> &observation)
{
std::cout << "Not defined for unsupervised random forests. \n";
return {};
}
inline float reportOOB()
{
return 0;
}
inline float testForest()
{
return 0;
}
};
}// namespace fp
#endif
|
owl_ndarray_conv_impl.h | /*
* OWL - OCaml Scientific and Engineering Computing
* Copyright (c) 2016-2018 Liang Wang <liang.wang@cl.cam.ac.uk>
*/
#ifdef OWL_ENABLE_TEMPLATE
/*
* im2col implementation
*/
/*
 * spatial_im2col: forward 2-D convolution over an NHWC batch, computed by
 * expanding each output position's receptive field into one row of a
 * scratch matrix (im2col) and multiplying it against the kernel in a
 * single GEMM. Out-of-bounds taps keep the zero that calloc provided,
 * which implements zero padding.
 */
CAMLprim value FUN_NATIVE (spatial_im2col) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vPadding, value vRow_in_stride, value vCol_in_stride
) {
/* Unwrap the OCaml bigarrays into raw element pointers. */
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int padding = Long_val(vPadding);
/* NOTE(review): the two input-stride (dilation) parameters are accepted
 * but never read below -- confirm dilation is unsupported here or handled
 * by the caller. */
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
/* Precomputed flattened-index strides. */
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = in_channel * input_rows;
const int output_cri = out_channel * output_rows * output_cols;
const int output_cr = output_rows * output_cols;
const int output_crb = output_rows * output_cols * batches;
const int kernel_cri = kernel_cols * kernel_rows * in_channel;
/* im2col scratch: kernel_cri taps per output position, zero-filled. */
TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
INIT;
/* padding != 1 selects the centred-padding path; pr/pc stay 0 otherwise.
 * NOTE(review): presumably 1 encodes VALID padding -- confirm against the
 * OCaml caller's encoding of vPadding. */
int pr = 0, pc = 0;
if (padding != 1) {
pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
}
/* Each iteration fills one disjoint im2col row, so the loop parallelizes
 * without synchronization. */
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < output_crb; ++i) {
int bt = i / output_cr;
int cr = i % output_cr;
int c = cr / output_rows;
int r = cr % output_rows;
const int cstart = c * col_stride - pc;
const int rstart = r * row_stride - pr;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int input_idx_base = bt * input_cri;
int cnt = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int h = 0; h < in_channel; ++h) {
/* Copy the tap only when it falls inside the input; padded taps
 * keep their calloc zero. */
if (a < input_cols && a >= 0 &&
b < input_rows && b >= 0) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + h;
inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
}
++cnt;
}
}
}
}
/* output[output_crb x out_channel] =
 *   inpt2d[output_crb x kernel_cri] * kernel[kernel_cri x out_channel] */
GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
output_crb, out_channel, kernel_cri, ALPHA,
inpt2d, kernel_cri, kernel_ptr, out_channel,
BETA, output_ptr, out_channel);
free(inpt2d);
return Val_unit;
}
/* Bytecode trampoline: OCaml externals with more than five arguments
 * receive them as an argv array in bytecode mode; forward to the native
 * 17-argument implementation. */
CAMLprim value FUN_BYTE (spatial_im2col) (value * argv, int argn) {
return FUN_NATIVE (spatial_im2col) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16]
);
}
/*
 * spatial_backward_kernel_im2col: gradient of the convolution with respect
 * to the kernel. Rebuilds the forward pass's im2col matrix from the input,
 * computes kern2d = output_grad^T * im2col via one transposed GEMM, then
 * reorders the [out_channel x kernel_cri] result into the kernel's native
 * layout (taps outer, out_channel innermost).
 */
CAMLprim value FUN_NATIVE (spatial_backward_kernel_im2col) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data; /* destination: kernel gradient */
TYPE *output_ptr = (TYPE *) OU->data; /* incoming gradient w.r.t. output */
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
/* NOTE(review): dilation parameters are read but never used here. */
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = in_channel * input_rows;
const int kernel_rio = out_channel * in_channel * kernel_rows;
/* NOTE(review): output_ri is computed but unused. */
const int output_ri = out_channel * output_rows;
const int output_cr = output_rows * output_cols;
const int output_crb = output_rows * output_cols * batches;
const int kernel_cri = kernel_cols * kernel_rows * in_channel;
INIT;
TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
if (kern2d == NULL) exit(1); /* inpt2d not freed here; process exits anyway */
memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
/* Centred zero padding, same formula as the forward pass's padded path. */
int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
/* Rebuild the im2col matrix; rows are disjoint so the loop is safely
 * parallel. */
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < output_crb; ++i) {
int bt = i / output_cr;
int cr = i % output_cr;
int c = cr / output_rows;
int r = cr % output_rows;
const int cstart = c * col_stride - pc;
const int rstart = r * row_stride - pr;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int input_idx_base = bt * input_cri;
int cnt = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int h = 0; h < in_channel; ++h) {
if (a < input_cols && a >= 0 &&
b < input_rows && b >= 0) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + h;
inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
}
++cnt;
}
}
}
}
/* kern2d[out_channel x kernel_cri] =
 *   output^T[out_channel x output_crb] * inpt2d[output_crb x kernel_cri] */
GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
out_channel, kernel_cri, output_crb, ALPHA,
output_ptr, out_channel, inpt2d, kernel_cri,
BETA, kern2d, kernel_cri);
/* Transpose kern2d into the kernel's storage order. */
int cnt = 0;
for (int j = 0; j < kernel_cri; ++j) {
for (int i = 0; i < out_channel; ++i) {
kernel_ptr[cnt++] = kern2d[i * kernel_cri + j];
}
}
free(inpt2d);
free(kern2d);
return Val_unit;
}
/* Bytecode trampoline for the 16-argument native backward-kernel stub. */
CAMLprim value FUN_BYTE (spatial_backward_kernel_im2col) (value * argv, int argn) {
return FUN_NATIVE (spatial_backward_kernel_im2col) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
);
}
/*
 * 2D convolution, backward pass w.r.t. the input, via im2col/col2im.
 *
 * One GEMM computes the lowered gradient
 *   inpt2d = output * kernel^T   (output_crb x kernel_cri, row-major),
 * which is then scattered back (col2im) into input_ptr with accumulation.
 * input_ptr is the destination and is zeroed first.  Padding offsets follow
 * the SAME-convolution convention, clamped at zero.  Returns Val_unit.
 *
 * NOTE(review): ALPHA/BETA are template-defined GEMM scalars — presumably
 * 1 and 0; confirm in the surrounding template header.
 */
CAMLprim value FUN_NATIVE (spatial_backward_input_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);   /* unused; kept for API symmetry */
  int col_in_stride = Long_val(vCol_in_stride);   /* unused; kept for API symmetry */

  /* Flattened strides of the channel-last layouts used below. */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_ri = out_channel * output_rows;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;

  /* Widen to size_t before multiplying so large shapes cannot overflow int. */
  TYPE *inpt2d = (TYPE *) calloc((size_t) kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);  /* NOTE(review): aborts the whole runtime on OOM */

  memset(input_ptr, 0, (size_t) batches * input_cri * sizeof(TYPE));

  INIT;

  /* SAME-padding offsets; clamped at zero for VALID geometry. */
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;

  GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
    output_crb, kernel_cri, out_channel, ALPHA,
    output_ptr, out_channel, kernel_ptr, out_channel,
    BETA, inpt2d, kernel_cri);

  /* col2im scatter: intentionally serial — overlapping windows make the
     `+=` on input_ptr a data race under OpenMP. */
  for (int i = 0; i < output_crb; ++i) {
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;

    const int cstart = c * col_stride - pc;
    const int rstart = r * row_stride - pr;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int input_idx_base = bt * input_cri;

    int cnt = 0;
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          /* Taps outside the input correspond to the implicit zero padding. */
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            input_ptr[input_idx] += inpt2d[(size_t) i * kernel_cri + cnt];
          }
          ++cnt;
        }
      }
    }
  }

  free(inpt2d);

  return Val_unit;
}
/* Bytecode-calling trampoline for the 16-argument native stub above. */
CAMLprim value FUN_BYTE (spatial_backward_input_im2col) (value * argv, int argn) {
  (void) argn;
  return FUN_NATIVE (spatial_backward_input_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/*
 * 3D (cuboid) convolution forward pass via im2col + one GEMM.
 *
 * Lowers the input into inpt2d (output_drcb x kernel_idrc, row-major),
 * one row per output voxel, then computes output = inpt2d * kernel in a
 * single GEMM.  vPadding == 1 selects VALID (no padding); any other value
 * computes SAME-style half padding, clamped at zero.  Returns Val_unit.
 */
CAMLprim value FUN_NATIVE (cuboid_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vPadding
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);

  /* Flattened strides of the channel-last layouts used below. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;

  /* Widen to size_t before multiplying so large shapes cannot overflow int. */
  TYPE *inpt2d = (TYPE *) calloc((size_t) kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);  /* NOTE(review): aborts the whole runtime on OOM */

  memset(output_ptr, 0, (size_t) batches * output_crdo * sizeof(TYPE));

  INIT;

  int pd = 0, pr = 0, pc = 0;
  if (padding != 1) {
    pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
    pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
    pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
    if (pc < 0) pc = 0;
    if (pr < 0) pr = 0;
    if (pd < 0) pd = 0;
  }

  /* Each iteration writes a disjoint row of inpt2d, so the loop is race-free. */
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < output_drcb; ++i) {
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;

    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;

    int cnt = 0;
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int c = dstart; c < dend; ++c) {
          for (int h = 0; h < in_channel; ++h) {
            /* Taps outside the input read as the implicit zero padding. */
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              inpt2d[(size_t) i * kernel_idrc + cnt] = input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
  }

  GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
    output_drcb, out_channel, kernel_idrc, ALPHA,
    inpt2d, kernel_idrc, kernel_ptr, out_channel,
    BETA, output_ptr, out_channel);

  free(inpt2d);

  return Val_unit;
}
/* Bytecode-calling trampoline for the 19-argument native stub above. */
CAMLprim value FUN_BYTE (cuboid_im2col) (value * argv, int argn) {
  (void) argn;
  return FUN_NATIVE (cuboid_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15],
    argv[16], argv[17], argv[18]
  );
}
/*
 * 3D convolution, backward pass w.r.t. the kernel, via im2col.
 *
 * Gathers the lowered input into inpt2d (output_drcb x kernel_idrc), then
 * one GEMM computes kern2d = output^T * inpt2d (out_channel x kernel_idrc,
 * row-major); kern2d is finally transposed into kernel_ptr's layout.
 * kernel_ptr is the destination.  Returns Val_unit.
 */
CAMLprim value FUN_NATIVE (cuboid_backward_kernel_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);

  /* Flattened strides of the channel-last layouts used below. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;

  INIT;

  /* Widen to size_t before multiplying so large shapes cannot overflow int. */
  TYPE *inpt2d = (TYPE *) calloc((size_t) kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);  /* NOTE(review): aborts the whole runtime on OOM */
  TYPE *kern2d = (TYPE *) calloc((size_t) kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);

  memset(kernel_ptr, 0, (size_t) kernel_cols * kernel_rdio * sizeof(TYPE));

  /* SAME-padding offsets; clamped at zero for VALID geometry. */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;

  /* Each iteration writes a disjoint row of inpt2d, so the loop is race-free. */
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < output_drcb; ++i) {
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;

    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;

    int cnt = 0;
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int c = dstart; c < dend; ++c) {
          for (int h = 0; h < in_channel; ++h) {
            /* Taps outside the input read as the implicit zero padding. */
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              inpt2d[(size_t) i * kernel_idrc + cnt] = input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
  }

  GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
    out_channel, kernel_idrc, output_drcb, ALPHA,
    output_ptr, out_channel, inpt2d, kernel_idrc,
    BETA, kern2d, kernel_idrc);

  /* Transpose kern2d (out_channel x kernel_idrc) into kernel_ptr's layout. */
  int cnt = 0;
  for (int j = 0; j < kernel_idrc; ++j) {
    for (int i = 0; i < out_channel; ++i) {
      kernel_ptr[cnt++] = kern2d[i * kernel_idrc + j];
    }
  }

  free(inpt2d);
  free(kern2d);

  return Val_unit;
}
/* Bytecode-calling trampoline for the 18-argument native stub above. */
CAMLprim value FUN_BYTE (cuboid_backward_kernel_im2col) (value * argv, int argn) {
  (void) argn;
  return FUN_NATIVE (cuboid_backward_kernel_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15],
    argv[16], argv[17]
  );
}
/*
 * 3D convolution, backward pass w.r.t. the input, via im2col/col2im.
 *
 * One GEMM computes inpt2d = output * kernel^T (output_drcb x kernel_idrc,
 * row-major), which is then scattered back (col2im) into input_ptr with
 * accumulation.  input_ptr is the destination and is zeroed first.
 * Returns Val_unit.
 */
CAMLprim value FUN_NATIVE (cuboid_backward_input_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);

  /* Flattened strides of the channel-last layouts used below. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;

  /* Widen to size_t before multiplying so large shapes cannot overflow int. */
  TYPE *inpt2d = (TYPE *) calloc((size_t) kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);  /* NOTE(review): aborts the whole runtime on OOM */

  memset(input_ptr, 0, (size_t) batches * input_crdi * sizeof(TYPE));

  INIT;

  /* SAME-padding offsets; clamped at zero for VALID geometry. */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;

  GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
    output_drcb, kernel_idrc, out_channel, ALPHA,
    output_ptr, out_channel, kernel_ptr, out_channel,
    BETA, inpt2d, kernel_idrc);

  /* col2im scatter: intentionally serial — overlapping windows make the
     `+=` on input_ptr a data race under OpenMP. */
  for (int i = 0; i < output_drcb; ++i) {
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;

    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;

    int cnt = 0;
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int c = dstart; c < dend; ++c) {
          for (int h = 0; h < in_channel; ++h) {
            /* Taps outside the input correspond to the implicit zero padding. */
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              input_ptr[input_idx] += inpt2d[(size_t) i * kernel_idrc + cnt];
            }
            ++cnt;
          }
        }
      }
    }
  }

  free(inpt2d);

  return Val_unit;
}
/* Bytecode-calling trampoline for the 18-argument native stub above. */
CAMLprim value FUN_BYTE (cuboid_backward_input_im2col) (value * argv, int argn) {
  (void) argn;
  return FUN_NATIVE (cuboid_backward_input_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15],
    argv[16], argv[17]
  );
}
/*
 * Memory-efficient convolution (MEC) implementation: instead of the full
 * im2col buffer, the input is lowered into a compact intermediate matrix
 * and one GEMM is issued per output row over overlapping slices of it,
 * trading peak memory for extra (smaller) GEMM calls.
 */
/*
 * 2D convolution forward pass, memory-efficient (MEC) variant.
 *
 * Lowers the input into a compact column-major matrix inpt2d
 * (inpt2d_rows x inpt2d_cols) covering the padded row extent, repacks the
 * kernel into kern2d, then issues one GEMM per output row over overlapping
 * slices of inpt2d (offset inpt2d_step per row).  Results land in output2d
 * and are finally transposed into output_ptr.  vPadding == 1 selects VALID;
 * any other value computes SAME-style half padding.  Returns Val_unit.
 */
CAMLprim value FUN_NATIVE (spatial_mec) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  int row_in_stride = Long_val(vRow_in_stride);   /* unused; kept for API symmetry */
  int col_in_stride = Long_val(vCol_in_stride);   /* unused; kept for API symmetry */

  /* Flattened strides and MEC lowering geometry. */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = input_rows * in_channel;
  const int output_cri = out_channel * output_rows * output_cols;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int kernel_rio = kernel_rows * in_channel * out_channel;
  const int kernel_io = in_channel * out_channel;
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bco = out_channel * output_cols * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * in_channel;
  const int inpt2d_rows = batches * output_cols;
  const int inpt2d_step = inpt2d_rows * kernel_cols * in_channel * row_stride;

  /* Widen to size_t before multiplying so large shapes cannot overflow int. */
  TYPE *inpt2d = (TYPE *) calloc((size_t) inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);  /* NOTE(review): aborts the whole runtime on OOM */
  TYPE *kern2d = (TYPE *) calloc((size_t) kernel_cri * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc((size_t) batches * output_cri, sizeof(TYPE));
  if (output2d == NULL) exit(1);

  memset(output_ptr, 0, (size_t) batches * output_cri * sizeof(TYPE));

  INIT;

  int pr = 0, pc = 0;
  if (padding != 1) {
    pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
    pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }

  /* Repack the kernel so each output channel is a contiguous run of
     kernel_cri coefficients in r-major, c, i order. */
  int cnt = 0;
  int kidx = 0;
  for (int o = 0; o < out_channel; ++o) {
    for (int r = 0; r < kernel_rows; ++r) {
      for (int c = 0; c < kernel_cols; ++c) {
        for (int i = 0; i < in_channel; ++i) {
          kidx = c * kernel_rio + r * kernel_io + i * out_channel + o;
          kern2d[cnt++] = kernel_ptr[kidx];
        }
      }
    }
  }

  /* Lower the input: one inpt2d row per (batch, output column), covering the
     whole padded row extent so row-GEMMs can slide over it. */
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / output_cols;
    int c = i % output_cols;
    const int cstart = c * col_stride - pc;
    const int cend = cstart + kernel_cols;
    const int rstart = 0 - pr;
    const int rend = rstart + padded_input_rows;
    int counter = 0;
    for (int a = rstart; a < rend; ++a) {
      for (int b = cstart; b < cend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          /* Out-of-range taps read as the implicit zero padding. */
          if (b < input_cols && b >= 0 &&
              a < input_rows && a >= 0) {
            int input_idx = bt * input_cri + b * input_ri + a * in_channel + h;
            inpt2d[(size_t) counter * inpt2d_rows + i] = input_ptr[input_idx];
          }
          counter++;
        }
      }
    }
  }

  /* One GEMM per output row over an overlapping slice of inpt2d. */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasNoTrans, CblasNoTrans,
      inpt2d_rows, out_channel, kernel_cri, ALPHA,
      inpt2d + (size_t) inpt2d_step * i, inpt2d_rows, kern2d, kernel_cri,
      BETA, output2d + (size_t) output_bco * i, inpt2d_rows);
  }

  /* Transpose output2d back into output_ptr's layout. */
  cnt = 0;
  for (int j = 0; j < inpt2d_rows; ++j) {
    for (int i = 0; i < output_rows * out_channel; ++i) {
      output_ptr[cnt++] = output2d[(size_t) i * inpt2d_rows + j];
    }
  }

  free(inpt2d);
  free(kern2d);
  free(output2d);

  return Val_unit;
}
/* Bytecode-calling trampoline for the 17-argument native stub above. */
CAMLprim value FUN_BYTE (spatial_mec) (value * argv, int argn) {
  (void) argn;
  return FUN_NATIVE (spatial_mec) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15],
    argv[16]
  );
}
/*
 * 2D convolution, backward pass w.r.t. the kernel, MEC variant.
 *
 * Lowers the input into inpt2d and the output gradient into output2d (both
 * column-major, inpt2d_rows tall), then accumulates kern2d += out^T * in
 * with one GEMM per output row (beta = ALPHA so the per-row contributions
 * sum).  kern2d is finally scattered into kernel_ptr's layout.
 * kernel_ptr is the destination.  Returns Val_unit.
 */
CAMLprim value FUN_NATIVE (spatial_backward_kernel_mec) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);   /* unused; kept for API symmetry */
  int col_in_stride = Long_val(vCol_in_stride);   /* unused; kept for API symmetry */

  /* Flattened strides and MEC lowering geometry. */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_ri = out_channel * output_rows;
  const int output_cr = output_rows * output_cols;
  const int output_ro = output_rows * out_channel;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_io = in_channel * out_channel;
  const int kernel_rio = kernel_rows * in_channel * out_channel;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bco = out_channel * output_cols * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * in_channel;
  const int inpt2d_rows = batches * output_cols;
  const int inpt2d_step = batches * output_cols * kernel_cols * in_channel * row_stride;

  /* Widen to size_t before multiplying so large shapes cannot overflow int. */
  TYPE *inpt2d = (TYPE *) calloc((size_t) inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);  /* NOTE(review): aborts the whole runtime on OOM */
  TYPE *kern2d = (TYPE *) calloc((size_t) kernel_cri * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc((size_t) output_crb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);

  memset(kernel_ptr, 0, (size_t) kernel_cols * kernel_rio * sizeof(TYPE));

  INIT;

  /* SAME-padding offsets; clamped at zero for VALID geometry. */
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;

  /* Lower the input: one inpt2d row per (batch, output column). */
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / output_cols;
    int c = i % output_cols;
    const int cstart = c * col_stride - pc;
    const int cend = cstart + kernel_cols;
    const int rstart = 0 - pr;
    const int rend = rstart + padded_input_rows;
    int counter = 0;
    for (int a = rstart; a < rend; ++a) {
      for (int b = cstart; b < cend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          /* Out-of-range taps read as the implicit zero padding. */
          if (b < input_cols && b >= 0 &&
              a < input_rows && a >= 0) {
            int input_idx =
              bt * input_cri + b * input_ri + a * in_channel + h;
            inpt2d[(size_t) counter * inpt2d_rows + i] = input_ptr[input_idx];
          }
          counter++;
        }
      }
    }
  }

  /* Transpose the output gradient into column-major output2d. */
  int cnt = 0;
  for (int j = 0; j < inpt2d_rows; ++j) {
    for (int i = 0; i < output_ro; ++i) {
      output2d[(size_t) i * inpt2d_rows + j] = output_ptr[cnt++];
    }
  }

  /* beta = ALPHA: the per-output-row products accumulate into kern2d. */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasTrans, CblasNoTrans,
      out_channel, kernel_cri, inpt2d_rows, ALPHA,
      output2d + (size_t) output_bco * i, inpt2d_rows,
      inpt2d + (size_t) inpt2d_step * i, inpt2d_rows,
      ALPHA, kern2d, out_channel);
  }

  /* Scatter kern2d back into kernel_ptr's [c][r][i][o] layout. */
  cnt = 0;
  int kidx = 0;
  for (int r = 0; r < kernel_rows; ++r) {
    for (int c = 0; c < kernel_cols; ++c) {
      for (int i = 0; i < in_channel; ++i) {
        for (int o = 0; o < out_channel; ++o) {
          kidx = c * kernel_rio + r * kernel_io + i * out_channel + o;
          kernel_ptr[kidx] = kern2d[cnt++];
        }
      }
    }
  }

  free(inpt2d);
  free(kern2d);
  free(output2d);

  return Val_unit;
}
/* Bytecode-calling trampoline for the 16-argument native stub above. */
CAMLprim value FUN_BYTE (spatial_backward_kernel_mec) (value * argv, int argn) {
  (void) argn;
  return FUN_NATIVE (spatial_backward_kernel_mec) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/*
 * 2D convolution, backward pass w.r.t. the input, MEC variant.
 *
 * Transposes the output gradient into output2d, repacks the kernel into
 * kern2d, then computes the lowered input gradient one output row at a
 * time: inpt2d slice += output2d slice * kern2d^T (beta = ALPHA because
 * consecutive slices overlap whenever row_stride < kernel_rows).  Finally
 * scatter-adds inpt2d back into input_ptr, which is zeroed first.
 * Returns Val_unit.
 */
CAMLprim value FUN_NATIVE (spatial_backward_input_mec) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);   /* unused; kept for API symmetry */
  int col_in_stride = Long_val(vCol_in_stride);   /* unused; kept for API symmetry */

  /* Flattened strides and MEC lowering geometry. */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_ri = out_channel * output_rows;
  const int output_cr = output_rows * output_cols;
  const int output_ro = output_rows * out_channel;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_io = in_channel * out_channel;
  const int kernel_rio = kernel_rows * in_channel * out_channel;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bco = out_channel * output_cols * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * in_channel;
  const int inpt2d_rows = batches * output_cols;
  const int inpt2d_step = batches * output_cols * kernel_cols * in_channel * row_stride;

  /* Widen to size_t before multiplying so large shapes cannot overflow int. */
  TYPE *inpt2d = (TYPE *) calloc((size_t) inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);  /* NOTE(review): aborts the whole runtime on OOM */
  TYPE *kern2d = (TYPE *) calloc((size_t) kernel_cri * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc((size_t) output_crb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);

  memset(input_ptr, 0, (size_t) batches * input_cri * sizeof(TYPE));

  INIT;

  /* SAME-padding offsets; clamped at zero for VALID geometry. */
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;

  /* Transpose the output gradient into column-major output2d. */
  int cnt = 0;
  for (int j = 0; j < inpt2d_rows; ++j) {
    for (int i = 0; i < output_ro; ++i) {
      output2d[(size_t) i * inpt2d_rows + j] = output_ptr[cnt++];
    }
  }

  /* Repack the kernel so each output channel is a contiguous run. */
  cnt = 0;
  int kidx = 0;
  for (int o = 0; o < out_channel; ++o) {
    for (int r = 0; r < kernel_rows; ++r) {
      for (int c = 0; c < kernel_cols; ++c) {
        for (int i = 0; i < in_channel; ++i) {
          kidx = c * kernel_rio + r * kernel_io + i * out_channel + o;
          kern2d[cnt++] = kernel_ptr[kidx];
        }
      }
    }
  }

  /* beta = ALPHA: consecutive row slices of inpt2d overlap, so accumulate. */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasNoTrans, CblasTrans,
      inpt2d_rows, kernel_cri, out_channel, ALPHA,
      output2d + (size_t) output_bco * i, inpt2d_rows,
      kern2d, kernel_cri, ALPHA,
      inpt2d + (size_t) inpt2d_step * i, inpt2d_rows);
  }

  /* Scatter-add the lowered gradient back into the input tensor. */
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / output_cols;
    int c = i % output_cols;
    const int cstart = c * col_stride - pc;
    const int cend = cstart + kernel_cols;
    const int rstart = 0 - pr;
    const int rend = rstart + padded_input_rows;
    const int input_idx_base = bt * input_cri;
    int counter = 0;
    for (int a = rstart; a < rend; ++a) {
      for (int b = cstart; b < cend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          /* Gradient w.r.t. the zero padding is discarded. */
          if (b < input_cols && b >= 0 &&
              a < input_rows && a >= 0) {
            int input_idx = input_idx_base + b * input_ri + a * in_channel + h;
            input_ptr[input_idx] += inpt2d[(size_t) counter * inpt2d_rows + i];
          }
          counter++;
        }
      }
    }
  }

  free(inpt2d);
  free(kern2d);
  free(output2d);

  return Val_unit;
}
/* Bytecode-calling trampoline for the 16-argument native stub above. */
CAMLprim value FUN_BYTE (spatial_backward_input_mec) (value * argv, int argn) {
  (void) argn;
  return FUN_NATIVE (spatial_backward_input_mec) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/*
 * 3D (cuboid) convolution forward pass, memory-efficient (MEC) variant.
 *
 * Lowers the input into a compact column-major matrix inpt2d
 * (inpt2d_rows x inpt2d_cols) covering the padded row extent, repacks the
 * kernel into kern2d, then issues one GEMM per output row over overlapping
 * slices of inpt2d.  Results in output2d are finally reordered into
 * output_ptr's layout.  vPadding == 1 selects VALID; any other value
 * computes SAME-style half padding, clamped at zero.  Returns Val_unit.
 */
CAMLprim value FUN_NATIVE (cuboid_mec) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vPadding
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);

  /* Flattened strides and MEC lowering geometry. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  const int kernel_rdio = kernel_rows * kernel_dpts * in_channel * out_channel;
  const int kernel_dio = kernel_dpts * in_channel * out_channel;
  const int kernel_io = in_channel * out_channel;
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bcdo = out_channel * output_cols * output_dpts * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * kernel_dpts * in_channel;
  const int inpt2d_rows = batches * output_cols * output_dpts;
  const int inpt2d_step = inpt2d_rows * kernel_cols * kernel_dpts * in_channel * row_stride;

  INIT;

  int pd = 0, pr = 0, pc = 0;
  if (padding != 1) {
    pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
    pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
    pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
    if (pc < 0) pc = 0;
    if (pr < 0) pr = 0;
    if (pd < 0) pd = 0;
  }

  /* Widen to size_t before multiplying so large shapes cannot overflow int. */
  TYPE *inpt2d = (TYPE *) calloc((size_t) inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);  /* NOTE(review): aborts the whole runtime on OOM */
  TYPE *kern2d = (TYPE *) calloc((size_t) kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc((size_t) output_drcb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);

  memset(output_ptr, 0, (size_t) output_drcb * out_channel * sizeof(TYPE));

  /* Repack the kernel so each output channel is a contiguous run of
     kernel_idrc coefficients. */
  int cnt = 0;
  int kidx = 0;
  for (int o = 0; o < out_channel; ++o) {
    for (int r = 0; r < kernel_rows; ++r) {
      for (int c = 0; c < kernel_cols; ++c) {
        for (int d = 0; d < kernel_dpts; ++d) {
          for (int i = 0; i < in_channel; ++i) {
            kidx = c * kernel_rdio + r * kernel_dio +
              d * kernel_io + i * out_channel + o;
            kern2d[cnt++] = kernel_ptr[kidx];
          }
        }
      }
    }
  }

  /* Row extent is shared by every lowered column, so hoist it. */
  const int rstart = 0 - pr;
  const int rend = rstart + padded_input_rows;

  /* Lower the input: one inpt2d row per (batch, output col, output depth),
     covering the whole padded row extent.  Cells start at zero (calloc) and
     each is written at most once, so `+=` behaves like plain assignment. */
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / (output_cols * output_dpts);
    int cd = i % (output_cols * output_dpts);
    int ct = cd / output_dpts;
    int dt = cd % output_dpts;
    const int cstart = ct * col_stride - pc;
    const int dstart = dt * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int r = rstart; r < rend; ++r) {
      for (int c = cstart; c < cend; ++c) {
        for (int d = dstart; d < dend; ++d) {
          for (int h = 0; h < in_channel; ++h) {
            /* Out-of-range taps read as the implicit zero padding. */
            if (c >= 0 && c < input_cols &&
                r >= 0 && r < input_rows &&
                d >= 0 && d < input_dpts) {
              int input_idx = input_idx_base + c * input_rdi +
                r * input_di + d * in_channel + h;
              inpt2d[(size_t) cnt * inpt2d_rows + i] += input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
  }

  /* One GEMM per output row over an overlapping slice of inpt2d. */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasNoTrans, CblasNoTrans,
      inpt2d_rows, out_channel, kernel_idrc, ALPHA,
      inpt2d + (size_t) inpt2d_step * i, inpt2d_rows, kern2d, kernel_idrc,
      BETA, output2d + (size_t) output_bcdo * i, inpt2d_rows);
  }

  /* Reorder output2d into output_ptr's [b][c][r][d][o] layout. */
  cnt = 0;
  int oidx = 0;
  for (int r = 0; r < output_rows; ++r) {
    for (int o = 0; o < out_channel; ++o) {
      for (int b = 0; b < batches; ++b) {
        for (int c = 0; c < output_cols; ++c) {
          for (int d = 0; d < output_dpts; ++d) {
            oidx = b * output_crdo + c * output_rdo +
              r * output_dpts * out_channel + d * out_channel + o;
            output_ptr[oidx] = output2d[cnt++];
          }
        }
      }
    }
  }

  free(inpt2d);
  free(kern2d);
  free(output2d);

  return Val_unit;
}
/* Bytecode-calling trampoline for the 19-argument native stub above. */
CAMLprim value FUN_BYTE (cuboid_mec) (value * argv, int argn) {
  (void) argn;
  return FUN_NATIVE (cuboid_mec) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15],
    argv[16], argv[17], argv[18]
  );
}
/* Gradient of the 3-d (cuboid) convolution with respect to the kernel,
 * using a MEC-style (memory-efficient convolution) lowering:
 *   1. scatter the input into a 2-d matrix inpt2d (one row per
 *      (batch, out_col, out_dpt) triple, one column per kernel tap),
 *   2. transpose the output gradient into output2d with a matching layout,
 *   3. accumulate the kernel gradient with one GEMM per output row,
 *   4. gather the 2-d result back into the kernel bigarray.
 * Layouts implied by the index arithmetic below (channel fastest):
 *   input  : [batch][col][row][dpt][in_channel]
 *   kernel : [col][row][dpt][in_channel][out_channel]
 *   output : [batch][col][row][dpt][out_channel]
 * The kernel bigarray is zeroed and then overwritten with the gradient;
 * input and output bigarrays are read-only here. TYPE / GEMM / ALPHA /
 * INIT are supplied by the including template file.
 * Change vs. original: removed the unused output_dr / output_drc constants
 * (-Wunused-variable noise); all computation is unchanged. */
CAMLprim value FUN_NATIVE (cuboid_backward_kernel_mec) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* flattened strides of each layout level */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  const int kernel_rdio = kernel_rows * kernel_dpts * in_channel * out_channel;
  const int kernel_dio = kernel_dpts * in_channel * out_channel;
  const int kernel_io = in_channel * out_channel;
  /* rows of the (virtually zero-padded) input touched by any window */
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bcdo = out_channel * output_cols * output_dpts * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * kernel_dpts * in_channel;
  const int inpt2d_rows = batches * output_cols * output_dpts;
  const int inpt2d_step = inpt2d_rows * kernel_cols * kernel_dpts * in_channel * row_stride;
  /* calloc so that padded (out-of-bounds) taps stay zero */
  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(output_drcb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  memset(kernel_ptr, 0, kernel_idrc * out_channel * sizeof(TYPE));
  INIT;
  /* centered (SAME-style) padding, clamped at zero */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  int cnt;
  const int rstart = 0 - pr;
  const int rend = rstart + padded_input_rows;
  /* step 1: scatter the input into inpt2d. cnt advances even for
   * out-of-bounds positions so column alignment is preserved (those
   * entries keep the zero from calloc). */
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / (output_cols * output_dpts);
    int cd = i % (output_cols * output_dpts);
    int ct = cd / output_dpts;
    int dt = cd % output_dpts;
    const int cstart = ct * col_stride - pc;
    const int dstart = dt * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    cnt = 0;
    for (int r = rstart; r < rend; ++r) {
      for (int c = cstart; c < cend; ++c) {
        for (int d = dstart; d < dend; ++d) {
          for (int h = 0; h < in_channel; ++h) {
            if (c >= 0 && c < input_cols &&
                r >= 0 && r < input_rows &&
                d >= 0 && d < input_dpts) {
              int input_idx = input_idx_base + c * input_rdi +
                r * input_di + d * in_channel + h;
              inpt2d[cnt * inpt2d_rows + i] += input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
  }
  /* step 2: transpose the output gradient so each per-output-row slice
   * output2d + output_bcdo * r is an (inpt2d_rows x out_channel)
   * column-major matrix. */
  cnt = 0;
  int oidx = 0;
  for (int r = 0; r < output_rows; ++r) {
    for (int o = 0; o < out_channel; ++o) {
      for (int b = 0; b < batches; ++b) {
        for (int c = 0; c < output_cols; ++c) {
          for (int d = 0; d < output_dpts; ++d) {
            oidx = b * output_crdo + c * output_rdo +
              r * output_dpts * out_channel + d * out_channel + o;
            output2d[cnt++] = output_ptr[oidx];
          }
        }
      }
    }
  }
  /* step 3: kern2d += output2d(r)^T * inpt2d(r) for each output row; the
   * trailing ALPHA is GEMM's beta, so successive rows accumulate into
   * kern2d (ALPHA comes from the template, presumably 1 -- confirm). */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasTrans, CblasNoTrans,
      out_channel, kernel_idrc, inpt2d_rows, ALPHA,
      output2d + output_bcdo * i, inpt2d_rows,
      inpt2d + inpt2d_step * i, inpt2d_rows,
      ALPHA, kern2d, out_channel);
  }
  /* step 4: gather the 2-d gradient back into the kernel layout
   * [col][row][dpt][in_channel][out_channel] */
  cnt = 0;
  int kidx = 0;
  for (int r = 0; r < kernel_rows; ++r) {
    for (int c = 0; c < kernel_cols; ++c) {
      for (int d = 0; d < kernel_dpts; ++d) {
        for (int i = 0; i < in_channel; ++i) {
          for (int o = 0; o < out_channel; ++o) {
            kidx = c * kernel_rdio + r * kernel_dio +
              d * kernel_io + i * out_channel + o;
            kernel_ptr[kidx] = kern2d[cnt++];
          }
        }
      }
    }
  }
  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode stub: unpacks the OCaml argv array (18 arguments) and forwards
 * to the native implementation; argn is unused by convention. */
CAMLprim value FUN_BYTE (cuboid_backward_kernel_mec) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_kernel_mec) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/* Gradient of the 3-d (cuboid) convolution with respect to the input,
 * using a MEC-style lowering:
 *   1. transpose the output gradient into output2d (one column-major
 *      (inpt2d_rows x out_channel) slice per output row),
 *   2. gather the kernel into kern2d, one column per output channel,
 *   3. one GEMM per output row computes the lowered input gradient into
 *      inpt2d; beta = ALPHA so overlapping row windows accumulate,
 *   4. scatter-add inpt2d back into the input bigarray.
 * Layouts implied by the index arithmetic below (channel fastest):
 *   input  : [batch][col][row][dpt][in_channel]
 *   kernel : [col][row][dpt][in_channel][out_channel]
 *   output : [batch][col][row][dpt][out_channel]
 * The input bigarray is zeroed first and then accumulated into; kernel and
 * output bigarrays are read-only. TYPE / GEMM / ALPHA / INIT are supplied
 * by the including template file.
 * Changes vs. original: removed the unused output_dr / output_drc
 * constants, and the inner scatter loop now reuses the function-scope cnt
 * instead of redeclaring it (which shadowed the outer variable). */
CAMLprim value FUN_NATIVE (cuboid_backward_input_mec) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* flattened strides of each layout level */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  const int kernel_rdio = kernel_rows * kernel_dpts * in_channel * out_channel;
  const int kernel_dio = kernel_dpts * in_channel * out_channel;
  const int kernel_io = in_channel * out_channel;
  /* rows of the (virtually zero-padded) input touched by any window */
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bcdo = out_channel * output_cols * output_dpts * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * kernel_dpts * in_channel;
  const int inpt2d_rows = batches * output_cols * output_dpts;
  const int inpt2d_step = inpt2d_rows * kernel_cols * kernel_dpts * in_channel * row_stride;
  /* calloc: inpt2d must start at zero because GEMM accumulates into it */
  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(output_drcb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE));
  INIT;
  /* centered (SAME-style) padding, clamped at zero */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  int cnt = 0;
  int oidx = 0;
  /* step 1: transpose the output gradient into per-output-row slices */
  for (int r = 0; r < output_rows; ++r) {
    for (int o = 0; o < out_channel; ++o) {
      for (int b = 0; b < batches; ++b) {
        for (int c = 0; c < output_cols; ++c) {
          for (int d = 0; d < output_dpts; ++d) {
            oidx = b * output_crdo + c * output_rdo +
              r * output_dpts * out_channel + d * out_channel + o;
            output2d[cnt++] = output_ptr[oidx];
          }
        }
      }
    }
  }
  /* step 2: gather the kernel into kern2d, column-major with one column
   * per output channel */
  cnt = 0;
  int kidx = 0;
  for (int o = 0; o < out_channel; ++o) {
    for (int r = 0; r < kernel_rows; ++r) {
      for (int c = 0; c < kernel_cols; ++c) {
        for (int d = 0; d < kernel_dpts; ++d) {
          for (int i = 0; i < in_channel; ++i) {
            kidx = c * kernel_rdio + r * kernel_dio +
              d * kernel_io + i * out_channel + o;
            kern2d[cnt++] = kernel_ptr[kidx];
          }
        }
      }
    }
  }
  /* step 3: inpt2d(r) += output2d(r) * kern2d^T. The trailing ALPHA is
   * GEMM's beta; successive output rows overlap in inpt2d (inpt2d_step
   * advances by row_stride worth of columns), so accumulation is required
   * (assumes ALPHA == 1 -- defined in the template, confirm). */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasNoTrans, CblasTrans,
      inpt2d_rows, kernel_idrc, out_channel, ALPHA,
      output2d + output_bcdo * i, inpt2d_rows,
      kern2d, kernel_idrc, ALPHA,
      inpt2d + inpt2d_step * i, inpt2d_rows);
  }
  const int rstart = 0 - pr;
  const int rend = rstart + padded_input_rows;
  /* step 4: scatter-add the lowered gradient back into the input; cnt
   * advances even for out-of-bounds (padding) taps so columns stay
   * aligned with the forward scatter. */
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / (output_cols * output_dpts);
    int cd = i % (output_cols * output_dpts);
    int ct = cd / output_dpts;
    int dt = cd % output_dpts;
    const int cstart = ct * col_stride - pc;
    const int dstart = dt * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    cnt = 0;  /* reuse the function-scope counter (no shadowing) */
    for (int r = rstart; r < rend; ++r) {
      for (int c = cstart; c < cend; ++c) {
        for (int d = dstart; d < dend; ++d) {
          for (int h = 0; h < in_channel; ++h) {
            if (c >= 0 && c < input_cols &&
                r >= 0 && r < input_rows &&
                d >= 0 && d < input_dpts) {
              int input_idx = input_idx_base + c * input_rdi +
                r * input_di + d * in_channel + h;
              input_ptr[input_idx] += inpt2d[cnt * inpt2d_rows + i];
            }
            ++cnt;
          }
        }
      }
    }
  }
  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode stub: unpacks the OCaml argv array (18 arguments) and forwards
 * to the native implementation; argn is unused by convention. */
CAMLprim value FUN_BYTE (cuboid_backward_input_mec) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_input_mec) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/*
 * Naive (direct, loop-based) implementations of the convolution
 * operations. They avoid the im2col/MEC lowering and GEMM, computing each
 * output element with explicit index arithmetic instead.
 */
/* Naive direct 2-d (spatial) convolution: for each batch element and each
 * output pixel, accumulate kernel * input over the receptive field.
 * Layouts implied by the index arithmetic below (channel fastest):
 *   input  : [batch][col][row][in_channel]
 *   kernel : [col][row][in_channel][out_channel]
 *   output : [batch][col][row][out_channel]
 * padding == 1 selects no implicit padding; any other value computes a
 * centered (SAME-style) padding from the shapes -- inferred from the
 * arithmetic below, confirm against the OCaml caller.
 * vRow_in_stride / vCol_in_stride are accepted for signature uniformity
 * but ignored: this naive kernel does not implement input dilation.
 * TYPE / INIT come from the including template file.
 * Change vs. original: removed the unused locals output_cr, output_crb,
 * kernel_cri, ksize, row_in_stride, col_in_stride (-Wunused-variable). */
CAMLprim value FUN_NATIVE (spatial_naive) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  /* flattened strides of each layout level */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_ri = out_channel * output_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int kernel_io = out_channel * in_channel;
  /* defensive clear; every output element is also assigned below */
  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
  INIT;
  int pr = 0, pc = 0;
  if (padding != 1) {
    /* centered padding, clamped at zero */
    pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
    pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_cri;
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        const int output_idx_base = i * output_cri + j * output_ri + k * out_channel;
        /* receptive field of output pixel (j, k) in input coordinates */
        const int cstart = j * col_stride - pc;
        const int rstart = k * row_stride - pr;
        const int cend = cstart + kernel_cols;
        const int rend = rstart + kernel_rows;
        for (int l = 0; l < out_channel; ++l) {
          TYPE sum = 0.;
          for (int h = 0; h < in_channel; ++h) {
            TYPE input_val, kernel_val;
            for (int a = cstart; a < cend; ++a) {
              for (int b = rstart; b < rend; ++b) {
                /* out-of-bounds (padding) taps contribute zero */
                if (a >= 0 && a < input_cols &&
                    b >= 0 && b < input_rows) {
                  int input_idx =
                    input_idx_base + a * input_ri + b * in_channel + h;
                  input_val = *(input_ptr + input_idx);
                } else {
                  input_val = 0.;
                }
                int kernel_index =
                  (a - cstart) * kernel_rio + (b - rstart) * kernel_io + h * out_channel + l;
                kernel_val = *(kernel_ptr + kernel_index);
                sum += input_val * kernel_val;
              }
            }
          }
          int output_idx = output_idx_base + l;
          *(output_ptr + output_idx) = sum;
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode stub: unpacks the OCaml argv array (17 arguments) and forwards
 * to the native implementation; argn is unused by convention. */
CAMLprim value FUN_BYTE (spatial_naive) (value * argv, int argn) {
  return FUN_NATIVE (spatial_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16]
  );
}
/* Gradient of the naive 2-d convolution with respect to the kernel: for
 * every batch element and output pixel, add output_grad * input into the
 * matching kernel tap. Layouts (channel fastest):
 *   input  : [batch][col][row][in_channel]
 *   kernel : [col][row][in_channel][out_channel]
 *   output : [batch][col][row][out_channel]
 * The kernel bigarray is zeroed first and accumulated into; input and
 * output are read-only. Padding is always the centered (SAME-style)
 * amount here -- no padding flag is passed to the backward kernels.
 * vRow_in_stride / vCol_in_stride are accepted for signature uniformity
 * but ignored (no input dilation).
 * Change vs. original: removed the unused row_in_stride / col_in_stride
 * locals (-Wunused-variable). */
CAMLprim value FUN_NATIVE (spatial_backward_kernel_naive) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* flattened strides of each layout level */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int kernel_io = out_channel * in_channel;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_ri = out_channel * output_rows;
  /* the gradient is accumulated with +=, so clear the kernel first */
  memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
  INIT;
  /* centered padding, clamped at zero */
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  for (int i = 0; i < batches; ++i) {
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        /* receptive field of output pixel (j, k) in input coordinates */
        const int cstart = j * col_stride - pc;
        const int rstart = k * row_stride - pr;
        const int cend = cstart + kernel_cols;
        const int rend = rstart + kernel_rows;
        for (int l = 0; l < out_channel; ++l) {
          int output_idx =
            i * output_cri + j * output_ri + k * out_channel + l;
          TYPE output_val = *(output_ptr + output_idx);
          for (int h = 0; h < in_channel; ++h) {
            TYPE input_val = 0.;
            for (int a = cstart; a < cend; ++a) {
              for (int b = rstart; b < rend; ++b) {
                /* out-of-bounds (padding) taps contribute zero */
                if (a >= 0 && a < input_cols &&
                    b >= 0 && b < input_rows) {
                  int input_idx =
                    i * input_cri + a * input_ri + b * in_channel + h;
                  input_val = *(input_ptr + input_idx);
                } else {
                  input_val = 0.;
                }
                int kernel_index =
                  (a - cstart) * kernel_rio + (b - rstart) * kernel_io + h * out_channel + l;
                *(kernel_ptr + kernel_index) += output_val * input_val;
              }
            }
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode stub: unpacks the OCaml argv array (16 arguments) and forwards
 * to the native implementation; argn is unused by convention. */
CAMLprim value FUN_BYTE (spatial_backward_kernel_naive) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_kernel_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/* Gradient of the naive 2-d convolution with respect to the input: for
 * every batch element and output pixel, scatter-add output_grad * kernel
 * into the corresponding input positions. Layouts (channel fastest):
 *   input  : [batch][col][row][in_channel]
 *   kernel : [col][row][in_channel][out_channel]
 *   output : [batch][col][row][out_channel]
 * The input bigarray is zeroed first and accumulated into; kernel and
 * output are read-only. Padding is always the centered (SAME-style)
 * amount here -- no padding flag is passed to the backward kernels.
 * vRow_in_stride / vCol_in_stride are accepted for signature uniformity
 * but ignored (no input dilation).
 * Change vs. original: removed the unused row_in_stride / col_in_stride
 * locals (-Wunused-variable). */
CAMLprim value FUN_NATIVE (spatial_backward_input_naive) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* flattened strides of each layout level */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int kernel_io = out_channel * in_channel;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_ri = out_channel * output_rows;
  /* the gradient is accumulated with +=, so clear the input first */
  memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
  INIT;
  /* centered padding, clamped at zero */
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  for (int i = 0; i < batches; ++i) {
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        /* receptive field of output pixel (j, k) in input coordinates */
        const int cstart = j * col_stride - pc;
        const int rstart = k * row_stride - pr;
        const int cend = cstart + kernel_cols;
        const int rend = rstart + kernel_rows;
        for (int l = 0; l < out_channel; ++l) {
          int output_idx =
            i * output_cri + j * output_ri + k * out_channel + l;
          TYPE output_val = *(output_ptr + output_idx);
          for (int h = 0; h < in_channel; ++h) {
            TYPE kernel_val = 0.;
            for (int a = cstart; a < cend; ++a) {
              for (int b = rstart; b < rend; ++b) {
                int kernel_index =
                  (a - cstart) * kernel_rio + (b - rstart) * kernel_io + h * out_channel + l;
                kernel_val = *(kernel_ptr + kernel_index);
                /* taps landing in the padding contribute to nothing */
                if (a >= 0 && a < input_cols &&
                    b >= 0 && b < input_rows) {
                  int input_idx =
                    i * input_cri + a * input_ri + b * in_channel + h;
                  *(input_ptr + input_idx) += output_val * kernel_val;
                }
              }
            }
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode stub: unpacks the OCaml argv array (16 arguments) and forwards
 * to the native implementation; argn is unused by convention. */
CAMLprim value FUN_BYTE (spatial_backward_input_naive) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_input_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/* Naive direct 3-d (cuboid) convolution: for every batch element and every
 * output voxel, accumulate kernel * input over the 3-d receptive field.
 * Layouts implied by the index arithmetic below (channel fastest):
 *   input  : [batch][col][row][dpt][in_channel]
 *   kernel : [col][row][dpt][in_channel][out_channel]
 *   output : [batch][col][row][dpt][out_channel]
 * padding == 1 selects no implicit padding; any other value computes a
 * centered (SAME-style) padding -- inferred from the arithmetic below,
 * confirm against the OCaml caller. Every output element is assigned, so
 * no prior clearing of the output is needed. TYPE / INIT come from the
 * including template file. */
CAMLprim value FUN_NATIVE (cuboid_naive) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vPadding
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  /* flattened strides of each layout level */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int kernel_dio = out_channel * in_channel * kernel_dpts;
  const int kernel_io = out_channel * in_channel;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_do = out_channel * output_dpts;
  INIT;
  /* centered padding (only when padding != 1), clamped at zero */
  int pd = 0, pr = 0, pc = 0;
  if (padding != 1) {
    pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
    pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
    pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
    if (pc < 0) pc = 0;
    if (pr < 0) pr = 0;
    if (pd < 0) pd = 0;
  }
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_crdi;
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        for (int d = 0; d < output_dpts; ++d) {
          const int output_idx_base =
            i * output_crdo +
            j * output_rdo +
            k * output_do +
            d * out_channel;
          /* 3-d receptive field of output voxel (j, k, d) */
          const int cstart = j * col_stride - pc;
          const int rstart = k * row_stride - pr;
          const int dstart = d * dpt_stride - pd;
          const int cend = cstart + kernel_cols;
          const int rend = rstart + kernel_rows;
          const int dend = dstart + kernel_dpts;
          for (int l = 0; l < out_channel; ++l) {
            TYPE sum = 0.;
            int output_idx = output_idx_base + l;
            for (int h = 0; h < in_channel; ++h) {
              for (int a = cstart; a < cend; ++a) {
                for (int b = rstart; b < rend; ++b) {
                  for (int c = dstart; c < dend; ++c) {
                    TYPE input_val, kernel_val;
                    /* out-of-bounds (padding) taps contribute zero */
                    if (a >= 0 && a < input_cols &&
                        b >= 0 && b < input_rows &&
                        c >= 0 && c < input_dpts) {
                      int input_idx =
                        input_idx_base + a * input_rdi + b * input_di +
                        c * in_channel + h;
                      input_val = *(input_ptr + input_idx);
                    } else {
                      input_val = 0.;
                    }
                    int kernel_index =
                      (a - cstart) * kernel_rdio +
                      (b - rstart) * kernel_dio +
                      (c - dstart) * kernel_io +
                      h * out_channel + l;
                    kernel_val = *(kernel_ptr + kernel_index);
                    sum += input_val * kernel_val;
                  }
                }
              }
            }
            *(output_ptr + output_idx) = sum;
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode stub: unpacks the OCaml argv array (19 arguments) and forwards
 * to the native implementation; argn is unused by convention. */
CAMLprim value FUN_BYTE (cuboid_naive) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],  argv[6],
    argv[7],  argv[8],  argv[9],  argv[10], argv[11], argv[12], argv[13],
    argv[14], argv[15], argv[16], argv[17], argv[18]
  );
}
/* Gradient of the naive 3-d (cuboid) convolution with respect to the
 * kernel: for every batch element and output voxel, add
 * output_grad * input into the matching kernel tap.
 * Layouts implied by the index arithmetic below (channel fastest):
 *   input  : [batch][col][row][dpt][in_channel]
 *   kernel : [col][row][dpt][in_channel][out_channel]
 *   output : [batch][col][row][dpt][out_channel]
 * The kernel bigarray is zeroed first and accumulated into; input and
 * output are read-only. Padding is always the centered (SAME-style)
 * amount -- no padding flag is passed to the backward kernels.
 * TYPE / INIT come from the including template file. */
CAMLprim value FUN_NATIVE (cuboid_backward_kernel_naive) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* flattened strides of each layout level */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int kernel_dio = out_channel * in_channel * kernel_dpts;
  const int kernel_io = out_channel * in_channel;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_do = out_channel * output_dpts;
  /* the gradient is accumulated with +=, so clear the kernel first */
  memset(kernel_ptr, 0, kernel_cols * kernel_rdio * sizeof(TYPE));
  INIT;
  /* centered padding, clamped at zero */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_crdi;
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        for (int d = 0; d < output_dpts; ++d) {
          const int output_idx_base =
            i * output_crdo +
            j * output_rdo +
            k * output_do +
            d * out_channel;
          /* 3-d receptive field of output voxel (j, k, d) */
          const int cstart = j * col_stride - pc;
          const int rstart = k * row_stride - pr;
          const int dstart = d * dpt_stride - pd;
          const int cend = cstart + kernel_cols;
          const int rend = rstart + kernel_rows;
          const int dend = dstart + kernel_dpts;
          for (int l = 0; l < out_channel; ++l) {
            int output_idx = output_idx_base + l;
            TYPE output_val = *(output_ptr + output_idx);
            for (int h = 0; h < in_channel; ++h) {
              for (int a = cstart; a < cend; ++a) {
                for (int b = rstart; b < rend; ++b) {
                  for (int c = dstart; c < dend; ++c) {
                    /* out-of-bounds (padding) taps contribute zero */
                    TYPE input_val = 0.;
                    if (a >= 0 && a < input_cols &&
                        b >= 0 && b < input_rows &&
                        c >= 0 && c < input_dpts) {
                      int input_idx =
                        input_idx_base + a * input_rdi + b * input_di +
                        c * in_channel + h;
                      input_val = *(input_ptr + input_idx);
                    }
                    int kernel_index =
                      (a - cstart) * kernel_rdio +
                      (b - rstart) * kernel_dio +
                      (c - dstart) * kernel_io +
                      h * out_channel + l;
                    *(kernel_ptr + kernel_index) += output_val * input_val;
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode stub: unpacks the OCaml argv array (18 arguments) and forwards
 * to the native implementation; argn is unused by convention. */
CAMLprim value FUN_BYTE (cuboid_backward_kernel_naive) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_kernel_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/* Gradient of the naive 3-d (cuboid) convolution with respect to the
 * input: for every batch element and output voxel, scatter-add
 * output_grad * kernel into the corresponding input positions.
 * Layouts implied by the index arithmetic below (channel fastest):
 *   input  : [batch][col][row][dpt][in_channel]
 *   kernel : [col][row][dpt][in_channel][out_channel]
 *   output : [batch][col][row][dpt][out_channel]
 * The input bigarray is zeroed first and accumulated into; kernel and
 * output are read-only. Padding is always the centered (SAME-style)
 * amount -- no padding flag is passed to the backward kernels.
 * TYPE / INIT come from the including template file. */
CAMLprim value FUN_NATIVE (cuboid_backward_input_naive) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* flattened strides of each layout level */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int kernel_dio = out_channel * in_channel * kernel_dpts;
  const int kernel_io = out_channel * in_channel;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_do = out_channel * output_dpts;
  /* the gradient is accumulated with +=, so clear the input first */
  memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE));
  INIT;
  /* centered padding, clamped at zero */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_crdi;
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        for (int d = 0; d < output_dpts; ++d) {
          const int output_idx_base =
            i * output_crdo +
            j * output_rdo +
            k * output_do +
            d * out_channel;
          /* 3-d receptive field of output voxel (j, k, d) */
          const int cstart = j * col_stride - pc;
          const int rstart = k * row_stride - pr;
          const int dstart = d * dpt_stride - pd;
          const int cend = cstart + kernel_cols;
          const int rend = rstart + kernel_rows;
          const int dend = dstart + kernel_dpts;
          for (int l = 0; l < out_channel; ++l) {
            int output_idx = output_idx_base + l;
            TYPE output_val = *(output_ptr + output_idx);
            for (int h = 0; h < in_channel; ++h) {
              TYPE kernel_val;
              for (int a = cstart; a < cend; ++a) {
                for (int b = rstart; b < rend; ++b) {
                  for (int c = dstart; c < dend; ++c) {
                    int kernel_index =
                      (a - cstart) * kernel_rdio +
                      (b - rstart) * kernel_dio +
                      (c - dstart) * kernel_io +
                      h * out_channel + l;
                    kernel_val = *(kernel_ptr + kernel_index);
                    /* taps landing in the padding contribute to nothing */
                    if (a >= 0 && a < input_cols &&
                        b >= 0 && b < input_rows &&
                        c >= 0 && c < input_dpts) {
                      int input_idx =
                        input_idx_base + a * input_rdi + b * input_di +
                        c * in_channel + h;
                      *(input_ptr + input_idx) += output_val * kernel_val;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode stub: unpacks the OCaml argv array (18 arguments) and forwards
 * to the native implementation; argn is unused by convention. */
CAMLprim value FUN_BYTE (cuboid_backward_input_naive) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_input_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/*
 * Dilated convolution: variants where the kernel taps are spaced by the
 * row/col input strides (dilation factors) when walking over the input.
 */
/*
 * Dilated 2D convolution, forward pass via im2col + GEMM.
 *
 * Expands input patches into a temporary [output_crb x kernel_cri] matrix
 * inpt2d, then computes output = inpt2d * kernel with a single GEMM call.
 * Flattened row-major layouts (channel innermost), as implied by the index
 * arithmetic below:
 *   input  : [batches][input_cols][input_rows][in_channel]
 *   kernel : [kernel_cols][kernel_rows][in_channel][out_channel]
 *   output : [batches][output_cols][output_rows][out_channel]
 * vPadding != 1 applies centered ("same"-style) padding; otherwise no
 * padding (presumably "valid" mode -- confirm against the OCaml caller).
 * Writes the result into the output bigarray in place; returns Val_unit.
 */
CAMLprim value FUN_NATIVE (dilated_spatial_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  // precomputed element strides for the flattened tensors
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  INIT;
  // patch matrix: one row per output position, one column per kernel tap.
  // Multiply in size_t: kernel_cri * output_crb can overflow int for large
  // tensors (signed int overflow is UB).
  TYPE *inpt2d = (TYPE *) calloc((size_t) kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);  // NOTE(review): abrupt, but matches file convention
  memset(output_ptr, 0, (size_t) batches * output_cri * sizeof(TYPE));
  // effective (dilated) kernel extents
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  // centered padding unless padding == 1
  int pr = 0, pc = 0;
  if (padding != 1) {
    pr = (row_stride * ( output_rows - 1) + kernel_rows_up - input_rows) / 2;
    pc = (col_stride * ( output_cols - 1) + kernel_cols_up - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }
  // gather: each output position copies its dilated receptive field into one
  // row of inpt2d; out-of-bounds taps stay zero from calloc.  Iterations
  // write disjoint rows, so the loop parallelizes safely.
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < output_crb; ++i) {
    int bt = i / output_cr;       // batch index
    int cr = i % output_cr;
    int c = cr / output_rows;     // output column
    int r = cr % output_rows;     // output row
    const int cstart = c * col_stride - pc;
    const int rstart = r * row_stride - pr;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
          }
          ++cnt;
        }
      }
    }
  }
  // output[output_crb x out_channel] =
  //   inpt2d[output_crb x kernel_cri] * kernel[kernel_cri x out_channel]
  GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
    output_crb, out_channel, kernel_cri, ALPHA,
    inpt2d, kernel_cri, kernel_ptr, out_channel,
    BETA, output_ptr, out_channel);
  free(inpt2d);
  return Val_unit;
}
// Bytecode-calling-convention wrapper: unpack the 17-element argv array and
// forward to the native stub (OCaml bytecode passes >5 args as an array).
CAMLprim value FUN_BYTE (dilated_spatial_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_spatial_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16]
  );
}
/*
 * Dilated 2D convolution, kernel gradient via im2col + GEMM.
 *
 * Rebuilds the im2col patch matrix inpt2d from the input, computes
 * kern2d = output' * inpt2d (an [out_channel x kernel_cri] matrix), then
 * transposes kern2d into the kernel bigarray's [kernel_cri][out_channel]
 * row-major layout.  The output bigarray holds the upstream gradient;
 * the kernel bigarray receives the result (it is zeroed first).
 * Padding here is always the centered ("same"-style) amount.
 * Returns Val_unit.
 *
 * Changes vs. original: removed unused local output_ri; size computations
 * for calloc/memset promoted to size_t to avoid int overflow.
 */
CAMLprim value FUN_NATIVE (dilated_spatial_backward_kernel_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  // precomputed element strides for the flattened tensors
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  INIT;
  // size_t arithmetic: these products can overflow int for large tensors
  TYPE *inpt2d = (TYPE *) calloc((size_t) kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc((size_t) kernel_cri * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  memset(kernel_ptr, 0, (size_t) kernel_cols * kernel_rio * sizeof(TYPE));
  // effective (dilated) kernel extents
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  // centered padding, clamped at zero
  int pad_rows = row_stride * (output_rows - 1) + kernel_rows_up - input_rows;
  int pad_cols = col_stride * (output_cols - 1) + kernel_cols_up - input_cols;
  int p_top = pad_rows / 2;
  int p_left = pad_cols / 2;
  if (p_top < 0) p_top = 0;
  if (p_left < 0) p_left = 0;
  // gather: rebuild the forward pass's im2col matrix (one row per output
  // position); out-of-bounds taps stay zero from calloc.  Rows are disjoint,
  // so the loop parallelizes safely.
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < output_crb; ++i) {
    int bt = i / output_cr;       // batch index
    int cr = i % output_cr;
    int c = cr / output_rows;     // output column
    int r = cr % output_rows;     // output row
    const int cstart = c * col_stride - p_left;
    const int rstart = r * row_stride - p_top;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
          }
          ++cnt;
        }
      }
    }
  }
  // kern2d[out_channel x kernel_cri] =
  //   output'[out_channel x output_crb] * inpt2d[output_crb x kernel_cri]
  GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
    out_channel, kernel_cri, output_crb, ALPHA,
    output_ptr, out_channel, inpt2d, kernel_cri,
    BETA, kern2d, kernel_cri);
  // transpose kern2d into the kernel layout [kernel_cri][out_channel]
  int cnt = 0;
  for (int j = 0; j < kernel_cri; ++j) {
    for (int i = 0; i < out_channel; ++i) {
      kernel_ptr[cnt++] = kern2d[i * kernel_cri + j];
    }
  }
  free(inpt2d);
  free(kern2d);
  return Val_unit;
}
// Bytecode-calling-convention wrapper: unpack the 16-element argv array and
// forward to the native stub (OCaml bytecode passes >5 args as an array).
CAMLprim value FUN_BYTE (dilated_spatial_backward_kernel_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_spatial_backward_kernel_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
  );
}
/*
 * Dilated 2D convolution, input gradient via GEMM + col2im scatter.
 *
 * Computes inpt2d = output * kernel' (the gradient of every im2col patch),
 * then scatter-adds each patch back into the input bigarray (col2im).
 * The output bigarray holds the upstream gradient; the input bigarray
 * receives the result (it is zeroed first).  Padding is always the
 * centered ("same"-style) amount.  Returns Val_unit.
 *
 * Changes vs. original: removed unused local output_ri; size computations
 * for calloc/memset promoted to size_t to avoid int overflow.
 */
CAMLprim value FUN_NATIVE (dilated_spatial_backward_input_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  // precomputed element strides for the flattened tensors
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  INIT;
  // size_t arithmetic: these products can overflow int for large tensors
  TYPE *inpt2d = (TYPE *) calloc((size_t) kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  memset(input_ptr, 0, (size_t) batches * input_cri * sizeof(TYPE));
  // effective (dilated) kernel extents
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  // centered padding, clamped at zero
  int pad_rows = row_stride * (output_rows - 1) + kernel_rows_up - input_rows;
  int pad_cols = col_stride * (output_cols - 1) + kernel_cols_up - input_cols;
  int p_top = pad_rows / 2;
  int p_left = pad_cols / 2;
  if (p_top < 0) p_top = 0;
  if (p_left < 0) p_left = 0;
  // inpt2d[output_crb x kernel_cri] =
  //   output[output_crb x out_channel] * kernel'[out_channel x kernel_cri]
  GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
    output_crb, kernel_cri, out_channel, ALPHA,
    output_ptr, out_channel, kernel_ptr, out_channel,
    BETA, inpt2d, kernel_cri);
  // col2im scatter: accumulate each patch gradient back into the input.
  // Deliberately serial: overlapping receptive fields make different i
  // iterations += into the same input element, so parallelizing naively
  // would race.
  for (int i = 0; i < output_crb; ++i) {
    int bt = i / output_cr;       // batch index
    int cr = i % output_cr;
    int c = cr / output_rows;     // output column
    int r = cr % output_rows;     // output row
    const int cstart = c * col_stride - p_left;
    const int rstart = r * row_stride - p_top;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            input_ptr[input_idx] += inpt2d[i * kernel_cri + cnt];
          }
          ++cnt;
        }
      }
    }
  }
  free(inpt2d);
  return Val_unit;
}
// Bytecode-calling-convention wrapper: unpack the 16-element argv array and
// forward to the native stub (OCaml bytecode passes >5 args as an array).
CAMLprim value FUN_BYTE (dilated_spatial_backward_input_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_spatial_backward_input_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
  );
}
/*
 * Dilated 3D (cuboid) convolution, forward pass via im2col + GEMM.
 *
 * Expands input patches into a temporary [output_drcb x kernel_idrc] matrix
 * inpt2d, then computes output = inpt2d * kernel with one GEMM call.
 * Flattened row-major layouts (channel innermost), per the index arithmetic:
 *   input  : [batches][input_cols][input_rows][input_dpts][in_channel]
 *   kernel : [kernel_cols][kernel_rows][kernel_dpts][in_channel][out_channel]
 *   output : [batches][output_cols][output_rows][output_dpts][out_channel]
 * vPadding != 1 applies centered ("same"-style) padding; otherwise no
 * padding (presumably "valid" mode -- confirm against the OCaml caller).
 * Returns Val_unit.
 *
 * Change vs. original: calloc/memset size products promoted to size_t to
 * avoid int overflow for large tensors.
 */
CAMLprim value FUN_NATIVE (dilated_cuboid_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vDpt_in_stride, value vRow_in_stride, value vCol_in_stride,
  value vPadding
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int dpt_in_stride = Long_val(vDpt_in_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  int padding = Long_val(vPadding);
  // precomputed element strides for the flattened tensors
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  // size_t arithmetic: these products can overflow int for large tensors
  TYPE *inpt2d = (TYPE *) calloc((size_t) kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);  // NOTE(review): abrupt, but matches file convention
  memset(output_ptr, 0, (size_t) batches * output_crdo * sizeof(TYPE));
  INIT;
  // effective (dilated) kernel extents
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  int kernel_dpts_up = kernel_dpts + (kernel_dpts - 1) * (dpt_in_stride - 1);
  // centered padding unless padding == 1
  int pd = 0, pr = 0, pc = 0;
  if (padding != 1) {
    pc = (col_stride * (output_cols - 1) + kernel_cols_up - input_cols) / 2;
    pr = (row_stride * (output_rows - 1) + kernel_rows_up - input_rows) / 2;
    pd = (dpt_stride * (output_dpts - 1) + kernel_dpts_up - input_dpts) / 2;
    if (pc < 0) pc = 0;
    if (pr < 0) pr = 0;
    if (pd < 0) pd = 0;
  }
  // gather: each output position copies its dilated receptive field into one
  // row of inpt2d; out-of-bounds taps stay zero from calloc.  Rows are
  // disjoint, so the loop parallelizes safely.
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < output_drcb; ++i) {
    int bt = i / output_drc;      // batch index
    int jkd = i % output_drc;
    int j = jkd / output_dr;      // output column
    int kd = jkd % output_dr;
    int k = kd / output_dpts;     // output row
    int d = kd % output_dpts;     // output depth
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int dend = dstart + kernel_dpts_up;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int c = dstart; c < dend; c += dpt_in_stride) {
          for (int h = 0; h < in_channel; ++h) {
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
  }
  // output[output_drcb x out_channel] =
  //   inpt2d[output_drcb x kernel_idrc] * kernel[kernel_idrc x out_channel]
  GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
    output_drcb, out_channel, kernel_idrc, ALPHA,
    inpt2d, kernel_idrc, kernel_ptr, out_channel,
    BETA, output_ptr, out_channel);
  free(inpt2d);
  return Val_unit;
}
// Bytecode-calling-convention wrapper: unpack the 22-element argv array and
// forward to the native stub (OCaml bytecode passes >5 args as an array).
CAMLprim value FUN_BYTE (dilated_cuboid_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_cuboid_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17], argv[18], argv[19], argv[20], argv[21]
  );
}
/*
 * Dilated 3D (cuboid) convolution, kernel gradient via im2col + GEMM.
 *
 * Rebuilds the im2col patch matrix inpt2d from the input, computes
 * kern2d = output' * inpt2d (an [out_channel x kernel_idrc] matrix), then
 * transposes kern2d into the kernel bigarray's [kernel_idrc][out_channel]
 * row-major layout.  The output bigarray holds the upstream gradient; the
 * kernel bigarray receives the result (it is zeroed first).  Padding is
 * always the centered ("same"-style) amount.  Returns Val_unit.
 *
 * Change vs. original: calloc/memset size products promoted to size_t to
 * avoid int overflow for large tensors.
 */
CAMLprim value FUN_NATIVE (dilated_cuboid_backward_kernel_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vDpt_in_stride, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int dpt_in_stride = Long_val(vDpt_in_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  // precomputed element strides for the flattened tensors
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  INIT;
  // size_t arithmetic: these products can overflow int for large tensors
  TYPE *inpt2d = (TYPE *) calloc((size_t) kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc((size_t) kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  memset(kernel_ptr, 0, (size_t) kernel_cols * kernel_rdio * sizeof(TYPE));
  // effective (dilated) kernel extents
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  int kernel_dpts_up = kernel_dpts + (kernel_dpts - 1) * (dpt_in_stride - 1);
  // centered padding, clamped at zero
  int pc = (col_stride * (output_cols - 1) + kernel_cols_up - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows_up - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts_up - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  // gather: rebuild the forward pass's im2col matrix; out-of-bounds taps
  // stay zero from calloc.  Rows are disjoint, so the loop parallelizes.
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < output_drcb; ++i) {
    int bt = i / output_drc;      // batch index
    int jkd = i % output_drc;
    int j = jkd / output_dr;      // output column
    int kd = jkd % output_dr;
    int k = kd / output_dpts;     // output row
    int d = kd % output_dpts;     // output depth
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int dend = dstart + kernel_dpts_up;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int c = dstart; c < dend; c += dpt_in_stride) {
          for (int h = 0; h < in_channel; ++h) {
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
  }
  // kern2d[out_channel x kernel_idrc] =
  //   output'[out_channel x output_drcb] * inpt2d[output_drcb x kernel_idrc]
  GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
    out_channel, kernel_idrc, output_drcb, ALPHA,
    output_ptr, out_channel, inpt2d, kernel_idrc,
    BETA, kern2d, kernel_idrc);
  // transpose kern2d into the kernel layout [kernel_idrc][out_channel]
  int cnt = 0;
  for (int j = 0; j < kernel_idrc; ++j) {
    for (int i = 0; i < out_channel; ++i) {
      kernel_ptr[cnt++] = kern2d[i * kernel_idrc + j];
    }
  }
  free(inpt2d);
  free(kern2d);
  return Val_unit;
}
// Bytecode-calling-convention wrapper: unpack the 21-element argv array and
// forward to the native stub (OCaml bytecode passes >5 args as an array).
CAMLprim value FUN_BYTE (dilated_cuboid_backward_kernel_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_cuboid_backward_kernel_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17], argv[18], argv[19], argv[20]
  );
}
/*
 * Dilated 3D (cuboid) convolution, input gradient via GEMM + col2im scatter.
 *
 * Computes inpt2d = output * kernel' (the gradient of every im2col patch),
 * then scatter-adds each patch back into the input bigarray (col2im).
 * The output bigarray holds the upstream gradient; the input bigarray
 * receives the result (it is zeroed first).  Padding is always the
 * centered ("same"-style) amount.  Returns Val_unit.
 *
 * Change vs. original: calloc/memset size products promoted to size_t to
 * avoid int overflow for large tensors.
 */
CAMLprim value FUN_NATIVE (dilated_cuboid_backward_input_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vDpt_in_stride, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int dpt_in_stride = Long_val(vDpt_in_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  // precomputed element strides for the flattened tensors
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  // size_t arithmetic: these products can overflow int for large tensors
  TYPE *inpt2d = (TYPE *) calloc((size_t) kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  memset(input_ptr, 0, (size_t) batches * input_crdi * sizeof(TYPE));
  INIT;
  // effective (dilated) kernel extents
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  int kernel_dpts_up = kernel_dpts + (kernel_dpts - 1) * (dpt_in_stride - 1);
  // centered padding, clamped at zero
  int pc = (col_stride * (output_cols - 1) + kernel_cols_up - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows_up - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts_up - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  // inpt2d[output_drcb x kernel_idrc] =
  //   output[output_drcb x out_channel] * kernel'[out_channel x kernel_idrc]
  GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
    output_drcb, kernel_idrc, out_channel, ALPHA,
    output_ptr, out_channel, kernel_ptr, out_channel,
    BETA, inpt2d, kernel_idrc);
  // col2im scatter: accumulate each patch gradient back into the input.
  // Deliberately serial: overlapping receptive fields make different i
  // iterations += into the same input element, so parallelizing naively
  // would race.
  for (int i = 0; i < output_drcb; ++i) {
    int bt = i / output_drc;      // batch index
    int jkd = i % output_drc;
    int j = jkd / output_dr;      // output column
    int kd = jkd % output_dr;
    int k = kd / output_dpts;     // output row
    int d = kd % output_dpts;     // output depth
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int dend = dstart + kernel_dpts_up;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int c = dstart; c < dend; c += dpt_in_stride) {
          for (int h = 0; h < in_channel; ++h) {
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              input_ptr[input_idx] += inpt2d[i * kernel_idrc + cnt];
            }
            ++cnt;
          }
        }
      }
    }
  }
  free(inpt2d);
  return Val_unit;
}
// Bytecode-calling-convention wrapper: unpack the 21-element argv array and
// forward to the native stub (OCaml bytecode passes >5 args as an array).
CAMLprim value FUN_BYTE (dilated_cuboid_backward_input_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_cuboid_backward_input_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17], argv[18], argv[19], argv[20]
  );
}
#endif /* OWL_ENABLE_TEMPLATE */
|
GB_unop__identity_uint32_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint32_int8)
// op(A') function: GB (_unop_tran__identity_uint32_int8)
// C type: uint32_t
// A type: int8_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = (uint32_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity unary operator with an
// int8_t -> uint32_t typecast, elementwise over anz entries.  When Ab is
// non-NULL, A is bitmap and only entries with Ab [p] set are written
// (A->b has already been memcpy'd into C->b by the caller).
GrB_Info GB (_unop_apply__identity_uint32_int8)
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; skip entries not present in A
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                int8_t aij = Ax [p] ;
                Cx [p] = (uint32_t) aij ;
            }
        }
    }
    else
    {
        // dense case: every position holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int8_t aij = Ax [p] ;
            Cx [p] = (uint32_t) aij ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int8_t -> uint32_t, and apply
// the identity unary operator.  The implementation is the shared template
// GB_unop_transpose.c, specialized by the GB_* macros defined earlier in
// this generated file; Workspaces/A_slice partition the work over
// nworkspaces buckets and nthreads threads.
GrB_Info GB (_unop_tran__identity_uint32_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_AxB_dot4_template.c | //------------------------------------------------------------------------------
// GB_AxB_dot4_template: C+=A'*B via dot products, where C is full
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C+=A'*B where C is full and computed in-place. The monoid of the semiring
// matches the accum operator, and the type of C matches the ztype of accum.
// The PAIR and FIRSTJ multiplicative operators are important special cases.
// The matrix C is the user input matrix. C is not iso on output, but might
// iso on input, in which case the input iso scalar is cinput, and C->x has
// been expanded to non-iso, and initialized if A and/or B are hypersparse.
// A and/or B can be iso.
// MIN_FIRSTJ or MIN_FIRSTJ1 semirings:
#define GB_IS_MIN_FIRSTJ_SEMIRING (GB_IS_IMIN_MONOID && GB_IS_FIRSTJ_MULTIPLIER)
// MAX_FIRSTJ or MAX_FIRSTJ1 semirings:
#define GB_IS_MAX_FIRSTJ_SEMIRING (GB_IS_IMAX_MONOID && GB_IS_FIRSTJ_MULTIPLIER)
// GB_OFFSET is 1 for the MIN/MAX_FIRSTJ1 semirings, and 0 otherwise.
#if GB_IS_ANY_MONOID
#error "dot4 is not used for the ANY monoid"
#endif
#undef GB_GET4C
#define GB_GET4C(cij,p) cij = (C_in_iso) ? cinput : Cx [p]
#if ((GB_A_IS_BITMAP || GB_A_IS_FULL) && (GB_B_IS_BITMAP || GB_B_IS_FULL ))
{
//--------------------------------------------------------------------------
// C += A'*B where A and B are both bitmap/full
//--------------------------------------------------------------------------
// FUTURE: This method is not particularly efficient when both A and B are
// bitmap/full. A better method would use tiles to reduce memory traffic.
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
const int a_tid = tid / nbslice ;
const int b_tid = tid % nbslice ;
const int64_t kA_start = A_slice [a_tid] ;
const int64_t kA_end = A_slice [a_tid+1] ;
const int64_t kB_start = B_slice [b_tid] ;
const int64_t kB_end = B_slice [b_tid+1] ;
for (int64_t j = kB_start ; j < kB_end ; j++)
{
//------------------------------------------------------------------
// get B(:,j) and C(:,j)
//------------------------------------------------------------------
const int64_t pC_start = j * cvlen ;
const int64_t pB_start = j * vlen ;
//------------------------------------------------------------------
// C(:,j) += A'*B(:,j)
//------------------------------------------------------------------
for (int64_t i = kA_start ; i < kA_end ; i++)
{
//--------------------------------------------------------------
// get A(:,i)
//--------------------------------------------------------------
const int64_t pA = i * vlen ;
//--------------------------------------------------------------
// get C(i,j)
//--------------------------------------------------------------
int64_t pC = i + pC_start ; // C(i,j) is at Cx [pC]
GB_CTYPE GB_GET4C (cij, pC) ; // cij = Cx [pC]
//--------------------------------------------------------------
// C(i,j) += A (:,i)*B(:,j): a single dot product
//--------------------------------------------------------------
int64_t pB = pB_start ;
#if ( GB_A_IS_FULL && GB_B_IS_FULL )
{
//----------------------------------------------------------
// both A and B are full
//----------------------------------------------------------
#if GB_IS_PAIR_MULTIPLIER
{
#if GB_IS_EQ_MONOID
// EQ_PAIR semiring
cij = (cij == 1) ;
#elif (GB_CTYPE_BITS > 0)
// PLUS, XOR monoids: A(:,i)'*B(:,j) is nnz(A(:,i)),
// for bool, 8-bit, 16-bit, or 32-bit integer
uint64_t t = ((uint64_t) cij) + vlen ;
cij = (GB_CTYPE) (t & GB_CTYPE_BITS) ;
#elif GB_IS_PLUS_FC32_MONOID
// PLUS monoid for float complex
cij = GxB_CMPLXF (crealf (cij) + (float) vlen, 0) ;
#elif GB_IS_PLUS_FC64_MONOID
// PLUS monoid for double complex
cij = GxB_CMPLX (creal (cij) + (double) vlen, 0) ;
#else
// PLUS monoid for float, double, or 64-bit integers
cij += (GB_CTYPE) vlen ;
#endif
}
#elif GB_IS_MIN_FIRSTJ_SEMIRING
{
// MIN_FIRSTJ semiring: take the first entry
if (vlen > 0)
{
int64_t k = GB_OFFSET ;
cij = GB_IMIN (cij, k) ;
}
}
#elif GB_IS_MAX_FIRSTJ_SEMIRING
{
// MAX_FIRSTJ semiring: take the last entry
if (vlen > 0)
{
int64_t k = vlen-1 + GB_OFFSET ;
cij = GB_IMAX (cij, k) ;
}
}
#else
{
GB_PRAGMA_SIMD_DOT (cij)
for (int64_t k = 0 ; k < vlen ; k++)
{
GB_DOT (k, pA+k, pB+k) ; // cij += A(k,i)*B(k,j)
}
}
#endif
}
#elif ( GB_A_IS_FULL && GB_B_IS_BITMAP )
{
//----------------------------------------------------------
// A is full and B is bitmap
//----------------------------------------------------------
#if GB_IS_MIN_FIRSTJ_SEMIRING
{
// MIN_FIRSTJ semiring: take the first entry in B(:,j)
for (int64_t k = 0 ; k < vlen ; k++)
{
if (Bb [pB+k])
{
cij = GB_IMIN (cij, k + GB_OFFSET) ;
break ;
}
}
}
#elif GB_IS_MAX_FIRSTJ_SEMIRING
{
// MAX_FIRSTJ semiring: take the last entry in B(:,j)
for (int64_t k = vlen-1 ; k >= 0 ; k--)
{
if (Bb [pB+k])
{
cij = GB_IMAX (cij, k + GB_OFFSET) ;
break ;
}
}
}
#else
{
GB_PRAGMA_SIMD_DOT (cij)
for (int64_t k = 0 ; k < vlen ; k++)
{
if (Bb [pB+k])
{
GB_DOT (k, pA+k, pB+k) ; // cij += A(k,i)*B(k,j)
}
}
}
#endif
}
#elif ( GB_A_IS_BITMAP && GB_B_IS_FULL )
{
//----------------------------------------------------------
// A is bitmap and B is full
//----------------------------------------------------------
#if GB_IS_MIN_FIRSTJ_SEMIRING
{
// MIN_FIRSTJ semiring: take the first entry in A(:,i)
for (int64_t k = 0 ; k < vlen ; k++)
{
if (Ab [pA+k])
{
cij = GB_IMIN (cij, k + GB_OFFSET) ;
break ;
}
}
}
#elif GB_IS_MAX_FIRSTJ_SEMIRING
{
// MAX_FIRSTJ semiring: take the last entry in A(:,i)
for (int64_t k = vlen-1 ; k >= 0 ; k--)
{
if (Ab [pA+k])
{
cij = GB_IMAX (cij, k + GB_OFFSET) ;
break ;
}
}
}
#else
{
GB_PRAGMA_SIMD_DOT (cij)
for (int64_t k = 0 ; k < vlen ; k++)
{
if (Ab [pA+k])
{
GB_DOT (k, pA+k, pB+k) ; // cij += A(k,i)*B(k,j)
}
}
}
#endif
}
#elif ( GB_A_IS_BITMAP && GB_B_IS_BITMAP )
{
//----------------------------------------------------------
// both A and B are bitmap
//----------------------------------------------------------
#if GB_IS_MIN_FIRSTJ_SEMIRING
{
// MIN_FIRSTJ semiring: take the first entry
for (int64_t k = 0 ; k < vlen ; k++)
{
if (Ab [pA+k] && Bb [pB+k])
{
cij = GB_IMIN (cij, k + GB_OFFSET) ;
break ;
}
}
}
#elif GB_IS_MAX_FIRSTJ_SEMIRING
{
// MAX_FIRSTJ semiring: take the last entry
for (int64_t k = vlen-1 ; k >= 0 ; k--)
{
if (Ab [pA+k] && Bb [pB+k])
{
cij = GB_IMAX (cij, k + GB_OFFSET) ;
break ;
}
}
}
#else
{
GB_PRAGMA_SIMD_DOT (cij)
for (int64_t k = 0 ; k < vlen ; k++)
{
if (Ab [pA+k] && Bb [pB+k])
{
GB_DOT (k, pA+k, pB+k) ; // cij += A(k,i)*B(k,j)
}
}
}
#endif
}
#endif
//--------------------------------------------------------------
// save C(i,j)
//--------------------------------------------------------------
Cx [pC] = cij ;
}
}
}
}
#elif ((GB_A_IS_SPARSE || GB_A_IS_HYPER) && (GB_B_IS_BITMAP || GB_B_IS_FULL ))
{
//--------------------------------------------------------------------------
// C += A'*B when A is sparse/hyper and B is bitmap/full
//--------------------------------------------------------------------------
    // special cases: these methods are very fast, and do not need
    // to be unrolled.
#undef GB_SPECIAL_CASE_OR_TERMINAL
#define GB_SPECIAL_CASE_OR_TERMINAL \
( GB_IS_PAIR_MULTIPLIER /* the multiply op is PAIR */ \
|| GB_IS_MIN_FIRSTJ_SEMIRING /* min_firstj semiring */ \
|| GB_IS_MAX_FIRSTJ_SEMIRING /* max_firstj semiring */ \
|| GB_MONOID_IS_TERMINAL /* monoid has a terminal value */ \
|| GB_B_IS_PATTERN ) /* B is pattern-only */
// Transpose B and unroll the innermost loop if this condition holds: A
// must be sparse, B must be full, and no special semirings or operators
// can be used. The monoid must not be terminal. These conditions are
// known at compile time.
#undef GB_UNROLL
#define GB_UNROLL \
( GB_A_IS_SPARSE && GB_B_IS_FULL && !( GB_SPECIAL_CASE_OR_TERMINAL ) )
// If GB_UNROLL is true at compile-time, the simpler variant can still be
// used, without unrolling, for any of these conditions: (1) A is very
// sparse (fewer entries than the size of the W workspace) or (2) B is iso.
// The unrolled method does not allow B to be iso or pattern-only (such as
    // for the FIRST multiplicative operator). If B is iso or pattern-only, the
// dense matrix G = B' would be a single scalar, or its values would not be
// accessed at all, so there is no benefit to computing G.
#if GB_UNROLL
const int64_t wp = (bvdim == 1) ? 0 : GB_IMIN (bvdim, 4) ;
const int64_t anz = GB_nnz (A) ;
if (anz < wp * vlen || B_iso)
#endif
{
//----------------------------------------------------------------------
// C += A'*B without workspace
//----------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
const int64_t kA_start = A_slice [tid] ;
const int64_t kA_end = A_slice [tid+1] ;
//------------------------------------------------------------------
// C+=A'*B where A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------
if (bvdim == 1)
{
//--------------------------------------------------------------
// C += A'*B where C is a single vector
//--------------------------------------------------------------
#define pC_start 0
#define pB 0
#define j 0
for (int64_t kA = kA_start ; kA < kA_end ; kA++)
{
// get A(:,i)
#if GB_A_IS_HYPER
const int64_t i = Ah [kA] ;
#else
const int64_t i = kA ;
#endif
int64_t pA = Ap [kA] ;
const int64_t pA_end = Ap [kA+1] ;
const int64_t ainz = pA_end - pA ;
// C(i) += A(:,i)'*B(:,0)
#include "GB_AxB_dot4_cij.c"
}
#undef pC_start
#undef pB
#undef j
}
else
{
//--------------------------------------------------------------
// C += A'*B where C is a matrix
//--------------------------------------------------------------
for (int64_t kA = kA_start ; kA < kA_end ; kA++)
{
// get A(:,i)
#if GB_A_IS_HYPER
const int64_t i = Ah [kA] ;
#else
const int64_t i = kA ;
#endif
int64_t pA = Ap [kA] ;
const int64_t pA_end = Ap [kA+1] ;
const int64_t ainz = pA_end - pA ;
// C(i,:) += A(:,i)'*B
for (int64_t j = 0 ; j < bvdim ; j++)
{
// get B(:,j) and C(:,j)
const int64_t pC_start = j * cvlen ;
const int64_t pB = j * vlen ;
// C(i,j) += A(:,i)'*B(:,j)
#include "GB_AxB_dot4_cij.c"
}
}
}
}
}
#if GB_UNROLL
else
{
//----------------------------------------------------------------------
// C += A'*B: with workspace W for transposing B, one panel at a time
//----------------------------------------------------------------------
size_t W_size = 0 ;
GB_BTYPE *restrict W = NULL ;
if (bvdim > 1)
{
W = GB_MALLOC_WORK (wp * vlen, GB_BTYPE, &W_size) ;
if (W == NULL)
{
// out of memory
return (GrB_OUT_OF_MEMORY) ;
}
}
for (int64_t j1 = 0 ; j1 < bvdim ; j1 += 4)
{
//------------------------------------------------------------------
// C(:,j1:j2-1) += A * B (:,j1:j2-1) for a single panel
//------------------------------------------------------------------
const int64_t j2 = GB_IMIN (j1 + 4, bvdim) ;
switch (j2 - j1)
{
default :
case 1 :
{
//----------------------------------------------------------
// C(:,j1:j2-1) is a single vector; use B(:,j1) in place
//----------------------------------------------------------
const GB_BTYPE *restrict G = Bx + j1 * vlen ;
int tid ;
#pragma omp parallel for num_threads(nthreads) \
schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
// get the task descriptor
const int64_t kA_start = A_slice [tid] ;
const int64_t kA_end = A_slice [tid+1] ;
for (int64_t i = kA_start ; i < kA_end ; i++)
{
// get A(:,i)
const int64_t pA = Ap [i] ;
const int64_t pA_end = Ap [i+1] ;
// cx [0] = C(i,j1)
GB_CTYPE cx [1] ;
GB_GET4C (cx [0], i + j1*cvlen) ;
// cx [0] += A (:,i)'*G
for (int64_t p = pA ; p < pA_end ; p++)
{
// aki = A(k,i)
const int64_t k = Ai [p] ;
GB_GETA (aki, Ax, p, A_iso) ;
// cx [0] += A(k,i)*G(k,0)
GB_MULTADD (cx [0], aki, G [k], i, k, j1) ;
}
// C(i,j1) = cx [0]
Cx [i + j1*cvlen] = cx [0] ;
}
}
}
break ;
case 2 :
{
//----------------------------------------------------------
// G = B(:,j1:j1+1) and convert to row-form
//----------------------------------------------------------
GB_BTYPE *restrict G = W ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) \
schedule(static)
for (k = 0 ; k < vlen ; k++)
{
// G (k,0:1) = B (k,j1:j1+1)
const int64_t k2 = k << 1 ;
G [k2 ] = Bx [k + (j1 ) * vlen] ;
G [k2 + 1] = Bx [k + (j1 + 1) * vlen] ;
}
//----------------------------------------------------------
// C += A'*G where G is vlen-by-2 in row-form
//----------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) \
schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
// get the task descriptor
const int64_t kA_start = A_slice [tid] ;
const int64_t kA_end = A_slice [tid+1] ;
for (int64_t i = kA_start ; i < kA_end ; i++)
{
// get A(:,i)
const int64_t pA = Ap [i] ;
const int64_t pA_end = Ap [i+1] ;
// cx [0:1] = C(i,j1:j1+1)
GB_CTYPE cx [2] ;
GB_GET4C (cx [0], i + (j1 )*cvlen) ;
GB_GET4C (cx [1], i + (j1+1)*cvlen) ;
// cx [0:1] += A (:,i)'*G
for (int64_t p = pA ; p < pA_end ; p++)
{
// aki = A(k,i)
const int64_t k = Ai [p] ;
GB_GETA (aki, Ax, p, A_iso) ;
const int64_t k2 = k << 1 ;
// cx [0:1] += A(k,i)*G(k,0:1)
GB_MULTADD (cx [0], aki, G [k2], i, k, j1) ;
GB_MULTADD (cx [1], aki, G [k2+1], i, k, j1+1) ;
}
// C(i,j1:j1+1) = cx [0:1]
Cx [i + (j1 )*cvlen] = cx [0] ;
Cx [i + (j1+1)*cvlen] = cx [1] ;
}
}
}
break ;
case 3 :
{
//----------------------------------------------------------
// G = B(:,j1:j1+2) and convert to row-form
//----------------------------------------------------------
GB_BTYPE *restrict G = W ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) \
schedule(static)
for (k = 0 ; k < vlen ; k++)
{
// G (k,0:2) = B (k,j1:j1+2)
const int64_t k3 = k * 3 ;
G [k3 ] = Bx [k + (j1 ) * vlen] ;
G [k3 + 1] = Bx [k + (j1 + 1) * vlen] ;
G [k3 + 2] = Bx [k + (j1 + 2) * vlen] ;
}
//----------------------------------------------------------
// C += A'*G where G is vlen-by-3 in row-form
//----------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) \
schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
// get the task descriptor
const int64_t kA_start = A_slice [tid] ;
const int64_t kA_end = A_slice [tid+1] ;
for (int64_t i = kA_start ; i < kA_end ; i++)
{
// get A(:,i)
const int64_t pA = Ap [i] ;
const int64_t pA_end = Ap [i+1] ;
// cx [0:2] = C(i,j1:j1+2)
GB_CTYPE cx [3] ;
GB_GET4C (cx [0], i + (j1 )*cvlen) ;
GB_GET4C (cx [1], i + (j1+1)*cvlen) ;
GB_GET4C (cx [2], i + (j1+2)*cvlen) ;
// cx [0:2] += A (:,i)'*G
for (int64_t p = pA ; p < pA_end ; p++)
{
// aki = A(k,i)
const int64_t k = Ai [p] ;
GB_GETA (aki, Ax, p, A_iso) ;
const int64_t k3 = k * 3 ;
// cx [0:2] += A(k,i)*G(k,0:2)
GB_MULTADD (cx [0], aki, G [k3 ], i, k, j1) ;
GB_MULTADD (cx [1], aki, G [k3+1], i, k, j1+1) ;
GB_MULTADD (cx [2], aki, G [k3+2], i, k, j1+2) ;
}
// C(i,j1:j1+2) = cx [0:2]
Cx [i + (j1 )*cvlen] = cx [0] ;
Cx [i + (j1+1)*cvlen] = cx [1] ;
Cx [i + (j1+2)*cvlen] = cx [2] ;
}
}
}
break ;
case 4 :
{
//----------------------------------------------------------
// G = B(:,j1:j1+3) and convert to row-form
//----------------------------------------------------------
GB_BTYPE *restrict G = W ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) \
schedule(static)
for (k = 0 ; k < vlen ; k++)
{
// G (k,0:3) = B (k,j1:j1+3)
const int64_t k4 = k << 2 ;
G [k4 ] = Bx [k + (j1 ) * vlen] ;
G [k4 + 1] = Bx [k + (j1 + 1) * vlen] ;
G [k4 + 2] = Bx [k + (j1 + 2) * vlen] ;
G [k4 + 3] = Bx [k + (j1 + 3) * vlen] ;
}
//----------------------------------------------------------
// C += A'*G where G is vlen-by-4 in row-form
//----------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) \
schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
// get the task descriptor
const int64_t kA_start = A_slice [tid] ;
const int64_t kA_end = A_slice [tid+1] ;
for (int64_t i = kA_start ; i < kA_end ; i++)
{
// get A(:,i)
const int64_t pA = Ap [i] ;
const int64_t pA_end = Ap [i+1] ;
// cx [0:3] = C(i,j1:j1+3)
GB_CTYPE cx [4] ;
GB_GET4C (cx [0], i + (j1 )*cvlen) ;
GB_GET4C (cx [1], i + (j1+1)*cvlen) ;
GB_GET4C (cx [2], i + (j1+2)*cvlen) ;
GB_GET4C (cx [3], i + (j1+3)*cvlen) ;
// cx [0:3] += A (:,i)'*G
for (int64_t p = pA ; p < pA_end ; p++)
{
// aki = A(k,i)
const int64_t k = Ai [p] ;
GB_GETA (aki, Ax, p, A_iso) ;
const int64_t k4 = k << 2 ;
// cx [0:3] += A(k,i)*G(k,0:3)
GB_MULTADD (cx [0], aki, G [k4 ], i, k, j1) ;
GB_MULTADD (cx [1], aki, G [k4+1], i, k, j1+1) ;
GB_MULTADD (cx [2], aki, G [k4+2], i, k, j1+2) ;
GB_MULTADD (cx [3], aki, G [k4+3], i, k, j1+3) ;
}
// C(i,j1:j1+3) = cx [0:3]
Cx [i + (j1 )*cvlen] = cx [0] ;
Cx [i + (j1+1)*cvlen] = cx [1] ;
Cx [i + (j1+2)*cvlen] = cx [2] ;
Cx [i + (j1+3)*cvlen] = cx [3] ;
}
}
}
break ;
}
}
// free workspace
GB_FREE_WORK (&W, W_size) ;
}
#endif
}
#elif ( (GB_A_IS_BITMAP || GB_A_IS_FULL) && (GB_B_IS_SPARSE || GB_B_IS_HYPER))
{
//--------------------------------------------------------------------------
// C += A'*B where A is bitmap/full and B is sparse/hyper
//--------------------------------------------------------------------------
// FUTURE: this can be unrolled, like the case above
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
const int64_t kB_start = B_slice [tid] ;
const int64_t kB_end = B_slice [tid+1] ;
for (int64_t kB = kB_start ; kB < kB_end ; kB++)
{
//------------------------------------------------------------------
// get B(:,j) and C(:,j)
//------------------------------------------------------------------
#if GB_B_IS_HYPER
const int64_t j = Bh [kB] ;
#else
const int64_t j = kB ;
#endif
const int64_t pC_start = j * cvlen ;
const int64_t pB_start = Bp [kB] ;
const int64_t pB_end = Bp [kB+1] ;
const int64_t bjnz = pB_end - pB_start ;
//------------------------------------------------------------------
// C(:,j) += A'*B(:,j)
//------------------------------------------------------------------
for (int64_t i = 0 ; i < avdim ; i++)
{
//--------------------------------------------------------------
// get A(:,i)
//--------------------------------------------------------------
const int64_t pA = i * vlen ;
//--------------------------------------------------------------
// get C(i,j)
//--------------------------------------------------------------
int64_t pC = i + pC_start ; // C(i,j) is at Cx [pC]
GB_CTYPE GB_GET4C (cij, pC) ; // cij = Cx [pC]
//--------------------------------------------------------------
// C(i,j) += A (:,i)*B(:,j): a single dot product
//--------------------------------------------------------------
int64_t pB = pB_start ;
#if ( GB_A_IS_FULL )
{
//----------------------------------------------------------
// A is full and B is sparse/hyper
//----------------------------------------------------------
#if GB_IS_PAIR_MULTIPLIER
{
#if GB_IS_EQ_MONOID
// EQ_PAIR semiring
cij = (cij == 1) ;
#elif (GB_CTYPE_BITS > 0)
// PLUS, XOR monoids: A(:,i)'*B(:,j) is nnz(A(:,i)),
// for bool, 8-bit, 16-bit, or 32-bit integer
uint64_t t = ((uint64_t) cij) + bjnz ;
cij = (GB_CTYPE) (t & GB_CTYPE_BITS) ;
#elif GB_IS_PLUS_FC32_MONOID
// PLUS monoid for float complex
cij = GxB_CMPLXF (crealf (cij) + (float) bjnz, 0) ;
#elif GB_IS_PLUS_FC64_MONOID
// PLUS monoid for double complex
cij = GxB_CMPLX (creal (cij) + (double) bjnz, 0) ;
#else
// PLUS monoid for float, double, or 64-bit integers
cij += (GB_CTYPE) bjnz ;
#endif
}
#elif GB_IS_MIN_FIRSTJ_SEMIRING
{
// MIN_FIRSTJ semiring: take the first entry in B(:,j)
if (bjnz > 0)
{
int64_t k = Bi [pB] + GB_OFFSET ;
cij = GB_IMIN (cij, k) ;
}
}
#elif GB_IS_MAX_FIRSTJ_SEMIRING
{
// MAX_FIRSTJ semiring: take the last entry in B(:,j)
if (bjnz > 0)
{
int64_t k = Bi [pB_end-1] + GB_OFFSET ;
cij = GB_IMAX (cij, k) ;
}
}
#else
{
GB_PRAGMA_SIMD_DOT (cij)
for (int64_t p = pB ; p < pB_end ; p++)
{
int64_t k = Bi [p] ;
GB_DOT (k, pA+k, p) ; // cij += A(k,i)*B(k,j)
}
}
#endif
}
#else
{
//----------------------------------------------------------
// A is bitmap and B is sparse/hyper
//----------------------------------------------------------
#if GB_IS_MIN_FIRSTJ_SEMIRING
{
// MIN_FIRSTJ semiring: take the first entry
for (int64_t p = pB ; p < pB_end ; p++)
{
int64_t k = Bi [p] ;
if (Ab [pA+k])
{
cij = GB_IMIN (cij, k + GB_OFFSET) ;
break ;
}
}
}
#elif GB_IS_MAX_FIRSTJ_SEMIRING
{
// MAX_FIRSTJ semiring: take the last entry
for (int64_t p = pB_end-1 ; p >= pB ; p--)
{
int64_t k = Bi [p] ;
if (Ab [pA+k])
{
cij = GB_IMAX (cij, k + GB_OFFSET) ;
break ;
}
}
}
#else
{
GB_PRAGMA_SIMD_DOT (cij)
for (int64_t p = pB ; p < pB_end ; p++)
{
int64_t k = Bi [p] ;
if (Ab [pA+k])
{
GB_DOT (k, pA+k, p) ; // cij += A(k,i)*B(k,j)
}
}
}
#endif
}
#endif
//--------------------------------------------------------------
// save C(i,j)
//--------------------------------------------------------------
Cx [pC] = cij ;
}
}
}
}
#elif ( (GB_A_IS_SPARSE || GB_A_IS_HYPER) && (GB_B_IS_SPARSE || GB_B_IS_HYPER))
{
//--------------------------------------------------------------------------
// C+=A'*B where A and B are both sparse/hyper
//--------------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
const int a_tid = tid / nbslice ;
const int b_tid = tid % nbslice ;
const int64_t kA_start = A_slice [a_tid] ;
const int64_t kA_end = A_slice [a_tid+1] ;
const int64_t kB_start = B_slice [b_tid] ;
const int64_t kB_end = B_slice [b_tid+1] ;
//----------------------------------------------------------------------
// C+=A'*B via dot products
//----------------------------------------------------------------------
for (int64_t kB = kB_start ; kB < kB_end ; kB++)
{
//------------------------------------------------------------------
// get B(:,j) and C(:,j)
//------------------------------------------------------------------
#if GB_B_IS_HYPER
const int64_t j = Bh [kB] ;
#else
const int64_t j = kB ;
#endif
const int64_t pC_start = j * cvlen ;
const int64_t pB_start = Bp [kB] ;
const int64_t pB_end = Bp [kB+1] ;
const int64_t bjnz = pB_end - pB_start ;
//------------------------------------------------------------------
// C(:,j) += A'*B(:,j) where C is full
//------------------------------------------------------------------
for (int64_t kA = kA_start ; kA < kA_end ; kA++)
{
//--------------------------------------------------------------
// get A(:,i)
//--------------------------------------------------------------
#if GB_A_IS_HYPER
const int64_t i = Ah [kA] ;
#else
const int64_t i = kA ;
#endif
int64_t pA = Ap [kA] ;
const int64_t pA_end = Ap [kA+1] ;
const int64_t ainz = pA_end - pA ;
//--------------------------------------------------------------
// get C(i,j)
//--------------------------------------------------------------
int64_t pC = i + pC_start ; // C(i,j) is at Cx [pC]
GB_CTYPE GB_GET4C (cij, pC) ; // cij = Cx [pC]
//--------------------------------------------------------------
// C(i,j) += A (:,i)*B(:,j): a single dot product
//--------------------------------------------------------------
int64_t pB = pB_start ;
//----------------------------------------------------------
// both A and B are sparse/hyper
//----------------------------------------------------------
// The MIN_FIRSTJ semirings are exploited, by terminating as
// soon as any entry is found. The MAX_FIRSTJ semirings are
// not treated specially here. They could be done with a
// backwards traversal of the sparse vectors A(:,i) and
// B(:,j).
if (ainz == 0 || bjnz == 0 ||
Ai [pA_end-1] < Bi [pB_start] ||
Bi [pB_end-1] < Ai [pA])
{
//------------------------------------------------------
// A(:,i) and B(:,j) don't overlap, or are empty
//------------------------------------------------------
}
else if (ainz > 8 * bjnz)
{
//------------------------------------------------------
// B(:,j) is very sparse compared to A(:,i)
//------------------------------------------------------
while (pA < pA_end && pB < pB_end)
{
int64_t ia = Ai [pA] ;
int64_t ib = Bi [pB] ;
if (ia < ib)
{
// A(ia,i) appears before B(ib,j)
// discard all entries A(ia:ib-1,i)
int64_t pleft = pA + 1 ;
int64_t pright = pA_end - 1 ;
GB_TRIM_BINARY_SEARCH (ib, Ai, pleft, pright) ;
ASSERT (pleft > pA) ;
pA = pleft ;
}
else if (ib < ia)
{
// B(ib,j) appears before A(ia,i)
pB++ ;
}
else // ia == ib == k
{
// A(k,i) and B(k,j) are next entries to merge
GB_DOT (ia, pA, pB) ; // cij += A(k,i)*B(k,j)
#if GB_IS_MIN_FIRSTJ_SEMIRING
break ;
#endif
pA++ ;
pB++ ;
}
}
}
else if (bjnz > 8 * ainz)
{
//------------------------------------------------------
// A(:,i) is very sparse compared to B(:,j)
//------------------------------------------------------
while (pA < pA_end && pB < pB_end)
{
int64_t ia = Ai [pA] ;
int64_t ib = Bi [pB] ;
if (ia < ib)
{
// A(ia,i) appears before B(ib,j)
pA++ ;
}
else if (ib < ia)
{
// B(ib,j) appears before A(ia,i)
// discard all entries B(ib:ia-1,j)
int64_t pleft = pB + 1 ;
int64_t pright = pB_end - 1 ;
GB_TRIM_BINARY_SEARCH (ia, Bi, pleft, pright) ;
ASSERT (pleft > pB) ;
pB = pleft ;
}
else // ia == ib == k
{
// A(k,i) and B(k,j) are next entries to merge
GB_DOT (ia, pA, pB) ; // cij += A(k,i)*B(k,j)
#if GB_IS_MIN_FIRSTJ_SEMIRING
break ;
#endif
pA++ ;
pB++ ;
}
}
}
else
{
//------------------------------------------------------
// A(:,i) and B(:,j) have about the same sparsity
//------------------------------------------------------
while (pA < pA_end && pB < pB_end)
{
int64_t ia = Ai [pA] ;
int64_t ib = Bi [pB] ;
if (ia < ib)
{
// A(ia,i) appears before B(ib,j)
pA++ ;
}
else if (ib < ia)
{
// B(ib,j) appears before A(ia,i)
pB++ ;
}
else // ia == ib == k
{
// A(k,i) and B(k,j) are the entries to merge
GB_DOT (ia, pA, pB) ; // cij += A(k,i)*B(k,j)
#if GB_IS_MIN_FIRSTJ_SEMIRING
break ;
#endif
pA++ ;
pB++ ;
}
}
}
//--------------------------------------------------------------
// save C(i,j)
//--------------------------------------------------------------
Cx [pC] = cij ;
}
}
}
}
#endif
#undef GB_IS_MIN_FIRSTJ_SEMIRING
#undef GB_IS_MAX_FIRSTJ_SEMIRING
#undef GB_GET4C
#undef GB_SPECIAL_CASE_OR_TERMINAL
#undef GB_UNROLL
|
GB_binop__div_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_uint32)
// A.*B function (eWiseMult): GB (_AemultB_01__div_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__div_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__div_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_uint32)
// A*D function (colscale): GB (_AxD__div_uint32)
// D*A function (rowscale): GB (_DxB__div_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__div_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__div_uint32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_uint32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_uint32)
// C=scalar+B GB (_bind1st__div_uint32)
// C=scalar+B' GB (_bind1st_tran__div_uint32)
// C=A+scalar GB (_bind2nd__div_uint32)
// C=A'+scalar GB (_bind2nd_tran__div_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 32)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IDIV_UNSIGNED (x, y, 32) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_UINT32 || GxB_NO_DIV_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense, using this file's operator
// (unsigned 32-bit division, GB_IDIV_UNSIGNED) — see the template for the
// exact update rule.
// NOTE(review): unlike the other kernels in this file this method returns
// void and has no GB_DISABLE guard — presumably the caller checks that the
// operator is enabled before dispatching here; confirm against the caller.
void GB (_Cdense_ewise3_accum__div_uint32)
(
GrB_Matrix C, // input/output matrix, modified in place
const GrB_Matrix A, // first input matrix, not modified
const GrB_Matrix B, // second input matrix, not modified
const int nthreads // number of OpenMP threads to use
)
{
// all of the work is done by the shared template
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, using this file's operator
// (unsigned 32-bit division); no accumulator is applied.
GrB_Info GB (_Cdense_ewise3_noaccum__div_uint32)
(
GrB_Matrix C, // output matrix, computed in place
const GrB_Matrix A, // first input matrix, not modified
const GrB_Matrix B, // second input matrix, not modified
const int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this operator/type pairing was compiled out (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
// the shared template does all the work
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, applying
// this file's operator (see GB_BINOP above) entrywise.
GrB_Info GB (_Cdense_accumB__div_uint32)
(
GrB_Matrix C, // dense matrix, updated in place
const GrB_Matrix B, // sparse matrix whose entries are folded into C
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads // parallel partition of B's entries into tasks
)
{
#if GB_DISABLE
// this operator/type pairing was compiled out (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
{
// the shared template does all the work
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C, applying this
// file's operator (see GB_BINOP above) to every entry of C.
// Fix: the original had a second, unreachable "return (GrB_SUCCESS) ;"
// after the inner block (the return inside the block always fires);
// the dead statement has been removed.
GrB_Info GB (_Cdense_accumb__div_uint32)
(
GrB_Matrix C, // dense matrix, updated in place
const GB_void *p_bwork, // pointer to the scalar b, of type uint32_t
const int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this operator/type pairing was compiled out (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
// the shared template does all the work
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying this
// file's operator to each entry.
GrB_Info GB (_AxD__div_uint32)
(
GrB_Matrix C, // output matrix
const GrB_Matrix A, bool A_is_pattern, // input matrix; true if A's values are not needed
const GrB_Matrix D, bool D_is_pattern, // diagonal matrix; true if D's values are not needed
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads // parallel partition of A's entries into tasks
)
{
#if GB_DISABLE
// this operator/type pairing was compiled out (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
// typed view of C's numeric values; the template fills them in
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying this
// file's operator to each entry.
GrB_Info GB (_DxB__div_uint32)
(
GrB_Matrix C, // output matrix
const GrB_Matrix D, bool D_is_pattern, // diagonal matrix; true if D's values are not needed
const GrB_Matrix B, bool B_is_pattern, // input matrix; true if B's values are not needed
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this operator/type pairing was compiled out (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
// typed view of C's numeric values; the template fills them in
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, where entries present in only one of
// A or B are copied to C and overlapping entries are combined with this
// file's operator.
GrB_Info GB (_AaddB__div_uint32)
(
GrB_Matrix C, // output matrix
const int C_sparsity, // sparsity structure to use for C
const GrB_Matrix M, // optional mask (may be NULL)
const bool Mask_struct, // if true, use only the structure of M
const bool Mask_comp, // if true, use the complemented mask !M
const GrB_Matrix A, // first input matrix
const GrB_Matrix B, // second input matrix
const bool Ch_is_Mh, // presumably: C's hyperlist equals M's — confirm in GB_add
const int64_t *restrict C_to_M, // vector mapping C -> M (see GB_add_template.c)
const int64_t *restrict C_to_A, // vector mapping C -> A
const int64_t *restrict C_to_B, // vector mapping C -> B
const GB_task_struct *restrict TaskList, // parallel task descriptors
const int C_ntasks, // number of tasks
const int C_nthreads, // number of OpenMP threads to use
GB_Context Context
)
{
#if GB_DISABLE
// this operator/type pairing was compiled out (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
// workspace for slicing M, A, and B; released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, combining entries present in both
// A and B with this file's operator.
GrB_Info GB (_AemultB_01__div_uint32)
(
GrB_Matrix C, // output matrix
const int C_sparsity, // sparsity structure to use for C
const int ewise_method, // which emult method variant to apply
const GrB_Matrix M, // optional mask (may be NULL)
const bool Mask_struct, // if true, use only the structure of M
const bool Mask_comp, // if true, use the complemented mask !M
const GrB_Matrix A, // first input matrix
const GrB_Matrix B, // second input matrix
const int64_t *restrict C_to_M, // vector mapping C -> M (see GB_emult_01_meta.c)
const int64_t *restrict C_to_A, // vector mapping C -> A
const int64_t *restrict C_to_B, // vector mapping C -> B
const GB_task_struct *restrict TaskList, // parallel task descriptors
const int C_ntasks, // number of tasks
const int C_nthreads, // number of OpenMP threads to use
GB_Context Context
)
{
#if GB_DISABLE
// this operator/type pairing was compiled out (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
// the shared meta-template does all the work
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for this operator (defined above): the flipped case
// z=div(y,x) is handled by the caller as RDIV, so only the unflipped
// branch below is compiled in.
GrB_Info GB (_AemultB_02__div_uint32)
(
GrB_Matrix C, // output matrix
const GrB_Matrix M, // optional mask (may be NULL)
const bool Mask_struct, // if true, use only the structure of M
const bool Mask_comp, // if true, use the complemented mask !M
const GrB_Matrix A, // first input, sparse or hypersparse
const GrB_Matrix B, // second input, bitmap or full
const bool flipxy, // if true, the caller wants z = op (y, x)
const int64_t *restrict Cp_kfirst, // per-task offsets into C (see GB_emult_02_template.c)
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads // parallel partition of A's entries into tasks
)
{
#if GB_DISABLE
// this operator/type pairing was compiled out (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full; entries are combined with this file's operator.
GrB_Info GB (_AemultB_03__div_uint32)
(
GrB_Matrix C, // output matrix
const GrB_Matrix M, // mask matrix, sparse or hypersparse
const bool Mask_struct, // if true, use only the structure of M
const GrB_Matrix A, // first input, bitmap or full
const GrB_Matrix B, // second input, bitmap or full
const int64_t *restrict Cp_kfirst, // per-task offsets into C (see GB_emult_03_template.c)
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads // parallel partition of M's entries into tasks
)
{
#if GB_DISABLE
// this operator/type pairing was compiled out (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
// the shared template does all the work
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap;
// entries are combined with this file's operator.
GrB_Info GB (_AemultB_bitmap__div_uint32)
(
GrB_Matrix C, // output matrix, bitmap
const int ewise_method, // which bitmap emult method variant to apply
const GrB_Matrix M, // optional mask (may be NULL)
const bool Mask_struct, // if true, use only the structure of M
const bool Mask_comp, // if true, use the complemented mask !M
const GrB_Matrix A, // first input matrix
const GrB_Matrix B, // second input matrix
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, // parallel partition of M's entries into tasks
const int C_nthreads, // number of OpenMP threads for work on C
GB_Context Context
)
{
#if GB_DISABLE
// this operator/type pairing was compiled out (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
// the shared template does all the work
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the DIV_UINT32 operator with the scalar x bound
// as its first argument: Cx [p] = x / Bx [p] for every entry present in B.
// Entries absent from the bitmap Bb are left untouched in Cx.
GrB_Info GB (_bind1st__div_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this operator/type pairing was compiled out (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped input/output arrays, and the bound scalar
uint32_t *Cz = (uint32_t *) Cx_output ;
uint32_t *Bz = (uint32_t *) Bx_input ;
uint32_t xval = (*((uint32_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only operate on entries present in the bitmap (GBB is true if present)
if (GBB (Bb, k))
{
uint32_t bkj = Bz [k] ;
Cz [k] = GB_IDIV_UNSIGNED (xval, bkj, 32) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the DIV_UINT32 operator with the scalar y bound
// as its second argument: Cx [p] = Ax [p] / y for every entry present in A.
// Entries absent from the bitmap Ab are left untouched in Cx.
GrB_Info GB (_bind2nd__div_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this operator/type pairing was compiled out (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped input/output arrays, and the bound scalar
uint32_t *Cz = (uint32_t *) Cx_output ;
uint32_t *Az = (uint32_t *) Ax_input ;
uint32_t yval = (*((uint32_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only operate on entries present in the bitmap (GBB is true if present)
if (GBB (Ab, k))
{
uint32_t akj = Az [k] ;
Cz [k] = GB_IDIV_UNSIGNED (akj, yval, 32) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the textually-included GB_unop_transpose.c below:
// for each entry it computes Cx [pC] = x / Ax [pA] with unsigned integer
// division (GB_IDIV_UNSIGNED handles division by zero without UB).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 32) ; \
}
// Transposes A and applies z = div(x, aij) in one pass, writing into C.
// Workspaces/A_slice partition the work across nthreads; the actual loop
// lives in GB_unop_transpose.c, which is parameterized by the macros above.
GrB_Info GB (_bind1st_tran__div_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// (generated code) re-establish GB_ATYPE for any subsequent kernels in this
// translation unit; this affects preprocessing only, not control flow
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the textually-included GB_unop_transpose.c below:
// for each entry it computes Cx [pC] = Ax [pA] / y with unsigned integer
// division (GB_IDIV_UNSIGNED handles division by zero without UB).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 32) ; \
}
// Transposes A and applies z = div(aij, y) in one pass, writing into C.
// Workspaces/A_slice partition the work across nthreads; the actual loop
// lives in GB_unop_transpose.c, which is parameterized by the macros above.
GrB_Info GB (_bind2nd_tran__div_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
CRSMatrix.h | /**
* Copyright (c) 2012, OpenGeoSys Community (http://www.opengeosys.com)
* Distributed under a Modified BSD License.
* See accompanying file LICENSE.txt or
* http://www.opengeosys.com/LICENSE.txt
*
*
* \file CRSMatrix.h
*
* Created on 2011-09-20 by Thomas Fischer
*/
#ifndef CRSMATRIX_H
#define CRSMATRIX_H
#include <string>
#include <fstream>
#include <iostream>
#include <cassert>
// Base
#include <algorithm>
#include "BaseLib/CodingTools.h"
// MathLib
#include "SparseMatrixBase.h"
#include "sparse_io.h"
#include "amuxCRS.h"
#include "../Preconditioner/generateDiagPrecond.h"
namespace MathLib {
// NOTE(review): "MSVC_VER" is not a macro MSVC defines; the compiler's version
// macro is "_MSC_VER", so this suppression is likely dead code — verify and
// fix together with the matching #ifdef/pop at the end of the file.
#ifdef MSVC_VER
#pragma warning(push)
#pragma warning(disable: 4018)
#endif
template<typename FP_TYPE, typename IDX_TYPE>
/**
 * Compressed-row-storage (CRS/CSR) sparse matrix.
 *
 * The matrix is described by three arrays owned by this object:
 *  - _row_ptr: _n_rows+1 entries; _row_ptr[k].._row_ptr[k+1]-1 index row k
 *  - _col_idx: column index of each non-zero entry (sorted within a row)
 *  - _data:    value of each non-zero entry
 *
 * Copying is disallowed (DISALLOW_COPY_AND_ASSIGN); use clone() for a deep copy.
 */
class CRSMatrix: public SparseMatrixBase<FP_TYPE, IDX_TYPE>
{
public:
    /// Creates an empty matrix; no CRS arrays are allocated.
    CRSMatrix() :
        _row_ptr(NULL), _col_idx(NULL), _data(NULL)
    {
    }

    /**
     * Reads a square matrix in binary CRS format from a file.
     * On failure the object stays empty and a message is written to stdout.
     * @param fname path of the binary CRS file
     */
    explicit CRSMatrix(std::string const &fname) :
        SparseMatrixBase<FP_TYPE, IDX_TYPE>(),
        _row_ptr(NULL), _col_idx(NULL), _data(NULL)
    {
        std::ifstream in(fname.c_str(), std::ios::in | std::ios::binary);
        if (in) {
            CS_read(in, SparseMatrixBase<FP_TYPE, IDX_TYPE>::_n_rows, _row_ptr, _col_idx, _data);
            // the binary format stores square matrices only
            SparseMatrixBase<FP_TYPE, IDX_TYPE>::_n_cols = SparseMatrixBase<FP_TYPE, IDX_TYPE>::_n_rows;
            in.close();
        } else {
            std::cout << "cannot open " << fname << std::endl;
        }
    }

    /**
     * Creates a deep copy of this matrix (assumed square, as everywhere in
     * this class). The caller takes ownership of the returned object.
     * @return a newly allocated copy of *this
     */
    CRSMatrix* clone()
    {
        const IDX_TYPE n_rows(MatrixBase<IDX_TYPE>::_n_rows);
        CRSMatrix<FP_TYPE, IDX_TYPE> *obj = new CRSMatrix<FP_TYPE, IDX_TYPE>(n_rows);
        const IDX_TYPE n_nz = getNNZ();
        obj->_row_ptr = new IDX_TYPE[n_rows + 1];
        obj->_col_idx = new IDX_TYPE[n_nz];
        obj->_data = new FP_TYPE[n_nz];
        std::copy(_row_ptr, _row_ptr + n_rows + 1, obj->_row_ptr);
        std::copy(_col_idx, _col_idx + n_nz, obj->_col_idx);
        std::copy(_data, _data + n_nz, obj->_data);
        return obj;
    }

    /**
     * Wraps existing CRS arrays as an n x n matrix.
     * Ownership of iA, jA and A transfers to this object (freed in the dtor).
     */
    CRSMatrix(IDX_TYPE n, IDX_TYPE *iA, IDX_TYPE *jA, FP_TYPE* A) :
        SparseMatrixBase<FP_TYPE, IDX_TYPE>(n, n),
        _row_ptr(iA), _col_idx(jA), _data(A)
    {}

    /// Creates an n1 x n1 matrix whose arrays are allocated later.
    CRSMatrix(IDX_TYPE n1) :
        SparseMatrixBase<FP_TYPE, IDX_TYPE>(n1, n1),
        _row_ptr(NULL), _col_idx(NULL), _data(NULL)
    {}

    virtual ~CRSMatrix()
    {
        delete [] _row_ptr;
        delete [] _col_idx;
        delete [] _data;
    }

    /// Assigns the scalar a to every non-zero entry (sparsity is unchanged).
    CRSMatrix<FP_TYPE,IDX_TYPE>& operator= (FP_TYPE a)
    {
        const IDX_TYPE nnz(getNNZ()); // hoisted: getNNZ() is loop-invariant
        for (IDX_TYPE i = 0; i < nnz; i++)
            _data[i] = a;
        return *this;
    }

    /// Computes y = d * A * x (sparse matrix-vector product).
    virtual void amux(FP_TYPE d, FP_TYPE const * const __restrict__ x, FP_TYPE * __restrict__ y) const
    {
        amuxCRS<FP_TYPE, IDX_TYPE>(d, this->getNRows(), _row_ptr, _col_idx, _data, x, y);
    }

    /// Preconditioner hook; the base class applies no preconditioning.
    virtual void precondApply(FP_TYPE* /*x*/) const
    {}

    /**
     * get the number of non-zero entries
     * @return number of non-zero entries
     */
    IDX_TYPE getNNZ() const { return _row_ptr[MatrixBase<IDX_TYPE>::_n_rows]; }

    /**
     * This method inserts/overwrites a non-zero matrix entry.
     * Precondition: the entry has to be in the sparsity pattern!
     * @param row the row number
     * @param col the column number
     * @param val the value that should be set at pos row,col
     * @return a value > 0, if the entry is not contained in the sparsity pattern
     */
    int setValue(IDX_TYPE row, IDX_TYPE col, FP_TYPE val)
    {
        assert(0 <= (signed)row && row < MatrixBase<IDX_TYPE>::_n_rows);
        // linear search - for matrices with many entries per row binary search
        // is much faster; the early exit relies on sorted column indices
        const IDX_TYPE idx_end (_row_ptr[row+1]);
        IDX_TYPE j(_row_ptr[row]), k;
        while (j < idx_end && (k = _col_idx[j]) <= col) {
            if (k == col) {
                _data[j] = val;
                return 0;
            }
            j++;
        }
        return 1;
    }

    /**
     * This method adds value val to an existing matrix entry at position row,col.
     * Precondition: the entry has to be in the sparsity pattern!
     * @param row the row number
     * @param col the column number
     * @param val the value that should be added at pos row,col
     * @return a value > 0, if the entry is not contained in the sparsity pattern
     */
    int addValue(IDX_TYPE row, IDX_TYPE col, FP_TYPE val)
    {
        assert(0 <= (signed)row && row < MatrixBase<IDX_TYPE>::_n_rows);
        // linear search - for matrices with many entries per row binary search
        // is much faster; the early exit relies on sorted column indices
        const IDX_TYPE idx_end (_row_ptr[row+1]);
        IDX_TYPE j(_row_ptr[row]), k;
        while (j < idx_end && (k = _col_idx[j]) <= col) {
            if (k == col) {
#ifdef _OPENMP
                // several threads may accumulate into the same entry
                #pragma omp atomic
#endif
                _data[j] += val;
                return 0;
            }
            j++;
        }
        return 1;
    }

    /**
     * This is an access operator to a non-zero matrix entry. If the value of
     * a non-existing matrix entry is requested, 0.0 is returned.
     * @param row the row number
     * @param col the column number
     * @return The corresponding matrix entry or 0.0.
     */
    double getValue(IDX_TYPE row, IDX_TYPE col)
    {
        assert(0 <= (signed)row && row < MatrixBase<IDX_TYPE>::_n_rows);
        // linear search - for matrices with many entries per row binary search
        // is much faster; the early exit relies on sorted column indices
        const IDX_TYPE idx_end (_row_ptr[row+1]);
        IDX_TYPE j(_row_ptr[row]), k;
        while (j < idx_end && (k = _col_idx[j]) <= col) {
            if (k == col) {
                return _data[j];
            }
            j++;
        }
        return 0.0;
    }

    /**
     * This is the constant access operator to a non-zero matrix entry.
     * Precondition: the entries have to be in the sparsity pattern!
     * @param row the row number
     * @param col the column number
     * @return The corresponding matrix entry, or 0.0 if absent.
     */
    FP_TYPE operator() (IDX_TYPE row, IDX_TYPE col) const
    {
        assert(0 <= (signed)row && row < MatrixBase<IDX_TYPE>::_n_rows);
        // linear search - for matrices with many entries per row binary search
        // is much faster; the early exit relies on sorted column indices
        const IDX_TYPE idx_end (_row_ptr[row+1]);
        IDX_TYPE j(_row_ptr[row]), k;
        while (j < idx_end && (k = _col_idx[j]) <= col) {
            if (k == col) {
                return _data[j];
            }
            j++;
        }
        return 0.0;
    }

    /**
     * get const access to the row pointer array of CRS matrix
     * @return the index array _row_ptr
     */
    IDX_TYPE const* getRowPtrArray() const { return _row_ptr; }

    /**
     * get const access to the column index array of CRS matrix
     * @return the index array _col_idx
     */
    IDX_TYPE const* getColIdxArray () const { return _col_idx; }

    /**
     * get the matrix entries within an array of CRS matrix
     * @return the value array _data
     */
    FP_TYPE const* getEntryArray() const { return _data; }

    /**
     * erase rows and columns from sparse matrix
     * @param n_rows_cols number of rows / columns to remove
     * @param rows_cols sorted list of rows/columns that should be removed
     */
    void eraseEntries(IDX_TYPE n_rows_cols, IDX_TYPE const* const rows_cols)
    {
        IDX_TYPE n_cols(MatrixBase<IDX_TYPE>::_n_rows);
        //*** remove the rows
        removeRows(n_rows_cols, rows_cols);
        //*** transpose
        transpose(n_cols);
        //*** removing columns in the original means removing rows in the transposed
        removeRows(n_rows_cols, rows_cols);
        //*** transpose again to restore the original orientation
        transpose(MatrixBase<IDX_TYPE>::_n_rows);
    }

    /**
     * get the j-th column of the sparse matrix as a dense vector
     * @param j the column number that should be returned
     * @param column_entries the column entries (have to be allocated,
     *        length getNRows()); entries outside the pattern are set to 0.0
     */
    void getColumn(IDX_TYPE j, FP_TYPE* column_entries) const
    {
        for (IDX_TYPE k(0); k < MatrixBase<IDX_TYPE>::_n_rows; k++) {
            const IDX_TYPE end_row(_row_ptr[k+1]);
            // BUGFIX: the scan must start at the *beginning* of row k
            // (_row_ptr[k]); the previous code started at _row_ptr[k+1],
            // so the loop never ran and every entry read back as zero.
            IDX_TYPE i(_row_ptr[k]);
            while (i < end_row && _col_idx[i] != j) {
                i++;
            }
            if (i == end_row) {
                column_entries[k] = 0.0;
            } else {
                column_entries[k] = _data[i];
            }
        }
    }

    //#ifndef NDEBUG
    /// Dumps the sparsity pattern (per row) and the value array to stdout.
    void printMat() const
    {
        for (IDX_TYPE k(0); k < MatrixBase<IDX_TYPE>::_n_rows; k++) {
            std::cout << k << ": " << std::flush;
            const IDX_TYPE row_end(_row_ptr[k+1]);
            for (IDX_TYPE j(_row_ptr[k]); j < row_end; j++) {
                std::cout << _col_idx[j] << " " << std::flush;
            }
            std::cout << std::endl;
        }
        for (IDX_TYPE k(0); k < getNNZ(); k++) {
            std::cout << _data[k] << " ";
        }
        std::cout << std::endl;
    }
    //#endif

protected:
    /**
     * Removes the given rows from the matrix (columns are untouched).
     * @param n_rows_cols number of rows to remove
     * @param rows sorted list of row indices to remove
     */
    void removeRows (IDX_TYPE n_rows_cols, IDX_TYPE const*const rows)
    {
        //*** determine the number of new rows and the number of entries without the rows
        const IDX_TYPE n_new_rows(MatrixBase<IDX_TYPE>::_n_rows - n_rows_cols);
        IDX_TYPE *row_ptr_new(new IDX_TYPE[n_new_rows+1]);
        row_ptr_new[0] = 0;
        // first store per-row entry counts of the surviving rows
        IDX_TYPE row_cnt (1), erase_row_cnt(0);
        for (IDX_TYPE k(0); k < MatrixBase<IDX_TYPE>::_n_rows; k++) {
            if (erase_row_cnt < n_rows_cols) {
                if (k != rows[erase_row_cnt]) {
                    row_ptr_new[row_cnt] = _row_ptr[k+1] - _row_ptr[k];
                    row_cnt++;
                } else {
                    erase_row_cnt++;
                }
            } else {
                row_ptr_new[row_cnt] = _row_ptr[k+1] - _row_ptr[k];
                row_cnt++;
            }
        }
        //*** prefix-sum the counts to obtain the new row pointers
        for (IDX_TYPE k(0); k < n_new_rows; k++) {
            row_ptr_new[k+1] = row_ptr_new[k+1] + row_ptr_new[k];
        }
        //*** create new memory for col_idx and data
        IDX_TYPE nnz_new(row_ptr_new[n_new_rows]);
        IDX_TYPE *col_idx_new (new IDX_TYPE[nnz_new]);
        FP_TYPE *data_new (new FP_TYPE[nnz_new]);
        //*** copy the entries
        // working copy of the new row pointers (advanced while copying)
        IDX_TYPE *row_ptr_new_tmp(new IDX_TYPE[n_new_rows+1]);
        for (IDX_TYPE k(0); k <= n_new_rows; k++) {
            row_ptr_new_tmp[k] = row_ptr_new[k];
        }
        erase_row_cnt = 0;
        row_cnt = 0;
        // copy column index and data entries of the surviving rows
        for (IDX_TYPE k(0); k < MatrixBase<IDX_TYPE>::_n_rows; k++) {
            if (erase_row_cnt < n_rows_cols) {
                if (k != rows[erase_row_cnt]) {
                    const IDX_TYPE end (_row_ptr[k+1]);
                    // walk through row
                    for (IDX_TYPE j(_row_ptr[k]); j < end; j++) {
                        col_idx_new[row_ptr_new_tmp[row_cnt]] = _col_idx[j];
                        data_new[row_ptr_new_tmp[row_cnt]] = _data[j];
                        row_ptr_new_tmp[row_cnt]++;
                    }
                    row_cnt++;
                } else {
                    erase_row_cnt++;
                }
            } else {
                const IDX_TYPE end (_row_ptr[k+1]);
                // walk through row
                for (IDX_TYPE j(_row_ptr[k]); j < end; j++) {
                    col_idx_new[row_ptr_new_tmp[row_cnt]] = _col_idx[j];
                    data_new[row_ptr_new_tmp[row_cnt]] = _data[j];
                    row_ptr_new_tmp[row_cnt]++;
                }
                row_cnt++;
            }
        }
        // swap the new arrays into place, then free the old ones
        MatrixBase<IDX_TYPE>::_n_rows -= n_rows_cols;
        std::swap (row_ptr_new, _row_ptr);
        std::swap (col_idx_new, _col_idx);
        std::swap (data_new, _data);
        delete [] row_ptr_new_tmp;
        delete [] row_ptr_new;
        delete [] col_idx_new;
        delete [] data_new;
    }

    /**
     * Transposes the matrix in place (classic CRS two-pass transpose).
     * @param n_cols number of columns of *this (= rows of the transposed matrix)
     */
    void transpose (IDX_TYPE n_cols)
    {
        // create a helper array row_ptr_nnz
        IDX_TYPE *row_ptr_nnz(new IDX_TYPE[n_cols+1]);
        for (IDX_TYPE k(0); k <= n_cols; k++) {
            row_ptr_nnz[k] = 0;
        }
        // count entries per row in the transposed matrix
        IDX_TYPE nnz(_row_ptr[MatrixBase<IDX_TYPE>::_n_rows]);
        for (IDX_TYPE k(0); k < nnz; k++) {
            row_ptr_nnz[_col_idx[k]]++;
        }
        // create row_ptr_trans via prefix sums of the counts
        IDX_TYPE *row_ptr_trans(new IDX_TYPE[n_cols + 1]);
        row_ptr_trans[0] = 0;
        for (IDX_TYPE k(0); k < n_cols; k++) {
            row_ptr_trans[k+1] = row_ptr_trans[k] + row_ptr_nnz[k];
        }
        // make a copy of row_ptr_trans (advanced while scattering below)
        for (IDX_TYPE k(0); k <= n_cols; k++) {
            row_ptr_nnz[k] = row_ptr_trans[k];
        }
        // create arrays col_idx_trans and data_trans
        assert(nnz == row_ptr_trans[n_cols]);
        IDX_TYPE *col_idx_trans(new IDX_TYPE[nnz]);
        FP_TYPE *data_trans(new FP_TYPE[nnz]);
        // scatter every entry (i,j) of *this to position (j,i)
        for (IDX_TYPE i(0); i < MatrixBase<IDX_TYPE>::_n_rows; i++) {
            const IDX_TYPE row_end(_row_ptr[i + 1]);
            for (IDX_TYPE j(_row_ptr[i]); j < row_end; j++) {
                const IDX_TYPE k(_col_idx[j]);
                col_idx_trans[row_ptr_nnz[k]] = i;
                data_trans[row_ptr_nnz[k]] = _data[j];
                row_ptr_nnz[k]++;
            }
        }
        // swap the new arrays into place, then free the old ones
        MatrixBase<IDX_TYPE>::_n_rows = n_cols;
        std::swap(row_ptr_trans, _row_ptr);
        std::swap(col_idx_trans, _col_idx);
        std::swap(data_trans, _data);
        delete[] row_ptr_nnz;
        delete[] row_ptr_trans;
        delete[] col_idx_trans;
        delete[] data_trans;
    }

    IDX_TYPE *_row_ptr; // _n_rows+1 row start offsets into _col_idx/_data
    IDX_TYPE *_col_idx; // column index of each non-zero entry
    FP_TYPE* _data;     // value of each non-zero entry

private:
    DISALLOW_COPY_AND_ASSIGN(CRSMatrix);
};
// NOTE(review): "MSVC_VER" is likely a typo for "_MSC_VER" (see the matching
// push at the top of the file); as written, this pop never compiles — verify
// and fix both guards together.
#ifdef MSVC_VER
#pragma warning(pop)
#endif
} // end namespace MathLib
#endif
|
convolution_sgemm_pack16to4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_pack16to4_avx512(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
// GEMM-based convolution: top_blob = kernel * bottom_im2col (+ bias), where
// the im2col input is pack16 (16 floats per element) and the output is pack4.
// Phase 1 permutes the input into cache-friendly tiles of 8/4/1 columns;
// phase 2 multiplies those tiles against the pre-interleaved kernel, tiling
// output channels by 4 (zmm), 2 (ymm) and 1 (xmm).
// Mat bottom_im2col(size, maxk, inch, 64u, 16, opt.workspace_allocator);

const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;

const int outch = top_blob.c;

const float* bias = _bias;

// permute
// tmp holds the re-tiled input: one channel per 8-column tile, then one per
// 4-column tile, then one per leftover column
Mat tmp;
if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + size % 4, 64u, 16, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + size % 4, 64u, 16, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 64u, 16, opt.workspace_allocator);
{
// tier 1: tiles of 8 columns, transposed 16x8 so that the 8 spatial
// positions become the fastest-varying dimension
int nn_size = size / 8;
int remain_size_start = 0;

#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;

float* tmpptr = tmp.channel(i / 8);

for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16;

for (int k = 0; k < maxk; k++)
{
// transpose 16x8
__m512 _r0 = _mm512_loadu_ps(img0);
__m512 _r1 = _mm512_loadu_ps(img0 + 16);
__m512 _r2 = _mm512_loadu_ps(img0 + 16 * 2);
__m512 _r3 = _mm512_loadu_ps(img0 + 16 * 3);
__m512 _r4 = _mm512_loadu_ps(img0 + 16 * 4);
__m512 _r5 = _mm512_loadu_ps(img0 + 16 * 5);
__m512 _r6 = _mm512_loadu_ps(img0 + 16 * 6);
__m512 _r7 = _mm512_loadu_ps(img0 + 16 * 7);

// stage 1: interleave pairs of rows at float granularity
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5);
__m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5);
__m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7);
__m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7);

// stage 2: gather 4-float groups
__m512 _tmp8 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp9 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpa = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpb = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpc = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpd = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpe = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpf = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));

// stages 3+4: permute 128-bit lanes to finish the 16x8 transpose
_tmp0 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(2, 0, 2, 0));
_tmp3 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(2, 0, 2, 0));
_tmp4 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(3, 1, 3, 1));
_tmp5 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(3, 1, 3, 1));
_tmp6 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(3, 1, 3, 1));
_tmp7 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(3, 1, 3, 1));

_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_r4 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r5 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_r6 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_r7 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));

_mm512_storeu_ps(tmpptr, _r0);
_mm512_storeu_ps(tmpptr + 16, _r1);
_mm512_storeu_ps(tmpptr + 16 * 2, _r2);
_mm512_storeu_ps(tmpptr + 16 * 3, _r3);
_mm512_storeu_ps(tmpptr + 16 * 4, _r4);
_mm512_storeu_ps(tmpptr + 16 * 5, _r5);
_mm512_storeu_ps(tmpptr + 16 * 6, _r6);
_mm512_storeu_ps(tmpptr + 16 * 7, _r7);

img0 += size * 16;
tmpptr += 128;
}
}
}

// tier 2: tiles of 4 columns, transposed 16x4
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;

#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;

float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);

for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16;

for (int k = 0; k < maxk; k++)
{
// transpose 16x4
__m512 _r0 = _mm512_loadu_ps(img0);
__m512 _r1 = _mm512_loadu_ps(img0 + 16);
__m512 _r2 = _mm512_loadu_ps(img0 + 16 * 2);
__m512 _r3 = _mm512_loadu_ps(img0 + 16 * 3);

__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);

__m512 _tmp4 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp5 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmp6 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp7 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));

_tmp0 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_tmp3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));

_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r3 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));

_mm512_storeu_ps(tmpptr, _r0);
_mm512_storeu_ps(tmpptr + 16, _r1);
_mm512_storeu_ps(tmpptr + 16 * 2, _r2);
_mm512_storeu_ps(tmpptr + 16 * 3, _r3);

img0 += size * 16;
tmpptr += 64;
}
}
}

// tier 3: leftover single columns, copied without transposition
remain_size_start += nn_size << 2;

#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);

for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16;

for (int k = 0; k < maxk; k++)
{
__m512 _val = _mm512_load_ps(img0);
_mm512_store_ps(tmpptr, _val);

img0 += size * 16;
tmpptr += 16;
}
}
}
}

// GEMM phase: output channels in tiles of 4 (one zmm holds 4 pack4 outputs),
// then 2 (ymm), then 1 (xmm)
int nn_outch = 0;
int remain_outch_start = 0;

nn_outch = outch >> 2;

#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;

float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p + 1);
float* outptr2 = top_blob.channel(p + 2);
float* outptr3 = top_blob.channel(p + 3);

const float zeros[16] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p * 4 : zeros;

int i = 0;
// 8 spatial positions x 4 output channels per iteration
for (; i + 7 < size; i += 8)
{
float* tmpptr = tmp.channel(i / 8);
const float* kptr = kernel.channel(p / 4);

int nn = inch * maxk * 16; // inch always > 0

__m512 _sum0 = _mm512_loadu_ps(biasptr);
__m512 _sum1 = _sum0;
__m512 _sum2 = _sum0;
__m512 _sum3 = _sum0;
__m512 _sum4 = _sum0;
__m512 _sum5 = _sum0;
__m512 _sum6 = _sum0;
__m512 _sum7 = _sum0;

for (int j = 0; j < nn; j++)
{
// one zmm of weights covers 4 output channels x 4 packed lanes
__m512 _w0 = _mm512_load_ps(kptr);

__m512 _val0 = _mm512_set1_ps(tmpptr[0]);
__m512 _val1 = _mm512_set1_ps(tmpptr[1]);
__m512 _val2 = _mm512_set1_ps(tmpptr[2]);
__m512 _val3 = _mm512_set1_ps(tmpptr[3]);
__m512 _val4 = _mm512_set1_ps(tmpptr[4]);
__m512 _val5 = _mm512_set1_ps(tmpptr[5]);
__m512 _val6 = _mm512_set1_ps(tmpptr[6]);
__m512 _val7 = _mm512_set1_ps(tmpptr[7]);

_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
_sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5);
_sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7);

tmpptr += 8;
kptr += 16;
}

// scatter each 128-bit lane (one pack4 output channel) to its channel
_mm_store_ps(outptr0, _mm512_extractf32x4_ps(_sum0, 0));
_mm_store_ps(outptr0 + 4, _mm512_extractf32x4_ps(_sum1, 0));
_mm_store_ps(outptr0 + 8, _mm512_extractf32x4_ps(_sum2, 0));
_mm_store_ps(outptr0 + 12, _mm512_extractf32x4_ps(_sum3, 0));
_mm_store_ps(outptr0 + 16, _mm512_extractf32x4_ps(_sum4, 0));
_mm_store_ps(outptr0 + 20, _mm512_extractf32x4_ps(_sum5, 0));
_mm_store_ps(outptr0 + 24, _mm512_extractf32x4_ps(_sum6, 0));
_mm_store_ps(outptr0 + 28, _mm512_extractf32x4_ps(_sum7, 0));
_mm_store_ps(outptr1, _mm512_extractf32x4_ps(_sum0, 1));
_mm_store_ps(outptr1 + 4, _mm512_extractf32x4_ps(_sum1, 1));
_mm_store_ps(outptr1 + 8, _mm512_extractf32x4_ps(_sum2, 1));
_mm_store_ps(outptr1 + 12, _mm512_extractf32x4_ps(_sum3, 1));
_mm_store_ps(outptr1 + 16, _mm512_extractf32x4_ps(_sum4, 1));
_mm_store_ps(outptr1 + 20, _mm512_extractf32x4_ps(_sum5, 1));
_mm_store_ps(outptr1 + 24, _mm512_extractf32x4_ps(_sum6, 1));
_mm_store_ps(outptr1 + 28, _mm512_extractf32x4_ps(_sum7, 1));
_mm_store_ps(outptr2, _mm512_extractf32x4_ps(_sum0, 2));
_mm_store_ps(outptr2 + 4, _mm512_extractf32x4_ps(_sum1, 2));
_mm_store_ps(outptr2 + 8, _mm512_extractf32x4_ps(_sum2, 2));
_mm_store_ps(outptr2 + 12, _mm512_extractf32x4_ps(_sum3, 2));
_mm_store_ps(outptr2 + 16, _mm512_extractf32x4_ps(_sum4, 2));
_mm_store_ps(outptr2 + 20, _mm512_extractf32x4_ps(_sum5, 2));
_mm_store_ps(outptr2 + 24, _mm512_extractf32x4_ps(_sum6, 2));
_mm_store_ps(outptr2 + 28, _mm512_extractf32x4_ps(_sum7, 2));
_mm_store_ps(outptr3, _mm512_extractf32x4_ps(_sum0, 3));
_mm_store_ps(outptr3 + 4, _mm512_extractf32x4_ps(_sum1, 3));
_mm_store_ps(outptr3 + 8, _mm512_extractf32x4_ps(_sum2, 3));
_mm_store_ps(outptr3 + 12, _mm512_extractf32x4_ps(_sum3, 3));
_mm_store_ps(outptr3 + 16, _mm512_extractf32x4_ps(_sum4, 3));
_mm_store_ps(outptr3 + 20, _mm512_extractf32x4_ps(_sum5, 3));
_mm_store_ps(outptr3 + 24, _mm512_extractf32x4_ps(_sum6, 3));
_mm_store_ps(outptr3 + 28, _mm512_extractf32x4_ps(_sum7, 3));

outptr0 += 32;
outptr1 += 32;
outptr2 += 32;
outptr3 += 32;
}
// 4 spatial positions x 4 output channels
for (; i + 3 < size; i += 4)
{
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const float* kptr = kernel.channel(p / 4);

int nn = inch * maxk * 16; // inch always > 0

__m512 _sum0 = _mm512_loadu_ps(biasptr);
__m512 _sum1 = _sum0;
__m512 _sum2 = _sum0;
__m512 _sum3 = _sum0;

for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(kptr);

__m512 _val0 = _mm512_set1_ps(tmpptr[0]);
__m512 _val1 = _mm512_set1_ps(tmpptr[1]);
__m512 _val2 = _mm512_set1_ps(tmpptr[2]);
__m512 _val3 = _mm512_set1_ps(tmpptr[3]);

_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);

tmpptr += 4;
kptr += 16;
}

_mm_store_ps(outptr0, _mm512_extractf32x4_ps(_sum0, 0));
_mm_store_ps(outptr0 + 4, _mm512_extractf32x4_ps(_sum1, 0));
_mm_store_ps(outptr0 + 8, _mm512_extractf32x4_ps(_sum2, 0));
_mm_store_ps(outptr0 + 12, _mm512_extractf32x4_ps(_sum3, 0));
_mm_store_ps(outptr1, _mm512_extractf32x4_ps(_sum0, 1));
_mm_store_ps(outptr1 + 4, _mm512_extractf32x4_ps(_sum1, 1));
_mm_store_ps(outptr1 + 8, _mm512_extractf32x4_ps(_sum2, 1));
_mm_store_ps(outptr1 + 12, _mm512_extractf32x4_ps(_sum3, 1));
_mm_store_ps(outptr2, _mm512_extractf32x4_ps(_sum0, 2));
_mm_store_ps(outptr2 + 4, _mm512_extractf32x4_ps(_sum1, 2));
_mm_store_ps(outptr2 + 8, _mm512_extractf32x4_ps(_sum2, 2));
_mm_store_ps(outptr2 + 12, _mm512_extractf32x4_ps(_sum3, 2));
_mm_store_ps(outptr3, _mm512_extractf32x4_ps(_sum0, 3));
_mm_store_ps(outptr3 + 4, _mm512_extractf32x4_ps(_sum1, 3));
_mm_store_ps(outptr3 + 8, _mm512_extractf32x4_ps(_sum2, 3));
_mm_store_ps(outptr3 + 12, _mm512_extractf32x4_ps(_sum3, 3));

outptr0 += 16;
outptr1 += 16;
outptr2 += 16;
outptr3 += 16;
}
// 1 spatial position x 4 output channels
for (; i < size; i++)
{
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const float* kptr = kernel.channel(p / 4);

int nn = inch * maxk * 16; // inch always > 0

__m512 _sum = _mm512_loadu_ps(biasptr);

for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(kptr);
__m512 _val0 = _mm512_set1_ps(tmpptr[0]);
_sum = _mm512_fmadd_ps(_val0, _w0, _sum);

tmpptr += 1;
kptr += 16;
}

_mm_store_ps(outptr0, _mm512_extractf32x4_ps(_sum, 0));
_mm_store_ps(outptr1, _mm512_extractf32x4_ps(_sum, 1));
_mm_store_ps(outptr2, _mm512_extractf32x4_ps(_sum, 2));
_mm_store_ps(outptr3, _mm512_extractf32x4_ps(_sum, 3));

outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
}
}

// tier of 2 output channels per iteration (ymm: 2 channels x 4 lanes)
remain_outch_start += nn_outch << 2;
nn_outch = (outch - remain_outch_start) >> 1;

#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 2;

float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p + 1);

const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p * 4 : zeros;

int i = 0;
for (; i + 7 < size; i += 8)
{
float* tmpptr = tmp.channel(i / 8);
const float* kptr = kernel.channel(p / 4 + (p % 4) / 2);

int nn = inch * maxk * 16; // inch always > 0

__m256 _sum0 = _mm256_loadu_ps(biasptr);
__m256 _sum1 = _sum0;
__m256 _sum2 = _sum0;
__m256 _sum3 = _sum0;
__m256 _sum4 = _sum0;
__m256 _sum5 = _sum0;
__m256 _sum6 = _sum0;
__m256 _sum7 = _sum0;

for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(kptr);

__m256 _val0 = _mm256_broadcast_ss(tmpptr);
__m256 _val1 = _mm256_broadcast_ss(tmpptr + 1);
_sum0 = _mm256_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm256_fmadd_ps(_val1, _w0, _sum1);
__m256 _val2 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val3 = _mm256_broadcast_ss(tmpptr + 3);
_sum2 = _mm256_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm256_fmadd_ps(_val3, _w0, _sum3);
__m256 _val4 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val5 = _mm256_broadcast_ss(tmpptr + 5);
_sum4 = _mm256_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm256_fmadd_ps(_val5, _w0, _sum5);
__m256 _val6 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val7 = _mm256_broadcast_ss(tmpptr + 7);
_sum6 = _mm256_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm256_fmadd_ps(_val7, _w0, _sum7);

tmpptr += 8;
kptr += 8;
}

// low 128-bit lane -> channel p, high lane -> channel p+1
_mm_store_ps(outptr0, _mm256_extractf128_ps(_sum0, 0));
_mm_store_ps(outptr0 + 4, _mm256_extractf128_ps(_sum1, 0));
_mm_store_ps(outptr0 + 8, _mm256_extractf128_ps(_sum2, 0));
_mm_store_ps(outptr0 + 12, _mm256_extractf128_ps(_sum3, 0));
_mm_store_ps(outptr0 + 16, _mm256_extractf128_ps(_sum4, 0));
_mm_store_ps(outptr0 + 20, _mm256_extractf128_ps(_sum5, 0));
_mm_store_ps(outptr0 + 24, _mm256_extractf128_ps(_sum6, 0));
_mm_store_ps(outptr0 + 28, _mm256_extractf128_ps(_sum7, 0));
_mm_store_ps(outptr1, _mm256_extractf128_ps(_sum0, 1));
_mm_store_ps(outptr1 + 4, _mm256_extractf128_ps(_sum1, 1));
_mm_store_ps(outptr1 + 8, _mm256_extractf128_ps(_sum2, 1));
_mm_store_ps(outptr1 + 12, _mm256_extractf128_ps(_sum3, 1));
_mm_store_ps(outptr1 + 16, _mm256_extractf128_ps(_sum4, 1));
_mm_store_ps(outptr1 + 20, _mm256_extractf128_ps(_sum5, 1));
_mm_store_ps(outptr1 + 24, _mm256_extractf128_ps(_sum6, 1));
_mm_store_ps(outptr1 + 28, _mm256_extractf128_ps(_sum7, 1));

outptr0 += 32;
outptr1 += 32;
}
for (; i + 3 < size; i += 4)
{
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const float* kptr = kernel.channel(p / 4 + (p % 4) / 2);

int nn = inch * maxk * 16; // inch always > 0

__m256 _sum0 = _mm256_loadu_ps(biasptr);
__m256 _sum1 = _sum0;
__m256 _sum2 = _sum0;
__m256 _sum3 = _sum0;

for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(kptr);

__m256 _val0 = _mm256_broadcast_ss(tmpptr);
__m256 _val1 = _mm256_broadcast_ss(tmpptr + 1);
_sum0 = _mm256_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm256_fmadd_ps(_val1, _w0, _sum1);
__m256 _val2 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val3 = _mm256_broadcast_ss(tmpptr + 3);
_sum2 = _mm256_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm256_fmadd_ps(_val3, _w0, _sum3);

tmpptr += 4;
kptr += 8;
}

_mm_store_ps(outptr0, _mm256_extractf128_ps(_sum0, 0));
_mm_store_ps(outptr0 + 4, _mm256_extractf128_ps(_sum1, 0));
_mm_store_ps(outptr0 + 8, _mm256_extractf128_ps(_sum2, 0));
_mm_store_ps(outptr0 + 12, _mm256_extractf128_ps(_sum3, 0));
_mm_store_ps(outptr1, _mm256_extractf128_ps(_sum0, 1));
_mm_store_ps(outptr1 + 4, _mm256_extractf128_ps(_sum1, 1));
_mm_store_ps(outptr1 + 8, _mm256_extractf128_ps(_sum2, 1));
_mm_store_ps(outptr1 + 12, _mm256_extractf128_ps(_sum3, 1));

outptr0 += 16;
outptr1 += 16;
}
for (; i < size; i++)
{
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const float* kptr = kernel.channel(p / 4 + (p % 4) / 2);

int nn = inch * maxk * 16; // inch always > 0

__m256 _sum = _mm256_loadu_ps(biasptr);

for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(kptr);
__m256 _val0 = _mm256_broadcast_ss(tmpptr);
_sum = _mm256_fmadd_ps(_val0, _w0, _sum);

tmpptr += 1;
kptr += 8;
}

_mm_store_ps(outptr0, _mm256_extractf128_ps(_sum, 0));
_mm_store_ps(outptr1, _mm256_extractf128_ps(_sum, 1));

outptr0 += 4;
outptr1 += 4;
}
}

// final tier: one output channel per iteration (xmm: 4 packed lanes)
remain_outch_start += nn_outch << 1;

#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
float* outptr0 = top_blob.channel(p);

const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p * 4 : zeros;

int i = 0;
for (; i + 7 < size; i += 8)
{
float* tmpptr = tmp.channel(i / 8);
const float* kptr = kernel.channel(p / 4 + (p % 4) / 2 + p % 2);

int nn = inch * maxk * 16; // inch always > 0

__m128 _sum0 = _mm_loadu_ps(biasptr);
__m128 _sum1 = _sum0;
__m128 _sum2 = _sum0;
__m128 _sum3 = _sum0;
__m128 _sum4 = _sum0;
__m128 _sum5 = _sum0;
__m128 _sum6 = _sum0;
__m128 _sum7 = _sum0;

for (int j = 0; j < nn; j++)
{
__m128 _w0 = _mm_load_ps(kptr);

__m128 _val0 = _mm_load1_ps(tmpptr);
__m128 _val1 = _mm_load1_ps(tmpptr + 1);
_sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
__m128 _val2 = _mm_load1_ps(tmpptr + 2);
__m128 _val3 = _mm_load1_ps(tmpptr + 3);
_sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3);
__m128 _val4 = _mm_load1_ps(tmpptr + 4);
__m128 _val5 = _mm_load1_ps(tmpptr + 5);
_sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5);
__m128 _val6 = _mm_load1_ps(tmpptr + 6);
__m128 _val7 = _mm_load1_ps(tmpptr + 7);
_sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7);

tmpptr += 8;
kptr += 4;
}

_mm_store_ps(outptr0, _sum0);
_mm_store_ps(outptr0 + 4, _sum1);
_mm_store_ps(outptr0 + 8, _sum2);
_mm_store_ps(outptr0 + 12, _sum3);
_mm_store_ps(outptr0 + 16, _sum4);
_mm_store_ps(outptr0 + 20, _sum5);
_mm_store_ps(outptr0 + 24, _sum6);
_mm_store_ps(outptr0 + 28, _sum7);

outptr0 += 32;
}
for (; i + 3 < size; i += 4)
{
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const float* kptr = kernel.channel(p / 4 + (p % 4) / 2 + p % 2);

int nn = inch * maxk * 16; // inch always > 0

__m128 _sum0 = _mm_loadu_ps(biasptr);
__m128 _sum1 = _sum0;
__m128 _sum2 = _sum0;
__m128 _sum3 = _sum0;

for (int j = 0; j < nn; j++)
{
__m128 _w0 = _mm_load_ps(kptr);

__m128 _val0 = _mm_load1_ps(tmpptr);
__m128 _val1 = _mm_load1_ps(tmpptr + 1);
_sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
__m128 _val2 = _mm_load1_ps(tmpptr + 2);
__m128 _val3 = _mm_load1_ps(tmpptr + 3);
_sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3);

tmpptr += 4;
kptr += 4;
}

_mm_store_ps(outptr0, _sum0);
_mm_store_ps(outptr0 + 4, _sum1);
_mm_store_ps(outptr0 + 8, _sum2);
_mm_store_ps(outptr0 + 12, _sum3);

outptr0 += 16;
}
for (; i < size; i++)
{
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const float* kptr = kernel.channel(p / 4 + (p % 4) / 2 + p % 2);

int nn = inch * maxk * 16; // inch always > 0

__m128 _sum = _mm_loadu_ps(biasptr);

for (int j = 0; j < nn; j++)
{
__m128 _w0 = _mm_load_ps(kptr);
__m128 _val0 = _mm_load1_ps(tmpptr);
_sum = _mm_comp_fmadd_ps(_val0, _w0, _sum);

tmpptr += 1;
kptr += 4;
}

_mm_store_ps(outptr0, _sum);

outptr0 += 4;
}
}
}
// Repack convolution weights for the pack16-to-4 AVX-512 sgemm path.
// src layout : maxk-inch-outch
// dst layout : 16b-16a-maxk-inch/16a-outch/8b
// Output channels are processed in groups of 16, then 8, then 4, each
// group interleaving 16 input channels per kernel tap.
static void convolution_im2col_sgemm_transform_kernel_pack16to4_avx512(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    Mat kernel = _kernel.reshape(maxk, inch, outch);
    kernel_tm.create(16 * 16 * maxk, inch / 16, outch / 16 + (outch % 16) / 8 + (outch % 8) / 4, (size_t)4u);

    int q = 0;
    for (; q + 15 < outch; q += 16)
    {
        float* dst = kernel_tm.channel(q / 16);
        for (int p = 0; p + 15 < inch; p += 16)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int a = 0; a < 16; a++)
                {
                    for (int b = 0; b < 16; b++)
                    {
                        *dst++ = kernel.channel(q + b).row(p + a)[k];
                    }
                }
            }
        }
    }
    for (; q + 7 < outch; q += 8)
    {
        float* dst = kernel_tm.channel(q / 16 + (q % 16) / 8);
        for (int p = 0; p + 15 < inch; p += 16)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int a = 0; a < 16; a++)
                {
                    for (int b = 0; b < 8; b++)
                    {
                        *dst++ = kernel.channel(q + b).row(p + a)[k];
                    }
                }
            }
        }
    }
    for (; q + 3 < outch; q += 4)
    {
        float* dst = kernel_tm.channel(q / 16 + (q % 16) / 8 + (q % 8) / 4);
        for (int p = 0; p + 15 < inch; p += 16)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int a = 0; a < 16; a++)
                {
                    for (int b = 0; b < 4; b++)
                    {
                        *dst++ = kernel.channel(q + b).row(p + a)[k];
                    }
                }
            }
        }
    }
}
// im2col followed by the packed sgemm kernel: pack16 input -> pack4 output
// (AVX-512).  Each kernel tap (u, v) of each input channel is unfolded into
// a contiguous row of `size` pixels, 16 floats per pixel.
static void convolution_im2col_sgemm_pack16to4_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    const int w = bottom_blob.w;
    const int inch = bottom_blob.c;
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int size = outw * outh;
    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 64u, 16, opt.workspace_allocator);
    {
        // floats to skip when stepping from the end of one output row to the
        // start of the next (stride_h source rows down, back to column 0)
        const int gap = (w * stride_h - outw * stride_w) * 16;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            float* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const float* sptr = img.row(dilation_h * u) + dilation_w * v * 16;

                    for (int i = 0; i < outh; i++)
                    {
                        for (int j = 0; j < outw; j++)
                        {
                            _mm512_store_ps(ptr, _mm512_load_ps(sptr));
                            sptr += stride_w * 16;
                            ptr += 16;
                        }
                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack16to4_avx512(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two `struct timeval` values.
 *
 * Y is used as scratch space and is modified in place.
 * Returns 1 if the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so the microsecond difference
     * becomes non-negative. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_sec += borrow;
        y->tv_usec -= 1000000 * borrow;
    }
    /* Carry any excess beyond one second back into seconds. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_sec -= carry;
        y->tv_usec += 1000000 * carry;
    }
    /* After normalisation tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative iff x precedes the (normalised) y. */
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 32;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
coef[6][i][j][k] * A[t%2][i ][j ][k+1];
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
lu.c |
/*[]*/
typedef long long __int64_t;
/*[]*/
typedef __int64_t __darwin_off_t;
/*[]*/
typedef __darwin_off_t fpos_t;
/*[]*/
struct __sbuf {
unsigned char *_base;
int _size;
} ;
/*[]*/
struct __sFILEX ;
/*[]*/
/* Replica of the BSD/Darwin stdio stream structure (the object behind
 * `FILE`), inlined here so this translation unit is self-contained.
 * NOTE(review): presumably copied from the system <stdio.h>; the field
 * order and types must match the system layout exactly — confirm before
 * changing anything, since libc functions declared below operate on it. */
struct __sFILE {
unsigned char *_p;   /* current position in the buffer */
int _r;              /* read space left */
int _w;              /* write space left */
short _flags;        /* stream state flags */
short _file;         /* underlying file descriptor, per the short type a small int handle */
struct __sbuf _bf;   /* the primary buffer */
int _lbfsize;
void *_cookie;       /* opaque argument passed to the io callbacks below */
int ( *_close )(void *);
int ( *_read )(void *, char * , int );
fpos_t ( *_seek )(void *, fpos_t , int );
int ( *_write )(void *, const char * , int );
struct __sbuf _ub;   /* ungetc buffer */
struct __sFILEX *_extra;  /* opaque extension area (declared but not defined here) */
int _ur;
unsigned char _ubuf[3];
unsigned char _nbuf[1];
struct __sbuf _lb;
int _blksize;
fpos_t _offset;      /* current stream offset */
} ;
/*[]*/
typedef struct __sFILE FILE;
/*[]*/
int fclose(FILE *);
/*[]*/
int fgetc(FILE *);
/*[]*/
FILE *fopen(const char *restrict __filename, const char *restrict __mode);
/*[]*/
int fscanf(FILE *restrict , const char *restrict , ...);
/*[]*/
int printf(const char *restrict , ...);
/*[]*/
void exit(int );
/*[]*/
extern double fabs(double );
/*[]*/
extern double sqrt(double );
/*[]*/
extern int omp_get_num_threads(void );
/*[]*/
typedef int boolean;
/*[]*/
extern void timer_clear(int );
/*[]*/
extern void timer_start(int );
/*[]*/
extern void timer_stop(int );
/*[]*/
extern double timer_read(int );
/*[]*/
extern void c_print_results(char *name, char class , int n1 , int n2 , int n3 , int niter , int nthreads , double t , double mops , char *optype , int passed_verification , char *npbversion , char *compiletime , char *cc , char *clink , char *c_lib , char *c_inc , char *cflags , char *clinkflags , char *rand);
/*[]*/
static int nx;
/*[]*/
static int ny;
/*[]*/
static int nz;
/*[]*/
static int nx0;
/*[]*/
static int ny0;
/*[]*/
static int nz0;
/*[]*/
static int ist;
/*[]*/
static int iend;
/*[]*/
static int jst;
/*[]*/
static int jend;
/*[]*/
static int ii1;
/*[]*/
static int ii2;
/*[]*/
static int ji1;
/*[]*/
static int ji2;
/*[]*/
static int ki1;
/*[]*/
static int ki2;
/*[]*/
static double dxi;
/*[]*/
static double deta;
/*[]*/
static double dzeta;
/*[]*/
static double tx1;
/*[]*/
static double tx2;
/*[]*/
static double tx3;
/*[]*/
static double ty1;
/*[]*/
static double ty2;
/*[]*/
static double ty3;
/*[]*/
static double tz1;
/*[]*/
static double tz2;
/*[]*/
static double tz3;
/*[]*/
static double dx1;
/*[]*/
static double dx2;
/*[]*/
static double dx3;
/*[]*/
static double dx4;
/*[]*/
static double dx5;
/*[]*/
static double dy1;
/*[]*/
static double dy2;
/*[]*/
static double dy3;
/*[]*/
static double dy4;
/*[]*/
static double dy5;
/*[]*/
static double dz1;
/*[]*/
static double dz2;
/*[]*/
static double dz3;
/*[]*/
static double dz4;
/*[]*/
static double dz5;
/*[]*/
static double dssp;
/*[]*/
static double u[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/*[]*/
static double rsd[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/*[]*/
static double frct[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/*[]*/
static double flux[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/*[]*/
static int ipr;
/*[]*/
static int inorm;
/*[]*/
static int itmax;
/*[]*/
static double dt;
/*[]*/
static double omega;
/*[]*/
static double tolrsd[5];
/*[]*/
static double rsdnm[5];
/*[]*/
static double errnm[5];
/*[]*/
static double frc;
/*[]*/
static double a[12][12][5][5];
/*[]*/
static double b[12][12][5][5];
/*[]*/
static double c[12][12][5][5];
/*[]*/
static double d[12][12][5][5];
/*[]*/
static double ce[5][13];
/*[]*/
static double maxtime;
/*[]*/
static boolean flag[12 / 2 * 2 + 1];
/*[]*/
static void blts(int nx, int ny , int nz , int k , double omega , double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5] , double ldz[12][12][5][5] , double ldy[12][12][5][5] , double ldx[12][12][5][5] , double d[12][12][5][5] , int ist , int iend , int jst , int jend , int nx0 , int ny0);
/*[]*/
static void buts(int nx, int ny , int nz , int k , double omega , double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5] , double tv[12][12][5] , double d[12][12][5][5] , double udx[12][12][5][5] , double udy[12][12][5][5] , double udz[12][12][5][5] , int ist , int iend , int jst , int jend , int nx0 , int ny0);
/*[]*/
static void domain(void );
/*[]*/
static void erhs(void );
/*[]*/
static void error(void );
/*[]*/
static void exact(int i, int j , int k , double u000ijk[5]);
/*[]*/
static void jacld(int k);
/*[]*/
static void jacu(int k);
/*[]*/
static void l2norm(int nx0, int ny0 , int nz0 , int ist , int iend , int jst , int jend , double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5] , double sum[5]);
/*[]*/
static void pintgr(void );
/*[]*/
static void read_input(void );
/*[]*/
static void rhs(void );
/*[]*/
static void setbv(void );
/*[]*/
static void setcoeff(void );
/*[]*/
static void setiv(void );
/*[]*/
static void ssor(void );
/*[]*/
static void verify(double xcr[5], double xce[5] , double xci , char *class , boolean *verified);
/*[]*/
/*[]*/
/*[]*/
int main(int argc, char **argv) {
/*[]*/
/*[]*/
char class;
/*[]*/
boolean verified;
/*[]*/
double mflops;
/*[]*/
int nthreads = 1;
/*[]*/
read_input();
/*[]*/
/*[]*/
domain();
/*[]*/
/*[]*/
setcoeff();
/*[]*/
/*[1]*/
#pragma omp parallel
{
/*[1]*/
/*[1]*/
int i;
/*[1]*/
int j;
/*[1]*/
int k;
/*[1]*/
int iglob;
/*[1]*/
int jglob;
/*[1]*/
#pragma omp for nowait
/*[1]*/
/*[1]*/
/*[1]*/
for (i = 0; i < nx; i++) {
/*[1]*/
/*[1]*/
iglob = i;
/*[1]*/
/*[1]*/
/*[1]*/
/*[1]*/
for (j = 0; j < ny; j++) {
/*[1]*/
/*[1]*/
jglob = j;
/*[1]*/
double *_imopVarPre239;
/*[1]*/
_imopVarPre239 = &u[i][j][0][0];
/*[1]*/
exact(iglob, jglob, 0, _imopVarPre239);
/*[1]*/
/*[1]*/
double *_imopVarPre242;
/*[1]*/
int _imopVarPre243;
/*[1]*/
_imopVarPre242 = &u[i][j][nz - 1][0];
/*[1]*/
_imopVarPre243 = nz - 1;
/*[1]*/
exact(iglob, jglob, _imopVarPre243, _imopVarPre242);
/*[1]*/
}
}
/*[1]*/
// #pragma omp dummyFlush BARRIER_START
/*[1]*/
#pragma omp barrier
/*[2]*/
#pragma omp for nowait
/*[2]*/
/*[2]*/
/*[2]*/
for (i = 0; i < nx; i++) {
/*[2]*/
/*[2]*/
iglob = i;
/*[2]*/
/*[2]*/
/*[2]*/
/*[2]*/
for (k = 0; k < nz; k++) {
/*[2]*/
/*[2]*/
double *_imopVarPre245;
/*[2]*/
_imopVarPre245 = &u[i][0][k][0];
/*[2]*/
exact(iglob, 0, k, _imopVarPre245);
/*[2]*/
}
}
/*[2]*/
// #pragma omp dummyFlush BARRIER_START
/*[2]*/
#pragma omp barrier
/*[3]*/
#pragma omp for nowait
/*[3]*/
/*[3]*/
/*[3]*/
for (i = 0; i < nx; i++) {
/*[3]*/
/*[3]*/
iglob = i;
/*[3]*/
/*[3]*/
/*[3]*/
/*[3]*/
for (k = 0; k < nz; k++) {
/*[3]*/
/*[3]*/
double *_imopVarPre248;
/*[3]*/
int _imopVarPre249;
/*[3]*/
_imopVarPre248 = &u[i][ny - 1][k][0];
/*[3]*/
_imopVarPre249 = ny0 - 1;
/*[3]*/
exact(iglob, _imopVarPre249, k, _imopVarPre248);
/*[3]*/
}
}
/*[3]*/
// #pragma omp dummyFlush BARRIER_START
/*[3]*/
#pragma omp barrier
/*[4]*/
#pragma omp for nowait
/*[4]*/
/*[4]*/
/*[4]*/
for (j = 0; j < ny; j++) {
/*[4]*/
/*[4]*/
jglob = j;
/*[4]*/
/*[4]*/
/*[4]*/
/*[4]*/
for (k = 0; k < nz; k++) {
/*[4]*/
/*[4]*/
double *_imopVarPre251;
/*[4]*/
_imopVarPre251 = &u[0][j][k][0];
/*[4]*/
exact(0, jglob, k, _imopVarPre251);
/*[4]*/
}
}
/*[4]*/
// #pragma omp dummyFlush BARRIER_START
/*[4]*/
#pragma omp barrier
/*[5]*/
#pragma omp for nowait
/*[5]*/
/*[5]*/
/*[5]*/
for (j = 0; j < ny; j++) {
/*[5]*/
/*[5]*/
jglob = j;
/*[5]*/
/*[5]*/
/*[5]*/
/*[5]*/
for (k = 0; k < nz; k++) {
/*[5]*/
/*[5]*/
double *_imopVarPre254;
/*[5]*/
int _imopVarPre255;
/*[5]*/
_imopVarPre254 = &u[nx - 1][j][k][0];
/*[5]*/
_imopVarPre255 = nx0 - 1;
/*[5]*/
exact(_imopVarPre255, jglob, k, _imopVarPre254);
/*[5]*/
}
}
}
/*[6]*/
#pragma omp parallel
{
/*[6]*/
/*[6]*/
int i;
/*[6]*/
int j;
/*[6]*/
int k;
/*[6]*/
int m;
/*[6]*/
int iglob;
/*[6]*/
int jglob;
/*[6]*/
double xi;
/*[6]*/
double eta;
/*[6]*/
double zeta;
/*[6]*/
double pxi;
/*[6]*/
double peta;
/*[6]*/
double pzeta;
/*[6]*/
double ue_1jk[5];
/*[6]*/
double ue_nx0jk[5];
/*[6]*/
double ue_i1k[5];
/*[6]*/
double ue_iny0k[5];
/*[6]*/
double ue_ij1[5];
/*[6]*/
double ue_ijnz[5];
/*[6]*/
#pragma omp for nowait
/*[6]*/
/*[6]*/
/*[6]*/
for (j = 0; j < ny; j++) {
/*[6]*/
/*[6]*/
jglob = j;
/*[6]*/
/*[6]*/
/*[6]*/
/*[6]*/
for (k = 1; k < nz - 1; k++) {
/*[6]*/
/*[6]*/
zeta = ((double) k) / (nz - 1);
/*[6]*/
int _imopVarPre361;
/*[6]*/
_imopVarPre361 = jglob != 0;
/*[6]*/
/*[6]*/
if (_imopVarPre361) {
/*[6]*/
/*[6]*/
_imopVarPre361 = jglob != ny0 - 1;
}
/*[6]*/
/*[6]*/
if (_imopVarPre361) {
/*[6]*/
/*[6]*/
eta = ((double) jglob) / (ny0 - 1);
/*[6]*/
/*[6]*/
/*[6]*/
/*[6]*/
for (i = 0; i < nx; i++) {
/*[6]*/
/*[6]*/
iglob = i;
/*[6]*/
int _imopVarPre363;
/*[6]*/
_imopVarPre363 = iglob != 0;
/*[6]*/
/*[6]*/
if (_imopVarPre363) {
/*[6]*/
/*[6]*/
_imopVarPre363 = iglob != nx0 - 1;
}
/*[6]*/
/*[6]*/
if (_imopVarPre363) {
/*[6]*/
/*[6]*/
xi = ((double) iglob) / (nx0 - 1);
/*[6]*/
exact(0, jglob, k, ue_1jk);
/*[6]*/
/*[6]*/
int _imopVarPre365;
/*[6]*/
_imopVarPre365 = nx0 - 1;
/*[6]*/
exact(_imopVarPre365, jglob, k, ue_nx0jk);
/*[6]*/
/*[6]*/
exact(iglob, 0, k, ue_i1k);
/*[6]*/
/*[6]*/
int _imopVarPre367;
/*[6]*/
_imopVarPre367 = ny0 - 1;
/*[6]*/
exact(iglob, _imopVarPre367, k, ue_iny0k);
/*[6]*/
/*[6]*/
exact(iglob, jglob, 0, ue_ij1);
/*[6]*/
/*[6]*/
int _imopVarPre369;
/*[6]*/
_imopVarPre369 = nz - 1;
/*[6]*/
exact(iglob, jglob, _imopVarPre369, ue_ijnz);
/*[6]*/
/*[6]*/
/*[6]*/
/*[6]*/
/*[6]*/
for (m = 0; m < 5; m++) {
/*[6]*/
/*[6]*/
pxi = (1.0 - xi) * ue_1jk[m] + xi * ue_nx0jk[m];
/*[6]*/
peta = (1.0 - eta) * ue_i1k[m] + eta * ue_iny0k[m];
/*[6]*/
pzeta = (1.0 - zeta) * ue_ij1[m] + zeta * ue_ijnz[m];
/*[6]*/
u[i][j][k][m] = pxi + peta + pzeta - pxi * peta - peta * pzeta - pzeta * pxi + pxi * peta * pzeta;
}
}
}
}
}
}
}
/*[6, 7]*/
#pragma omp parallel
{
/*[6, 7]*/
/*[6, 7]*/
int i;
/*[6, 7]*/
int j;
/*[6, 7]*/
int k;
/*[6, 7]*/
int m;
/*[6, 7]*/
int iglob;
/*[6, 7]*/
int jglob;
/*[6, 7]*/
int L1;
/*[6, 7]*/
int L2;
/*[6, 7]*/
int ist1;
/*[6, 7]*/
int iend1;
/*[6, 7]*/
int jst1;
/*[6, 7]*/
int jend1;
/*[6, 7]*/
double dsspm;
/*[6, 7]*/
double xi;
/*[6, 7]*/
double eta;
/*[6, 7]*/
double zeta;
/*[6, 7]*/
double q;
/*[6, 7]*/
double u21;
/*[6, 7]*/
double u31;
/*[6, 7]*/
double u41;
/*[6, 7]*/
double tmp;
/*[6, 7]*/
double u21i;
/*[6, 7]*/
double u31i;
/*[6, 7]*/
double u41i;
/*[6, 7]*/
double u51i;
/*[6, 7]*/
double u21j;
/*[6, 7]*/
double u31j;
/*[6, 7]*/
double u41j;
/*[6, 7]*/
double u51j;
/*[6, 7]*/
double u21k;
/*[6, 7]*/
double u31k;
/*[6, 7]*/
double u41k;
/*[6, 7]*/
double u51k;
/*[6, 7]*/
double u21im1;
/*[6, 7]*/
double u31im1;
/*[6, 7]*/
double u41im1;
/*[6, 7]*/
double u51im1;
/*[6, 7]*/
double u21jm1;
/*[6, 7]*/
double u31jm1;
/*[6, 7]*/
double u41jm1;
/*[6, 7]*/
double u51jm1;
/*[6, 7]*/
double u21km1;
/*[6, 7]*/
double u31km1;
/*[6, 7]*/
double u41km1;
/*[6, 7]*/
double u51km1;
/*[6, 7]*/
dsspm = dssp;
/*[6, 7]*/
#pragma omp for nowait
/*[6, 7]*/
/*[6, 7]*/
/*[6, 7]*/
for (i = 0; i < nx; i++) {
/*[6, 7]*/
/*[6, 7]*/
/*[6, 7]*/
/*[6, 7]*/
/*[6, 7]*/
for (j = 0; j < ny; j++) {
/*[6, 7]*/
/*[6, 7]*/
/*[6, 7]*/
/*[6, 7]*/
/*[6, 7]*/
for (k = 0; k < nz; k++) {
/*[6, 7]*/
/*[6, 7]*/
/*[6, 7]*/
/*[6, 7]*/
/*[6, 7]*/
for (m = 0; m < 5; m++) {
/*[6, 7]*/
/*[6, 7]*/
frct[i][j][k][m] = 0.0;
}
}
}
}
/*[6, 7]*/
#pragma omp for nowait
/*[6, 7]*/
/*[6, 7]*/
/*[6, 7]*/
for (i = 0; i < nx; i++) {
/*[6, 7]*/
/*[6, 7]*/
iglob = i;
/*[6, 7]*/
xi = ((double) iglob) / (nx0 - 1);
/*[6, 7]*/
/*[6, 7]*/
/*[6, 7]*/
/*[6, 7]*/
for (j = 0; j < ny; j++) {
/*[6, 7]*/
/*[6, 7]*/
jglob = j;
/*[6, 7]*/
eta = ((double) jglob) / (ny0 - 1);
/*[6, 7]*/
/*[6, 7]*/
/*[6, 7]*/
/*[6, 7]*/
for (k = 0; k < nz; k++) {
/*[6, 7]*/
/*[6, 7]*/
zeta = ((double) k) / (nz - 1);
/*[6, 7]*/
/*[6, 7]*/
/*[6, 7]*/
/*[6, 7]*/
for (m = 0; m < 5; m++) {
/*[6, 7]*/
/*[6, 7]*/
rsd[i][j][k][m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta;
}
}
}
}
/*[6, 7]*/
// #pragma omp dummyFlush BARRIER_START
/*[6, 7]*/
#pragma omp barrier
/*[6, 8]*/
L1 = 0;
/*[6, 8]*/
L2 = nx - 1;
/*[6, 8]*/
#pragma omp for nowait
/*[6, 8]*/
/*[6, 8]*/
/*[6, 8]*/
for (i = L1; i <= L2; i++) {
/*[6, 8]*/
/*[6, 8]*/
/*[6, 8]*/
/*[6, 8]*/
/*[6, 8]*/
for (j = jst; j <= jend; j++) {
/*[6, 8]*/
/*[6, 8]*/
/*[6, 8]*/
/*[6, 8]*/
/*[6, 8]*/
for (k = 1; k < nz - 1; k++) {
/*[6, 8]*/
/*[6, 8]*/
flux[i][j][k][0] = rsd[i][j][k][1];
/*[6, 8]*/
u21 = rsd[i][j][k][1] / rsd[i][j][k][0];
/*[6, 8]*/
q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];
/*[6, 8]*/
flux[i][j][k][1] = rsd[i][j][k][1] * u21 + 0.40e+00 * (rsd[i][j][k][4] - q);
/*[6, 8]*/
flux[i][j][k][2] = rsd[i][j][k][2] * u21;
/*[6, 8]*/
flux[i][j][k][3] = rsd[i][j][k][3] * u21;
/*[6, 8]*/
flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u21;
}
}
}
/*[6, 8]*/
// #pragma omp dummyFlush BARRIER_START
/*[6, 8]*/
#pragma omp barrier
/*[6, 9]*/
#pragma omp for nowait
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
for (j = jst; j <= jend; j++) {
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
for (k = 1; k <= nz - 2; k++) {
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
for (i = ist; i <= iend; i++) {
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
for (m = 0; m < 5; m++) {
/*[6, 9]*/
/*[6, 9]*/
frct[i][j][k][m] = frct[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]);
}
}
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
for (i = ist; i <= L2; i++) {
/*[6, 9]*/
/*[6, 9]*/
tmp = 1.0 / rsd[i][j][k][0];
/*[6, 9]*/
u21i = tmp * rsd[i][j][k][1];
/*[6, 9]*/
u31i = tmp * rsd[i][j][k][2];
/*[6, 9]*/
u41i = tmp * rsd[i][j][k][3];
/*[6, 9]*/
u51i = tmp * rsd[i][j][k][4];
/*[6, 9]*/
tmp = 1.0 / rsd[i - 1][j][k][0];
/*[6, 9]*/
u21im1 = tmp * rsd[i - 1][j][k][1];
/*[6, 9]*/
u31im1 = tmp * rsd[i - 1][j][k][2];
/*[6, 9]*/
u41im1 = tmp * rsd[i - 1][j][k][3];
/*[6, 9]*/
u51im1 = tmp * rsd[i - 1][j][k][4];
/*[6, 9]*/
flux[i][j][k][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1);
/*[6, 9]*/
flux[i][j][k][2] = tx3 * (u31i - u31im1);
/*[6, 9]*/
flux[i][j][k][3] = tx3 * (u41i - u41im1);
/*[6, 9]*/
flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * ((u21i * u21i + u31i * u31i + u41i * u41i) - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + (1.0 / 6.0) * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);
}
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
for (i = ist; i <= iend; i++) {
/*[6, 9]*/
/*[6, 9]*/
frct[i][j][k][0] = frct[i][j][k][0] + dx1 * tx1 * (rsd[i - 1][j][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i + 1][j][k][0]);
/*[6, 9]*/
frct[i][j][k][1] = frct[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (rsd[i - 1][j][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i + 1][j][k][1]);
/*[6, 9]*/
frct[i][j][k][2] = frct[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (rsd[i - 1][j][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i + 1][j][k][2]);
/*[6, 9]*/
frct[i][j][k][3] = frct[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (rsd[i - 1][j][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i + 1][j][k][3]);
/*[6, 9]*/
frct[i][j][k][4] = frct[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (rsd[i - 1][j][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i + 1][j][k][4]);
}
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
for (m = 0; m < 5; m++) {
/*[6, 9]*/
/*[6, 9]*/
frct[1][j][k][m] = frct[1][j][k][m] - dsspm * (+5.0 * rsd[1][j][k][m] - 4.0 * rsd[2][j][k][m] + rsd[3][j][k][m]);
/*[6, 9]*/
frct[2][j][k][m] = frct[2][j][k][m] - dsspm * (-4.0 * rsd[1][j][k][m] + 6.0 * rsd[2][j][k][m] - 4.0 * rsd[3][j][k][m] + rsd[4][j][k][m]);
}
/*[6, 9]*/
ist1 = 3;
/*[6, 9]*/
iend1 = nx - 4;
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
for (i = ist1; i <= iend1; i++) {
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
for (m = 0; m < 5; m++) {
/*[6, 9]*/
/*[6, 9]*/
frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i - 2][j][k][m] - 4.0 * rsd[i - 1][j][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i + 1][j][k][m] + rsd[i + 2][j][k][m]);
}
}
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
for (m = 0; m < 5; m++) {
/*[6, 9]*/
/*[6, 9]*/
frct[nx - 3][j][k][m] = frct[nx - 3][j][k][m] - dsspm * (rsd[nx - 5][j][k][m] - 4.0 * rsd[nx - 4][j][k][m] + 6.0 * rsd[nx - 3][j][k][m] - 4.0 * rsd[nx - 2][j][k][m]);
/*[6, 9]*/
frct[nx - 2][j][k][m] = frct[nx - 2][j][k][m] - dsspm * (rsd[nx - 4][j][k][m] - 4.0 * rsd[nx - 3][j][k][m] + 5.0 * rsd[nx - 2][j][k][m]);
}
}
}
/*[6, 9]*/
// #pragma omp dummyFlush BARRIER_START
/*[6, 9]*/
#pragma omp barrier
/*[6, 10]*/
L1 = 0;
/*[6, 10]*/
L2 = ny - 1;
/*[6, 10]*/
#pragma omp for nowait
/*[6, 10]*/
/*[6, 10]*/
/*[6, 10]*/
for (i = ist; i <= iend; i++) {
/*[6, 10]*/
/*[6, 10]*/
/*[6, 10]*/
/*[6, 10]*/
/*[6, 10]*/
for (j = L1; j <= L2; j++) {
/*[6, 10]*/
/*[6, 10]*/
/*[6, 10]*/
/*[6, 10]*/
/*[6, 10]*/
for (k = 1; k <= nz - 2; k++) {
/*[6, 10]*/
/*[6, 10]*/
flux[i][j][k][0] = rsd[i][j][k][2];
/*[6, 10]*/
u31 = rsd[i][j][k][2] / rsd[i][j][k][0];
/*[6, 10]*/
q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];
/*[6, 10]*/
flux[i][j][k][1] = rsd[i][j][k][1] * u31;
/*[6, 10]*/
flux[i][j][k][2] = rsd[i][j][k][2] * u31 + 0.40e+00 * (rsd[i][j][k][4] - q);
/*[6, 10]*/
flux[i][j][k][3] = rsd[i][j][k][3] * u31;
/*[6, 10]*/
flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u31;
}
}
}
/*[6, 10]*/
// #pragma omp dummyFlush BARRIER_START
/*[6, 10]*/
#pragma omp barrier
/*[6, 11]*/
#pragma omp for nowait
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
for (i = ist; i <= iend; i++) {
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
for (k = 1; k <= nz - 2; k++) {
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
for (j = jst; j <= jend; j++) {
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
for (m = 0; m < 5; m++) {
/*[6, 11]*/
/*[6, 11]*/
frct[i][j][k][m] = frct[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]);
}
}
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
for (j = jst; j <= L2; j++) {
/*[6, 11]*/
/*[6, 11]*/
tmp = 1.0 / rsd[i][j][k][0];
/*[6, 11]*/
u21j = tmp * rsd[i][j][k][1];
/*[6, 11]*/
u31j = tmp * rsd[i][j][k][2];
/*[6, 11]*/
u41j = tmp * rsd[i][j][k][3];
/*[6, 11]*/
u51j = tmp * rsd[i][j][k][4];
/*[6, 11]*/
tmp = 1.0 / rsd[i][j - 1][k][0];
/*[6, 11]*/
u21jm1 = tmp * rsd[i][j - 1][k][1];
/*[6, 11]*/
u31jm1 = tmp * rsd[i][j - 1][k][2];
/*[6, 11]*/
u41jm1 = tmp * rsd[i][j - 1][k][3];
/*[6, 11]*/
u51jm1 = tmp * rsd[i][j - 1][k][4];
/*[6, 11]*/
flux[i][j][k][1] = ty3 * (u21j - u21jm1);
/*[6, 11]*/
flux[i][j][k][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1);
/*[6, 11]*/
flux[i][j][k][3] = ty3 * (u41j - u41jm1);
/*[6, 11]*/
flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * ((u21j * u21j + u31j * u31j + u41j * u41j) - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + (1.0 / 6.0) * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);
}
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
for (j = jst; j <= jend; j++) {
/*[6, 11]*/
/*[6, 11]*/
frct[i][j][k][0] = frct[i][j][k][0] + dy1 * ty1 * (rsd[i][j - 1][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j + 1][k][0]);
/*[6, 11]*/
frct[i][j][k][1] = frct[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (rsd[i][j - 1][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j + 1][k][1]);
/*[6, 11]*/
frct[i][j][k][2] = frct[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (rsd[i][j - 1][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j + 1][k][2]);
/*[6, 11]*/
frct[i][j][k][3] = frct[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (rsd[i][j - 1][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j + 1][k][3]);
/*[6, 11]*/
frct[i][j][k][4] = frct[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (rsd[i][j - 1][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j + 1][k][4]);
}
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
for (m = 0; m < 5; m++) {
/*[6, 11]*/
/*[6, 11]*/
frct[i][1][k][m] = frct[i][1][k][m] - dsspm * (+5.0 * rsd[i][1][k][m] - 4.0 * rsd[i][2][k][m] + rsd[i][3][k][m]);
/*[6, 11]*/
frct[i][2][k][m] = frct[i][2][k][m] - dsspm * (-4.0 * rsd[i][1][k][m] + 6.0 * rsd[i][2][k][m] - 4.0 * rsd[i][3][k][m] + rsd[i][4][k][m]);
}
/*[6, 11]*/
jst1 = 3;
/*[6, 11]*/
jend1 = ny - 4;
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
for (j = jst1; j <= jend1; j++) {
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
for (m = 0; m < 5; m++) {
/*[6, 11]*/
/*[6, 11]*/
frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j - 2][k][m] - 4.0 * rsd[i][j - 1][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j + 1][k][m] + rsd[i][j + 2][k][m]);
}
}
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
for (m = 0; m < 5; m++) {
/*[6, 11]*/
/*[6, 11]*/
frct[i][ny - 3][k][m] = frct[i][ny - 3][k][m] - dsspm * (rsd[i][ny - 5][k][m] - 4.0 * rsd[i][ny - 4][k][m] + 6.0 * rsd[i][ny - 3][k][m] - 4.0 * rsd[i][ny - 2][k][m]);
/*[6, 11]*/
frct[i][ny - 2][k][m] = frct[i][ny - 2][k][m] - dsspm * (rsd[i][ny - 4][k][m] - 4.0 * rsd[i][ny - 3][k][m] + 5.0 * rsd[i][ny - 2][k][m]);
}
}
}
/*[6, 11]*/
// #pragma omp dummyFlush BARRIER_START
/*[6, 11]*/
#pragma omp barrier
/*[6, 12]*/
#pragma omp for nowait
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
for (i = ist; i <= iend; i++) {
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
for (j = jst; j <= jend; j++) {
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
for (k = 0; k <= nz - 1; k++) {
/*[6, 12]*/
/*[6, 12]*/
flux[i][j][k][0] = rsd[i][j][k][3];
/*[6, 12]*/
u41 = rsd[i][j][k][3] / rsd[i][j][k][0];
/*[6, 12]*/
q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];
/*[6, 12]*/
flux[i][j][k][1] = rsd[i][j][k][1] * u41;
/*[6, 12]*/
flux[i][j][k][2] = rsd[i][j][k][2] * u41;
/*[6, 12]*/
flux[i][j][k][3] = rsd[i][j][k][3] * u41 + 0.40e+00 * (rsd[i][j][k][4] - q);
/*[6, 12]*/
flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u41;
}
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
for (k = 1; k <= nz - 2; k++) {
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
for (m = 0; m < 5; m++) {
/*[6, 12]*/
/*[6, 12]*/
frct[i][j][k][m] = frct[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]);
}
}
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
for (k = 1; k <= nz - 1; k++) {
/*[6, 12]*/
/*[6, 12]*/
tmp = 1.0 / rsd[i][j][k][0];
/*[6, 12]*/
u21k = tmp * rsd[i][j][k][1];
/*[6, 12]*/
u31k = tmp * rsd[i][j][k][2];
/*[6, 12]*/
u41k = tmp * rsd[i][j][k][3];
/*[6, 12]*/
u51k = tmp * rsd[i][j][k][4];
/*[6, 12]*/
tmp = 1.0 / rsd[i][j][k - 1][0];
/*[6, 12]*/
u21km1 = tmp * rsd[i][j][k - 1][1];
/*[6, 12]*/
u31km1 = tmp * rsd[i][j][k - 1][2];
/*[6, 12]*/
u41km1 = tmp * rsd[i][j][k - 1][3];
/*[6, 12]*/
u51km1 = tmp * rsd[i][j][k - 1][4];
/*[6, 12]*/
flux[i][j][k][1] = tz3 * (u21k - u21km1);
/*[6, 12]*/
flux[i][j][k][2] = tz3 * (u31k - u31km1);
/*[6, 12]*/
flux[i][j][k][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1);
/*[6, 12]*/
flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * ((u21k * u21k + u31k * u31k + u41k * u41k) - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + (1.0 / 6.0) * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);
}
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
for (k = 1; k <= nz - 2; k++) {
/*[6, 12]*/
/*[6, 12]*/
frct[i][j][k][0] = frct[i][j][k][0] + dz1 * tz1 * (rsd[i][j][k + 1][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j][k - 1][0]);
/*[6, 12]*/
frct[i][j][k][1] = frct[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (rsd[i][j][k + 1][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j][k - 1][1]);
/*[6, 12]*/
frct[i][j][k][2] = frct[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (rsd[i][j][k + 1][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j][k - 1][2]);
/*[6, 12]*/
frct[i][j][k][3] = frct[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (rsd[i][j][k + 1][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j][k - 1][3]);
/*[6, 12]*/
frct[i][j][k][4] = frct[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (rsd[i][j][k + 1][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j][k - 1][4]);
}
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
for (m = 0; m < 5; m++) {
/*[6, 12]*/
/*[6, 12]*/
frct[i][j][1][m] = frct[i][j][1][m] - dsspm * (+5.0 * rsd[i][j][1][m] - 4.0 * rsd[i][j][2][m] + rsd[i][j][3][m]);
/*[6, 12]*/
frct[i][j][2][m] = frct[i][j][2][m] - dsspm * (-4.0 * rsd[i][j][1][m] + 6.0 * rsd[i][j][2][m] - 4.0 * rsd[i][j][3][m] + rsd[i][j][4][m]);
}
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
for (k = 3; k <= nz - 4; k++) {
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
for (m = 0; m < 5; m++) {
/*[6, 12]*/
/*[6, 12]*/
frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j][k - 2][m] - 4.0 * rsd[i][j][k - 1][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j][k + 1][m] + rsd[i][j][k + 2][m]);
}
}
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
for (m = 0; m < 5; m++) {
/*[6, 12]*/
/*[6, 12]*/
frct[i][j][nz - 3][m] = frct[i][j][nz - 3][m] - dsspm * (rsd[i][j][nz - 5][m] - 4.0 * rsd[i][j][nz - 4][m] + 6.0 * rsd[i][j][nz - 3][m] - 4.0 * rsd[i][j][nz - 2][m]);
/*[6, 12]*/
frct[i][j][nz - 2][m] = frct[i][j][nz - 2][m] - dsspm * (rsd[i][j][nz - 4][m] - 4.0 * rsd[i][j][nz - 3][m] + 5.0 * rsd[i][j][nz - 2][m]);
}
}
}
}
/*[13]*/
// NOTE(review): this appears to be the preamble of the SSOR driver of the
// NPB LU benchmark (IMOP-instrumented); the enclosing function header lies
// outside this chunk, so intent notes are inferred from the visible code.
// Record the OpenMP team size into the global 'nthreads' (master thread only;
// the implicit barrier at the end of the parallel region orders the write).
#pragma omp parallel
{
/*[13]*/
/*[13]*/
#pragma omp master
{
/*[13]*/
/*[13]*/
nthreads = omp_get_num_threads();
/*[13]*/
}
}
/*[13]*/
// Loop indices and SSOR work arrays used by the phases that follow.
int i;
/*[13]*/
int j;
/*[13]*/
int k;
/*[13]*/
int m;
/*[13]*/
int istep;
/*[13]*/
double tmp;
/*[13]*/
// delunm: per-component norms of the Newton update; tv: temporary block vector.
double delunm[5];
/*[13]*/
double tv[12][12][5];
/*[13]*/
// SSOR over-relaxation factor: tmp = 1 / (omega * (2 - omega)).
// 'omega' is a global declared outside this chunk -- assumed in (0, 2).
tmp = 1.0 / (omega * (2.0 - omega));
/*[13, 14]*/
// Zero the four 5x5 block-Jacobian arrays a, b, c, d over the whole
// 12x12 plane. i/j/k/m are the enclosing scope's indices, made private
// per thread; the i-loop is workshared, 'nowait' skips the loop-end
// barrier (the parallel region's own implicit barrier still synchronizes).
#pragma omp parallel private(i, j, k, m)
{
/*[13, 14]*/
/*[13, 14]*/
#pragma omp for nowait
/*[13, 14]*/
/*[13, 14]*/
/*[13, 14]*/
for (i = 0; i < 12; i++) {
/*[13, 14]*/
/*[13, 14]*/
/*[13, 14]*/
/*[13, 14]*/
/*[13, 14]*/
for (j = 0; j < 12; j++) {
/*[13, 14]*/
/*[13, 14]*/
/*[13, 14]*/
/*[13, 14]*/
/*[13, 14]*/
for (k = 0; k < 5; k++) {
/*[13, 14]*/
/*[13, 14]*/
/*[13, 14]*/
/*[13, 14]*/
/*[13, 14]*/
for (m = 0; m < 5; m++) {
/*[13, 14]*/
/*[13, 14]*/
a[i][j][k][m] = 0.0;
/*[13, 14]*/
b[i][j][k][m] = 0.0;
/*[13, 14]*/
c[i][j][k][m] = 0.0;
/*[13, 14]*/
d[i][j][k][m] = 0.0;
}
}
}
}
}
/*[13, 14, 15]*/
// NOTE(review): inlined right-hand-side computation (rhs-style, NPB LU shape,
// IMOP-instrumented). One parallel region, split by explicit '#pragma omp
// barrier' into phases (the /*[13, 14, N]*/ markers tag those phases):
//   15: seed rsd = -frct over the whole grid;
//   16/17: x-direction fluxes, central differences, viscous terms and
//          fourth-order dissipation;
//   18/19: same for the y-direction;
//   20:    same for the z-direction (no barrier needed between its sub-steps
//          since each (i,j) pencil is owned by one thread's i-iteration).
// Grid extents nx/ny/nz, bounds ist/iend/jst/jend, coefficients tx*/ty*/tz*,
// dx*/dy*/dz*, dssp, and arrays u/rsd/frct/flux are globals declared outside
// this chunk -- assumed to be the usual LU benchmark parameters (TODO confirm).
#pragma omp parallel
{
/*[13, 14, 15]*/
/*[13, 14, 15]*/
int i_imopVarPre84;
/*[13, 14, 15]*/
int j_imopVarPre85;
/*[13, 14, 15]*/
int k_imopVarPre86;
/*[13, 14, 15]*/
int m_imopVarPre87;
/*[13, 14, 15]*/
int L1;
/*[13, 14, 15]*/
int L2;
/*[13, 14, 15]*/
int ist1;
/*[13, 14, 15]*/
int iend1;
/*[13, 14, 15]*/
int jst1;
/*[13, 14, 15]*/
int jend1;
/*[13, 14, 15]*/
double q;
/*[13, 14, 15]*/
double u21;
/*[13, 14, 15]*/
double u31;
/*[13, 14, 15]*/
double u41;
/*[13, 14, 15]*/
double tmp_imopVarPre88;
/*[13, 14, 15]*/
double u21i;
/*[13, 14, 15]*/
double u31i;
/*[13, 14, 15]*/
double u41i;
/*[13, 14, 15]*/
double u51i;
/*[13, 14, 15]*/
double u21j;
/*[13, 14, 15]*/
double u31j;
/*[13, 14, 15]*/
double u41j;
/*[13, 14, 15]*/
double u51j;
/*[13, 14, 15]*/
double u21k;
/*[13, 14, 15]*/
double u31k;
/*[13, 14, 15]*/
double u41k;
/*[13, 14, 15]*/
double u51k;
/*[13, 14, 15]*/
double u21im1;
/*[13, 14, 15]*/
double u31im1;
/*[13, 14, 15]*/
double u41im1;
/*[13, 14, 15]*/
double u51im1;
/*[13, 14, 15]*/
double u21jm1;
/*[13, 14, 15]*/
double u31jm1;
/*[13, 14, 15]*/
double u41jm1;
/*[13, 14, 15]*/
double u51jm1;
/*[13, 14, 15]*/
double u21km1;
/*[13, 14, 15]*/
double u31km1;
/*[13, 14, 15]*/
double u41km1;
/*[13, 14, 15]*/
double u51km1;
/*[13, 14, 15]*/
// Phase 15: rsd := -frct at every grid point and component.
#pragma omp for nowait
/*[13, 14, 15]*/
/*[13, 14, 15]*/
/*[13, 14, 15]*/
for (i_imopVarPre84 = 0; i_imopVarPre84 <= nx - 1; i_imopVarPre84++) {
/*[13, 14, 15]*/
/*[13, 14, 15]*/
/*[13, 14, 15]*/
/*[13, 14, 15]*/
/*[13, 14, 15]*/
for (j_imopVarPre85 = 0; j_imopVarPre85 <= ny - 1; j_imopVarPre85++) {
/*[13, 14, 15]*/
/*[13, 14, 15]*/
/*[13, 14, 15]*/
/*[13, 14, 15]*/
/*[13, 14, 15]*/
for (k_imopVarPre86 = 0; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) {
/*[13, 14, 15]*/
/*[13, 14, 15]*/
/*[13, 14, 15]*/
/*[13, 14, 15]*/
/*[13, 14, 15]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 15]*/
/*[13, 14, 15]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = -frct[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87];
}
}
}
}
/*[13, 14, 15]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 15]*/
#pragma omp barrier
/*[13, 14, 16]*/
// Phase 16: x-direction fluxes built from u (flux[0] = x-momentum, then
// momentum/energy components using velocity u21 and kinetic term q).
L1 = 0;
/*[13, 14, 16]*/
L2 = nx - 1;
/*[13, 14, 16]*/
#pragma omp for nowait
/*[13, 14, 16]*/
/*[13, 14, 16]*/
/*[13, 14, 16]*/
for (i_imopVarPre84 = L1; i_imopVarPre84 <= L2; i_imopVarPre84++) {
/*[13, 14, 16]*/
/*[13, 14, 16]*/
/*[13, 14, 16]*/
/*[13, 14, 16]*/
/*[13, 14, 16]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) {
/*[13, 14, 16]*/
/*[13, 14, 16]*/
/*[13, 14, 16]*/
/*[13, 14, 16]*/
/*[13, 14, 16]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[13, 14, 16]*/
/*[13, 14, 16]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1];
/*[13, 14, 16]*/
u21 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[13, 14, 16]*/
q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[13, 14, 16]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u21 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q);
/*[13, 14, 16]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u21;
/*[13, 14, 16]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u21;
/*[13, 14, 16]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u21;
}
}
}
/*[13, 14, 16]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 16]*/
#pragma omp barrier
/*[13, 14, 17]*/
// Phase 17: x-direction update of rsd -- central flux difference, then
// viscous flux rebuild (u*i vs. u*im1 neighbor values), then second-order
// terms and fourth-order dissipation with one-sided stencils at i = 1, 2
// and i = nx-3, nx-2.
#pragma omp for nowait
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) {
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) {
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 17]*/
/*[13, 14, 17]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - tx2 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - flux[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
}
}
/*[13, 14, 17]*/
L2 = nx - 1;
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= L2; i_imopVarPre84++) {
/*[13, 14, 17]*/
/*[13, 14, 17]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[13, 14, 17]*/
u21i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1];
/*[13, 14, 17]*/
u31i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2];
/*[13, 14, 17]*/
u41i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3];
/*[13, 14, 17]*/
u51i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4];
/*[13, 14, 17]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][0];
/*[13, 14, 17]*/
u21im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][1];
/*[13, 14, 17]*/
u31im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][2];
/*[13, 14, 17]*/
u41im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][3];
/*[13, 14, 17]*/
u51im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][4];
/*[13, 14, 17]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1);
/*[13, 14, 17]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = tx3 * (u31i - u31im1);
/*[13, 14, 17]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = tx3 * (u41i - u41im1);
/*[13, 14, 17]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);
}
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) {
/*[13, 14, 17]*/
/*[13, 14, 17]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dx1 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][0]);
/*[13, 14, 17]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dx2 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][1]);
/*[13, 14, 17]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dx3 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][2]);
/*[13, 14, 17]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dx4 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][3]);
/*[13, 14, 17]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dx5 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][4]);
}
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 17]*/
/*[13, 14, 17]*/
rsd[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (+5.0 * u[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
/*[13, 14, 17]*/
rsd[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (-4.0 * u[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
}
/*[13, 14, 17]*/
ist1 = 3;
/*[13, 14, 17]*/
iend1 = nx - 4;
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
for (i_imopVarPre84 = ist1; i_imopVarPre84 <= iend1; i_imopVarPre84++) {
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 17]*/
/*[13, 14, 17]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84 - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84 + 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
}
}
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 17]*/
/*[13, 14, 17]*/
rsd[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[nx - 5][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
/*[13, 14, 17]*/
rsd[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[nx - 4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 5.0 * u[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
}
}
}
/*[13, 14, 17]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 17]*/
#pragma omp barrier
/*[13, 14, 18]*/
// Phase 18: y-direction fluxes built from u (mirrors phase 16 with u31).
L1 = 0;
/*[13, 14, 18]*/
L2 = ny - 1;
/*[13, 14, 18]*/
#pragma omp for nowait
/*[13, 14, 18]*/
/*[13, 14, 18]*/
/*[13, 14, 18]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) {
/*[13, 14, 18]*/
/*[13, 14, 18]*/
/*[13, 14, 18]*/
/*[13, 14, 18]*/
/*[13, 14, 18]*/
for (j_imopVarPre85 = L1; j_imopVarPre85 <= L2; j_imopVarPre85++) {
/*[13, 14, 18]*/
/*[13, 14, 18]*/
/*[13, 14, 18]*/
/*[13, 14, 18]*/
/*[13, 14, 18]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[13, 14, 18]*/
/*[13, 14, 18]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2];
/*[13, 14, 18]*/
u31 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[13, 14, 18]*/
q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[13, 14, 18]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u31;
/*[13, 14, 18]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u31 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q);
/*[13, 14, 18]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u31;
/*[13, 14, 18]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u31;
}
}
}
/*[13, 14, 18]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 18]*/
#pragma omp barrier
/*[13, 14, 19]*/
// Phase 19: y-direction update of rsd (mirrors phase 17 along j).
#pragma omp for nowait
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) {
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) {
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 19]*/
/*[13, 14, 19]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - ty2 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][m_imopVarPre87] - flux[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][m_imopVarPre87]);
}
}
/*[13, 14, 19]*/
L2 = ny - 1;
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= L2; j_imopVarPre85++) {
/*[13, 14, 19]*/
/*[13, 14, 19]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[13, 14, 19]*/
u21j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1];
/*[13, 14, 19]*/
u31j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2];
/*[13, 14, 19]*/
u41j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3];
/*[13, 14, 19]*/
u51j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4];
/*[13, 14, 19]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][0];
/*[13, 14, 19]*/
u21jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][1];
/*[13, 14, 19]*/
u31jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][2];
/*[13, 14, 19]*/
u41jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][3];
/*[13, 14, 19]*/
u51jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][4];
/*[13, 14, 19]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = ty3 * (u21j - u21jm1);
/*[13, 14, 19]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1);
/*[13, 14, 19]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = ty3 * (u41j - u41jm1);
/*[13, 14, 19]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);
}
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) {
/*[13, 14, 19]*/
/*[13, 14, 19]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dy1 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][0]);
/*[13, 14, 19]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dy2 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][1]);
/*[13, 14, 19]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dy3 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][2]);
/*[13, 14, 19]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dy4 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][3]);
/*[13, 14, 19]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dy5 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][4]);
}
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 19]*/
/*[13, 14, 19]*/
rsd[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] - dssp * (+5.0 * u[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][3][k_imopVarPre86][m_imopVarPre87]);
/*[13, 14, 19]*/
rsd[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] - dssp * (-4.0 * u[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][3][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][4][k_imopVarPre86][m_imopVarPre87]);
}
/*[13, 14, 19]*/
jst1 = 3;
/*[13, 14, 19]*/
jend1 = ny - 4;
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
for (j_imopVarPre85 = jst1; j_imopVarPre85 <= jend1; j_imopVarPre85++) {
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 19]*/
/*[13, 14, 19]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85 - 2][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85 + 2][k_imopVarPre86][m_imopVarPre87]);
}
}
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 19]*/
/*[13, 14, 19]*/
rsd[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][ny - 5][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 4][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87]);
/*[13, 14, 19]*/
rsd[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][ny - 4][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] + 5.0 * u[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87]);
}
}
}
/*[13, 14, 19]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 19]*/
#pragma omp barrier
/*[13, 14, 20]*/
// Phase 20: z-direction fluxes and update. Each thread owns whole (j,k)
// pencils of its i-iterations, so the flux build and the rsd update can
// share one workshared i-loop without an intervening barrier.
#pragma omp for nowait
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
for (k_imopVarPre86 = 0; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3];
/*[13, 14, 20]*/
u41 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[13, 14, 20]*/
q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[13, 14, 20]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u41;
/*[13, 14, 20]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u41;
/*[13, 14, 20]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u41 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q);
/*[13, 14, 20]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u41;
}
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - tz2 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][m_imopVarPre87] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][m_imopVarPre87]);
}
}
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[13, 14, 20]*/
u21k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1];
/*[13, 14, 20]*/
u31k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2];
/*[13, 14, 20]*/
u41k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3];
/*[13, 14, 20]*/
u51k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4];
/*[13, 14, 20]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][0];
/*[13, 14, 20]*/
u21km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][1];
/*[13, 14, 20]*/
u31km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][2];
/*[13, 14, 20]*/
u41km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][3];
/*[13, 14, 20]*/
u51km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][4];
/*[13, 14, 20]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = tz3 * (u21k - u21km1);
/*[13, 14, 20]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = tz3 * (u31k - u31km1);
/*[13, 14, 20]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1);
/*[13, 14, 20]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);
}
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dz1 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][0]);
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dz2 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][1]);
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dz3 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][2]);
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dz4 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][3]);
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dz5 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][4]);
}
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] - dssp * (+5.0 * u[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][3][m_imopVarPre87]);
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] - dssp * (-4.0 * u[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][3][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][4][m_imopVarPre87]);
}
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
for (k_imopVarPre86 = 3; k_imopVarPre86 <= nz - 4; k_imopVarPre86++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 2][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 2][m_imopVarPre87]);
}
}
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][nz - 5][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 4][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87]);
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][nz - 4][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] + 5.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87]);
}
}
}
}
/*[13, 14, 21]*/
// Inlined l2norm: for each of the 5 solution components m, compute
//   rsdnm[m] = sqrt( sum over interior points of rsd[..][m]^2
//                    / ((nx0-2)*(ny0-2)*(nz0-2)) ).
// Each thread accumulates private partial sums (sum0..sum4) over its share
// of i, then merges them into the shared array under a critical section.
#pragma omp parallel
{
/*[13, 14, 21]*/
/*[13, 14, 21]*/
// v aliases the 4-D rsd array; the 12/2*2+1 extents are the benchmark's
// compile-time grid bounds.  sum aliases the 5-element output rsdnm.
double ( *v )[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/*[13, 14, 21]*/
double *sum;
/*[13, 14, 21]*/
v = rsd;
/*[13, 14, 21]*/
sum = rsdnm;
/*[13, 14, 21]*/
int i_imopVarPre75;
/*[13, 14, 21]*/
int j_imopVarPre76;
/*[13, 14, 21]*/
int k_imopVarPre77;
/*[13, 14, 21]*/
int m_imopVarPre78;
/*[13, 14, 21]*/
// Per-thread partial sums, one per solution component.
double sum0 = 0.0;
/*[13, 14, 21]*/
double sum1 = 0.0;
/*[13, 14, 21]*/
double sum2 = 0.0;
/*[13, 14, 21]*/
double sum3 = 0.0;
/*[13, 14, 21]*/
double sum4 = 0.0;
/*[13, 14, 21]*/
// One thread zeroes the shared accumulator; the explicit barrier below
// makes the zeroing visible before any thread adds its partials.
#pragma omp single nowait
{
/*[13, 14, 21]*/
/*[13, 14, 21]*/
/*[13, 14, 21]*/
/*[13, 14, 21]*/
/*[13, 14, 21]*/
for (m_imopVarPre78 = 0; m_imopVarPre78 < 5; m_imopVarPre78++) {
/*[13, 14, 21]*/
/*[13, 14, 21]*/
sum[m_imopVarPre78] = 0.0;
}
}
/*[13, 14, 21]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 21]*/
#pragma omp barrier
/*[13, 14, 22]*/
// Worksharing over i (interior range ist..iend); nowait is safe because
// the critical section below orders the merge and a barrier follows it.
#pragma omp for nowait
/*[13, 14, 22]*/
/*[13, 14, 22]*/
/*[13, 14, 22]*/
for (i_imopVarPre75 = ist; i_imopVarPre75 <= iend; i_imopVarPre75++) {
/*[13, 14, 22]*/
/*[13, 14, 22]*/
/*[13, 14, 22]*/
/*[13, 14, 22]*/
/*[13, 14, 22]*/
for (j_imopVarPre76 = jst; j_imopVarPre76 <= jend; j_imopVarPre76++) {
/*[13, 14, 22]*/
/*[13, 14, 22]*/
/*[13, 14, 22]*/
/*[13, 14, 22]*/
/*[13, 14, 22]*/
for (k_imopVarPre77 = 1; k_imopVarPre77 <= nz0 - 2; k_imopVarPre77++) {
/*[13, 14, 22]*/
/*[13, 14, 22]*/
sum0 = sum0 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][0] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][0];
/*[13, 14, 22]*/
sum1 = sum1 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][1] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][1];
/*[13, 14, 22]*/
sum2 = sum2 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][2] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][2];
/*[13, 14, 22]*/
sum3 = sum3 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][3] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][3];
/*[13, 14, 22]*/
sum4 = sum4 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][4] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][4];
}
}
}
/*[13, 14, 22]*/
// #pragma omp dummyFlush CRITICAL_START
/*[13, 14, 22]*/
// Merge this thread's partial sums into the shared accumulator.
#pragma omp critical
{
/*[13, 14, 22]*/
/*[13, 14, 22]*/
sum[0] += sum0;
/*[13, 14, 22]*/
sum[1] += sum1;
/*[13, 14, 22]*/
sum[2] += sum2;
/*[13, 14, 22]*/
sum[3] += sum3;
/*[13, 14, 22]*/
sum[4] += sum4;
}
/*[13, 14, 22]*/
// #pragma omp dummyFlush CRITICAL_END
/*[13, 14, 22]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 22]*/
// Wait until every thread has merged before one thread finalizes.
#pragma omp barrier
/*[13, 14, 23]*/
#pragma omp single nowait
{
/*[13, 14, 23]*/
/*[13, 14, 23]*/
/*[13, 14, 23]*/
/*[13, 14, 23]*/
/*[13, 14, 23]*/
for (m_imopVarPre78 = 0; m_imopVarPre78 < 5; m_imopVarPre78++) {
/*[13, 14, 23]*/
/*[13, 14, 23]*/
double _imopVarPre154;
/*[13, 14, 23]*/
double _imopVarPre155;
/*[13, 14, 23]*/
// Mean square over the (nx0-2)*(ny0-2)*(nz0-2) interior points, then sqrt.
_imopVarPre154 = sum[m_imopVarPre78] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2));
/*[13, 14, 23]*/
_imopVarPre155 = sqrt(_imopVarPre154);
/*[13, 14, 23]*/
/*[13, 14, 23]*/
sum[m_imopVarPre78] = _imopVarPre155;
}
}
}
/*[13, 14]*/
timer_clear(1);
/*[13, 14]*/
/*[13, 14]*/
timer_start(1);
/*[13, 14]*/
/*[13, 14]*/
/*[13, 14]*/
/*[13, 14]*/
/*[13, 14]*/
for (istep = 1; istep <= itmax; istep++) {
/*[13, 14]*/
/*[13, 14]*/
int _imopVarPre372;
/*[13, 14]*/
int _imopVarPre370;
/*[13, 14]*/
int _imopVarPre371;
/*[13, 14]*/
_imopVarPre370 = istep % 20 == 0;
/*[13, 14]*/
/*[13, 14]*/
if (!_imopVarPre370) {
/*[13, 14]*/
/*[13, 14]*/
_imopVarPre371 = istep == itmax;
/*[13, 14]*/
/*[13, 14]*/
if (!_imopVarPre371) {
/*[13, 14]*/
/*[13, 14]*/
_imopVarPre371 = istep == 1;
}
/*[13, 14]*/
_imopVarPre370 = _imopVarPre371;
}
/*[13, 14]*/
/*[13, 14]*/
if (_imopVarPre370) {
/*[13, 14]*/
/*[13, 14]*/
#pragma omp master
{
/*[13, 14]*/
/*[13, 14]*/
printf(" Time step %4d\n", istep);
/*[13, 14]*/
}
}
/*[13, 14, 24]*/
#pragma omp parallel private(istep, i, j, k, m)
{
/*[13, 14, 24]*/
/*[13, 14, 24]*/
int _imopVarPre377;
/*[13, 14, 24]*/
int _imopVarPre378;
/*[13, 14, 24]*/
int _imopVarPre379;
/*[13, 14, 24]*/
int _imopVarPre380;
/*[13, 14, 24]*/
// Scale the residual by the time step: rsd = dt * rsd over the interior
// (i in ist..iend, j in jst..jend, k in 1..nz-2, all 5 components).
// nowait is safe: the explicit barrier that follows this loop (outside
// this span) orders it against the lower-triangular sweep.
#pragma omp for nowait
/*[13, 14, 24]*/
/*[13, 14, 24]*/
/*[13, 14, 24]*/
for (i = ist; i <= iend; i++) {
/*[13, 14, 24]*/
/*[13, 14, 24]*/
/*[13, 14, 24]*/
/*[13, 14, 24]*/
/*[13, 14, 24]*/
for (j = jst; j <= jend; j++) {
/*[13, 14, 24]*/
/*[13, 14, 24]*/
/*[13, 14, 24]*/
/*[13, 14, 24]*/
/*[13, 14, 24]*/
for (k = 1; k <= nz - 2; k++) {
/*[13, 14, 24]*/
/*[13, 14, 24]*/
/*[13, 14, 24]*/
/*[13, 14, 24]*/
/*[13, 14, 24]*/
for (m = 0; m < 5; m++) {
/*[13, 14, 24]*/
/*[13, 14, 24]*/
rsd[i][j][k][m] = dt * rsd[i][j][k][m];
}
}
}
}
/*[13, 14, 24]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 24]*/
#pragma omp barrier
/*[13, 14, 25]*/
/*[13, 14, 25]*/
/*[13, 14, 25]*/
/*[13, 14, 25]*/
// SSOR lower-triangular sweep: for each k-plane from the bottom up, form
// the lower-block Jacobian (jacld) and apply the block-lower-triangular
// solve (blts).  Every thread executes the full k loop inside the
// enclosing parallel region; any cross-thread ordering is presumably
// handled inside jacld/blts — NOTE(review): their bodies are not visible
// here, confirm against their definitions.
for (k = 1; k <= nz - 2; k++) {
/*[13, 14, 25]*/
/*[13, 14, 25]*/
jacld(k);
/*[13, 14, 25]*/
/*[13, 14, 25]*/
blts(nx, ny, nz, k, omega, rsd, a, b, c, d, ist, iend, jst, jend, nx0, ny0);
/*[13, 14, 25]*/
}
/*[13, 14, 25]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 25]*/
#pragma omp barrier
/*[13, 14, 26]*/
/*[13, 14, 26]*/
/*[13, 14, 26]*/
/*[13, 14, 26]*/
// SSOR upper-triangular sweep: for each k-plane from the top down, form
// the upper-block Jacobian (jacu) and apply the block-upper-triangular
// solve (buts) — the mirror of the jacld/blts sweep above.  Ordering
// across threads is presumably internal to jacu/buts — NOTE(review):
// bodies not visible here, confirm.
for (k = nz - 2; k >= 1; k--) {
/*[13, 14, 26]*/
/*[13, 14, 26]*/
jacu(k);
/*[13, 14, 26]*/
/*[13, 14, 26]*/
buts(nx, ny, nz, k, omega, rsd, tv, d, a, b, c, ist, iend, jst, jend, nx0, ny0);
/*[13, 14, 26]*/
}
/*[13, 14, 26]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 26]*/
#pragma omp barrier
/*[13, 14, 27]*/
// Apply the SSOR correction to the solution: u += tmp * rsd over the
// interior.  tmp is set outside this window — presumably the relaxation
// scaling 1/(omega*(2-omega)) used by the LU benchmark; TODO confirm at
// its definition.
#pragma omp for nowait
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
for (i = ist; i <= iend; i++) {
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
for (j = jst; j <= jend; j++) {
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
for (k = 1; k <= nz - 2; k++) {
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
for (m = 0; m < 5; m++) {
/*[13, 14, 27]*/
/*[13, 14, 27]*/
u[i][j][k][m] = u[i][j][k][m] + tmp * rsd[i][j][k][m];
}
}
}
}
/*[13, 14, 27]*/
/*[13, 14, 27]*/
// Every inorm steps, compute the L2 norm of the Newton correction rsd
// into delunm (same inlined-l2norm pattern as the rsdnm computation
// earlier in this file).
// NOTE(review): the enclosing `#pragma omp parallel` lists istep in its
// private(...) clause, so the istep read here is a private, apparently
// uninitialized copy — verify against the instrumentation tool's intent.
// NOTE(review): threads that skip this branch skip its barriers while
// threads inside hit them — confirm all threads evaluate the condition
// identically so the region's barriers match up.
if (istep % inorm == 0) {
/*[13, 14, 27]*/
/*[13, 14, 27]*/
// v aliases rsd; sum aliases the 5-element output delunm.
double ( *v )[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/*[13, 14, 27]*/
double *sum;
/*[13, 14, 27]*/
v = rsd;
/*[13, 14, 27]*/
sum = delunm;
/*[13, 14, 27]*/
int i_imopVarPre89;
/*[13, 14, 27]*/
int j_imopVarPre90;
/*[13, 14, 27]*/
int k_imopVarPre91;
/*[13, 14, 27]*/
int m_imopVarPre92;
/*[13, 14, 27]*/
// Per-thread partial sums, one per solution component.
double sum0 = 0.0;
/*[13, 14, 27]*/
double sum1 = 0.0;
/*[13, 14, 27]*/
double sum2 = 0.0;
/*[13, 14, 27]*/
double sum3 = 0.0;
/*[13, 14, 27]*/
double sum4 = 0.0;
/*[13, 14, 27]*/
// One thread zeroes the shared accumulator before the merge.
#pragma omp single nowait
{
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
for (m_imopVarPre92 = 0; m_imopVarPre92 < 5; m_imopVarPre92++) {
/*[13, 14, 27]*/
/*[13, 14, 27]*/
sum[m_imopVarPre92] = 0.0;
}
}
/*[13, 14, 27]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 27]*/
#pragma omp barrier
/*[13, 14, 28]*/
#pragma omp for nowait
/*[13, 14, 28]*/
/*[13, 14, 28]*/
/*[13, 14, 28]*/
for (i_imopVarPre89 = ist; i_imopVarPre89 <= iend; i_imopVarPre89++) {
/*[13, 14, 28]*/
/*[13, 14, 28]*/
/*[13, 14, 28]*/
/*[13, 14, 28]*/
/*[13, 14, 28]*/
for (j_imopVarPre90 = jst; j_imopVarPre90 <= jend; j_imopVarPre90++) {
/*[13, 14, 28]*/
/*[13, 14, 28]*/
/*[13, 14, 28]*/
/*[13, 14, 28]*/
/*[13, 14, 28]*/
for (k_imopVarPre91 = 1; k_imopVarPre91 <= nz0 - 2; k_imopVarPre91++) {
/*[13, 14, 28]*/
/*[13, 14, 28]*/
sum0 = sum0 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][0] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][0];
/*[13, 14, 28]*/
sum1 = sum1 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][1] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][1];
/*[13, 14, 28]*/
sum2 = sum2 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][2] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][2];
/*[13, 14, 28]*/
sum3 = sum3 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][3] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][3];
/*[13, 14, 28]*/
sum4 = sum4 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][4] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][4];
}
}
}
/*[13, 14, 28]*/
// #pragma omp dummyFlush CRITICAL_START
/*[13, 14, 28]*/
// Merge this thread's partial sums into the shared accumulator.
#pragma omp critical
{
/*[13, 14, 28]*/
/*[13, 14, 28]*/
sum[0] += sum0;
/*[13, 14, 28]*/
sum[1] += sum1;
/*[13, 14, 28]*/
sum[2] += sum2;
/*[13, 14, 28]*/
sum[3] += sum3;
/*[13, 14, 28]*/
sum[4] += sum4;
}
/*[13, 14, 28]*/
// #pragma omp dummyFlush CRITICAL_END
/*[13, 14, 28]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 28]*/
#pragma omp barrier
/*[13, 14, 29]*/
// One thread finalizes: mean square over the interior, then sqrt.
#pragma omp single nowait
{
/*[13, 14, 29]*/
/*[13, 14, 29]*/
/*[13, 14, 29]*/
/*[13, 14, 29]*/
/*[13, 14, 29]*/
for (m_imopVarPre92 = 0; m_imopVarPre92 < 5; m_imopVarPre92++) {
/*[13, 14, 29]*/
/*[13, 14, 29]*/
double _imopVarPre154;
/*[13, 14, 29]*/
double _imopVarPre155;
/*[13, 14, 29]*/
_imopVarPre154 = sum[m_imopVarPre92] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2));
/*[13, 14, 29]*/
_imopVarPre155 = sqrt(_imopVarPre154);
/*[13, 14, 29]*/
/*[13, 14, 29]*/
sum[m_imopVarPre92] = _imopVarPre155;
}
}
/*[13, 14, 29]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 29]*/
#pragma omp barrier
/*[13, 14, 30]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 30]*/
// NOTE(review): two consecutive barriers with no intervening work —
// likely an artifact of the instrumentation; harmless but redundant.
#pragma omp barrier
}
/*[13, 14, 27, 31]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 27, 31]*/
#pragma omp barrier
/*[13, 14, 28, 32]*/
int i_imopVarPre79;
/*[13, 14, 28, 32]*/
int j_imopVarPre80;
/*[13, 14, 28, 32]*/
int k_imopVarPre81;
/*[13, 14, 28, 32]*/
int m_imopVarPre82;
/*[13, 14, 28, 32]*/
int L1;
/*[13, 14, 28, 32]*/
int L2;
/*[13, 14, 28, 32]*/
int ist1;
/*[13, 14, 28, 32]*/
int iend1;
/*[13, 14, 28, 32]*/
int jst1;
/*[13, 14, 28, 32]*/
int jend1;
/*[13, 14, 28, 32]*/
double q;
/*[13, 14, 28, 32]*/
double u21;
/*[13, 14, 28, 32]*/
double u31;
/*[13, 14, 28, 32]*/
double u41;
/*[13, 14, 28, 32]*/
double tmp_imopVarPre83;
/*[13, 14, 28, 32]*/
double u21i;
/*[13, 14, 28, 32]*/
double u31i;
/*[13, 14, 28, 32]*/
double u41i;
/*[13, 14, 28, 32]*/
double u51i;
/*[13, 14, 28, 32]*/
double u21j;
/*[13, 14, 28, 32]*/
double u31j;
/*[13, 14, 28, 32]*/
double u41j;
/*[13, 14, 28, 32]*/
double u51j;
/*[13, 14, 28, 32]*/
double u21k;
/*[13, 14, 28, 32]*/
double u31k;
/*[13, 14, 28, 32]*/
double u41k;
/*[13, 14, 28, 32]*/
double u51k;
/*[13, 14, 28, 32]*/
double u21im1;
/*[13, 14, 28, 32]*/
double u31im1;
/*[13, 14, 28, 32]*/
double u41im1;
/*[13, 14, 28, 32]*/
double u51im1;
/*[13, 14, 28, 32]*/
double u21jm1;
/*[13, 14, 28, 32]*/
double u31jm1;
/*[13, 14, 28, 32]*/
double u41jm1;
/*[13, 14, 28, 32]*/
double u51jm1;
/*[13, 14, 28, 32]*/
double u21km1;
/*[13, 14, 28, 32]*/
double u31km1;
/*[13, 14, 28, 32]*/
double u41km1;
/*[13, 14, 28, 32]*/
double u51km1;
/*[13, 14, 28, 32]*/
// Start of the inlined rhs(): initialize the residual with the negated
// forcing term, rsd = -frct, over the WHOLE grid (0..nx-1, 0..ny-1,
// 0..nz-1) — boundary points included, unlike the interior-only loops.
#pragma omp for nowait
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
for (i_imopVarPre79 = 0; i_imopVarPre79 <= nx - 1; i_imopVarPre79++) {
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
for (j_imopVarPre80 = 0; j_imopVarPre80 <= ny - 1; j_imopVarPre80++) {
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
for (k_imopVarPre81 = 0; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) {
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = -frct[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82];
}
}
}
}
/*[13, 14, 28, 32]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 28, 32]*/
#pragma omp barrier
/*[13, 14, 29, 33]*/
// xi-direction (i) convective fluxes.  flux[..][0..4] holds mass,
// momentum, and energy fluxes of the compressible Euler equations built
// from the conserved variables u: u21 = x-velocity, q = dynamic-pressure
// term; 0.40e+00 = gamma-1 and 1.40e+00 = gamma for air.
// L1/L2 are this thread's private copies (declared in the parallel
// region), so writing them here is race-free.
L1 = 0;
/*[13, 14, 29, 33]*/
L2 = nx - 1;
/*[13, 14, 29, 33]*/
#pragma omp for nowait
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
for (i_imopVarPre79 = L1; i_imopVarPre79 <= L2; i_imopVarPre79++) {
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) {
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1];
/*[13, 14, 29, 33]*/
u21 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[13, 14, 29, 33]*/
q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[13, 14, 29, 33]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u21 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q);
/*[13, 14, 29, 33]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u21;
/*[13, 14, 29, 33]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u21;
/*[13, 14, 29, 33]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u21;
}
}
}
/*[13, 14, 29, 33]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 29, 33]*/
#pragma omp barrier
/*[13, 14, 30, 34]*/
// xi-direction (i) contribution to the residual.  For each (j,k) column:
//   1) central flux difference:   rsd -= tx2 * (flux[i+1] - flux[i-1])
//   2) viscous fluxes from velocity differences (tx3 terms)
//   3) add viscous-flux difference plus the second-difference damping
//      (dx*·tx1 terms)
//   4) fourth-order artificial dissipation: one-sided stencils at
//      i = 1, 2 and i = nx-3, nx-2, full 5-point stencil in between.
// Note: this loop is parallel over j while the first rhs loop was
// parallel over i — the barrier preceding this span separates them.
#pragma omp for nowait
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) {
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) {
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - tx2 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - flux[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
}
}
/*[13, 14, 30, 34]*/
// Viscous fluxes: overwrite flux[ist..nx-1][..][1..4] with velocity/
// energy gradients between i and i-1 (tmp_imopVarPre83 = 1/density).
L2 = nx - 1;
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= L2; i_imopVarPre79++) {
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[13, 14, 30, 34]*/
u21i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1];
/*[13, 14, 30, 34]*/
u31i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2];
/*[13, 14, 30, 34]*/
u41i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3];
/*[13, 14, 30, 34]*/
u51i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4];
/*[13, 14, 30, 34]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][0];
/*[13, 14, 30, 34]*/
u21im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][1];
/*[13, 14, 30, 34]*/
u31im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][2];
/*[13, 14, 30, 34]*/
u41im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][3];
/*[13, 14, 30, 34]*/
u51im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][4];
/*[13, 14, 30, 34]*/
// 4/3 factor on the normal component (compressible viscous stress).
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1);
/*[13, 14, 30, 34]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = tx3 * (u31i - u31im1);
/*[13, 14, 30, 34]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = tx3 * (u41i - u41im1);
/*[13, 14, 30, 34]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);
}
/*[13, 14, 30, 34]*/
// Add viscous-flux differences and second-difference damping terms.
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) {
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dx1 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][0]);
/*[13, 14, 30, 34]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dx2 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][1]);
/*[13, 14, 30, 34]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dx3 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][2]);
/*[13, 14, 30, 34]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dx4 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][3]);
/*[13, 14, 30, 34]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dx5 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][4]);
}
/*[13, 14, 30, 34]*/
// Fourth-order dissipation, one-sided stencils near the i=0 boundary.
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
rsd[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (+5.0 * u[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
/*[13, 14, 30, 34]*/
rsd[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (-4.0 * u[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
}
/*[13, 14, 30, 34]*/
// Full 5-point (1,-4,6,-4,1) dissipation stencil in the interior.
ist1 = 3;
/*[13, 14, 30, 34]*/
iend1 = nx - 4;
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
for (i_imopVarPre79 = ist1; i_imopVarPre79 <= iend1; i_imopVarPre79++) {
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79 - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79 + 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
}
}
/*[13, 14, 30, 34]*/
// One-sided stencils near the i=nx-1 boundary.
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
rsd[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[nx - 5][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
/*[13, 14, 30, 34]*/
rsd[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[nx - 4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 5.0 * u[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
}
}
}
/*[13, 14, 30, 34]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 30, 34]*/
#pragma omp barrier
/*[13, 14, 31, 35]*/
// eta-direction (j) convective fluxes, analogous to the xi-direction
// loop above but with u31 = y-velocity and the pressure term moved to
// component 2.  L1/L2 are thread-private.
L1 = 0;
/*[13, 14, 31, 35]*/
L2 = ny - 1;
/*[13, 14, 31, 35]*/
#pragma omp for nowait
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) {
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
for (j_imopVarPre80 = L1; j_imopVarPre80 <= L2; j_imopVarPre80++) {
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2];
/*[13, 14, 31, 35]*/
u31 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[13, 14, 31, 35]*/
q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[13, 14, 31, 35]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u31;
/*[13, 14, 31, 35]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u31 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q);
/*[13, 14, 31, 35]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u31;
/*[13, 14, 31, 35]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u31;
}
}
}
/*[13, 14, 31, 35]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 31, 35]*/
#pragma omp barrier
/*[13, 14, 32, 36]*/
// eta-direction (j) contribution to the residual — mirror of the
// xi-direction block: central flux differences (ty2), viscous fluxes
// (ty3, with the 4/3 factor now on the j-velocity gradient), second-
// difference damping (dy*·ty1), and fourth-order dissipation with
// one-sided stencils at j = 1, 2 and j = ny-3, ny-2.
#pragma omp for nowait
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) {
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) {
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - ty2 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][m_imopVarPre82] - flux[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][m_imopVarPre82]);
}
}
/*[13, 14, 32, 36]*/
// Viscous fluxes from gradients between j and j-1 (tmp = 1/density).
L2 = ny - 1;
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= L2; j_imopVarPre80++) {
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[13, 14, 32, 36]*/
u21j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1];
/*[13, 14, 32, 36]*/
u31j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2];
/*[13, 14, 32, 36]*/
u41j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3];
/*[13, 14, 32, 36]*/
u51j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4];
/*[13, 14, 32, 36]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][0];
/*[13, 14, 32, 36]*/
u21jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][1];
/*[13, 14, 32, 36]*/
u31jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][2];
/*[13, 14, 32, 36]*/
u41jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][3];
/*[13, 14, 32, 36]*/
u51jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][4];
/*[13, 14, 32, 36]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = ty3 * (u21j - u21jm1);
/*[13, 14, 32, 36]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1);
/*[13, 14, 32, 36]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = ty3 * (u41j - u41jm1);
/*[13, 14, 32, 36]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);
}
/*[13, 14, 32, 36]*/
// Add viscous-flux differences and second-difference damping terms.
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) {
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dy1 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][0]);
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dy2 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][1]);
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dy3 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][2]);
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dy4 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][3]);
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dy5 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][4]);
}
/*[13, 14, 32, 36]*/
// Fourth-order dissipation, one-sided stencils near the j=0 boundary.
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] - dssp * (+5.0 * u[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][3][k_imopVarPre81][m_imopVarPre82]);
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] - dssp * (-4.0 * u[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][3][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][4][k_imopVarPre81][m_imopVarPre82]);
}
/*[13, 14, 32, 36]*/
// Full 5-point (1,-4,6,-4,1) dissipation stencil in the interior.
jst1 = 3;
/*[13, 14, 32, 36]*/
jend1 = ny - 4;
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
for (j_imopVarPre80 = jst1; j_imopVarPre80 <= jend1; j_imopVarPre80++) {
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80 - 2][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80 + 2][k_imopVarPre81][m_imopVarPre82]);
}
}
/*[13, 14, 32, 36]*/
// One-sided stencils near the j=ny-1 boundary.
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][ny - 5][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 4][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82]);
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][ny - 4][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] + 5.0 * u[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82]);
}
}
}
/*[13, 14, 32, 36]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 32, 36]*/
#pragma omp barrier
/*[13, 14, 33, 37]*/
#pragma omp for nowait
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
for (k_imopVarPre81 = 0; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3];
/*[13, 14, 33, 37]*/
u41 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[13, 14, 33, 37]*/
q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[13, 14, 33, 37]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u41;
/*[13, 14, 33, 37]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u41;
/*[13, 14, 33, 37]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u41 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q);
/*[13, 14, 33, 37]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u41;
}
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - tz2 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][m_imopVarPre82] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][m_imopVarPre82]);
}
}
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[13, 14, 33, 37]*/
u21k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1];
/*[13, 14, 33, 37]*/
u31k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2];
/*[13, 14, 33, 37]*/
u41k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3];
/*[13, 14, 33, 37]*/
u51k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4];
/*[13, 14, 33, 37]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][0];
/*[13, 14, 33, 37]*/
u21km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][1];
/*[13, 14, 33, 37]*/
u31km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][2];
/*[13, 14, 33, 37]*/
u41km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][3];
/*[13, 14, 33, 37]*/
u51km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][4];
/*[13, 14, 33, 37]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = tz3 * (u21k - u21km1);
/*[13, 14, 33, 37]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = tz3 * (u31k - u31km1);
/*[13, 14, 33, 37]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1);
/*[13, 14, 33, 37]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);
}
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dz1 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][0]);
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dz2 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][1]);
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dz3 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][2]);
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dz4 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][3]);
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dz5 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][4]);
}
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] - dssp * (+5.0 * u[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][3][m_imopVarPre82]);
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] - dssp * (-4.0 * u[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][3][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][4][m_imopVarPre82]);
}
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
for (k_imopVarPre81 = 3; k_imopVarPre81 <= nz - 4; k_imopVarPre81++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 2][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 2][m_imopVarPre82]);
}
}
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][nz - 5][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 4][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82]);
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][nz - 4][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] + 5.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82]);
}
}
}
/*[13, 14, 33, 37]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 33, 37]*/
#pragma omp barrier
/*[13, 14, 34, 38]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 34, 38]*/
#pragma omp barrier
/*[13, 14, 35, 39]*/
#pragma omp master
{
/*[13, 14, 35, 39]*/
/*[13, 14, 35, 39]*/
_imopVarPre372 = (istep % inorm == 0);
/*[13, 14, 35, 39]*/
/*[13, 14, 35, 39]*/
if (!_imopVarPre372) {
/*[13, 14, 35, 39]*/
/*[13, 14, 35, 39]*/
_imopVarPre372 = (istep == itmax);
}
}
/*[13, 14, 35, 39]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 35, 39]*/
#pragma omp barrier
/*[13, 14, 36, 40]*/
/*[13, 14, 36, 40]*/
if (_imopVarPre372) {
/*[13, 14, 36, 40]*/
/*[13, 14, 36, 40]*/
double ( *v )[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/*[13, 14, 36, 40]*/
double *sum;
/*[13, 14, 36, 40]*/
v = rsd;
/*[13, 14, 36, 40]*/
sum = rsdnm;
/*[13, 14, 36, 40]*/
int i_imopVarPre93;
/*[13, 14, 36, 40]*/
int j_imopVarPre94;
/*[13, 14, 36, 40]*/
int k_imopVarPre95;
/*[13, 14, 36, 40]*/
int m_imopVarPre96;
/*[13, 14, 36, 40]*/
double sum0 = 0.0;
/*[13, 14, 36, 40]*/
double sum1 = 0.0;
/*[13, 14, 36, 40]*/
double sum2 = 0.0;
/*[13, 14, 36, 40]*/
double sum3 = 0.0;
/*[13, 14, 36, 40]*/
double sum4 = 0.0;
/*[13, 14, 36, 40]*/
#pragma omp single nowait
{
/*[13, 14, 36, 40]*/
/*[13, 14, 36, 40]*/
/*[13, 14, 36, 40]*/
/*[13, 14, 36, 40]*/
/*[13, 14, 36, 40]*/
for (m_imopVarPre96 = 0; m_imopVarPre96 < 5; m_imopVarPre96++) {
/*[13, 14, 36, 40]*/
/*[13, 14, 36, 40]*/
sum[m_imopVarPre96] = 0.0;
}
}
/*[13, 14, 36, 40]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 36, 40]*/
#pragma omp barrier
/*[13, 14, 37]*/
#pragma omp for nowait
/*[13, 14, 37]*/
/*[13, 14, 37]*/
/*[13, 14, 37]*/
for (i_imopVarPre93 = ist; i_imopVarPre93 <= iend; i_imopVarPre93++) {
/*[13, 14, 37]*/
/*[13, 14, 37]*/
/*[13, 14, 37]*/
/*[13, 14, 37]*/
/*[13, 14, 37]*/
for (j_imopVarPre94 = jst; j_imopVarPre94 <= jend; j_imopVarPre94++) {
/*[13, 14, 37]*/
/*[13, 14, 37]*/
/*[13, 14, 37]*/
/*[13, 14, 37]*/
/*[13, 14, 37]*/
for (k_imopVarPre95 = 1; k_imopVarPre95 <= nz0 - 2; k_imopVarPre95++) {
/*[13, 14, 37]*/
/*[13, 14, 37]*/
sum0 = sum0 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][0] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][0];
/*[13, 14, 37]*/
sum1 = sum1 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][1] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][1];
/*[13, 14, 37]*/
sum2 = sum2 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][2] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][2];
/*[13, 14, 37]*/
sum3 = sum3 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][3] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][3];
/*[13, 14, 37]*/
sum4 = sum4 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][4] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][4];
}
}
}
/*[13, 14, 37]*/
// #pragma omp dummyFlush CRITICAL_START
/*[13, 14, 37]*/
#pragma omp critical
{
/*[13, 14, 37]*/
/*[13, 14, 37]*/
sum[0] += sum0;
/*[13, 14, 37]*/
sum[1] += sum1;
/*[13, 14, 37]*/
sum[2] += sum2;
/*[13, 14, 37]*/
sum[3] += sum3;
/*[13, 14, 37]*/
sum[4] += sum4;
}
/*[13, 14, 37]*/
// #pragma omp dummyFlush CRITICAL_END
/*[13, 14, 37]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 37]*/
#pragma omp barrier
/*[13, 14, 38]*/
#pragma omp single nowait
{
/*[13, 14, 38]*/
/*[13, 14, 38]*/
/*[13, 14, 38]*/
/*[13, 14, 38]*/
/*[13, 14, 38]*/
for (m_imopVarPre96 = 0; m_imopVarPre96 < 5; m_imopVarPre96++) {
/*[13, 14, 38]*/
/*[13, 14, 38]*/
double _imopVarPre154;
/*[13, 14, 38]*/
double _imopVarPre155;
/*[13, 14, 38]*/
_imopVarPre154 = sum[m_imopVarPre96] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2));
/*[13, 14, 38]*/
_imopVarPre155 = sqrt(_imopVarPre154);
/*[13, 14, 38]*/
/*[13, 14, 38]*/
sum[m_imopVarPre96] = _imopVarPre155;
}
}
/*[13, 14, 38]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 38]*/
#pragma omp barrier
/*[13, 14, 39]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 39]*/
#pragma omp barrier
}
/*[13, 14, 36, 40]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 36, 40]*/
#pragma omp barrier
/*[13, 14, 37]*/
#pragma omp master
{
/*[13, 14, 37]*/
/*[13, 14, 37]*/
_imopVarPre377 = (rsdnm[0] < tolrsd[0]);
/*[13, 14, 37]*/
/*[13, 14, 37]*/
if (_imopVarPre377) {
/*[13, 14, 37]*/
/*[13, 14, 37]*/
_imopVarPre378 = (rsdnm[1] < tolrsd[1]);
/*[13, 14, 37]*/
/*[13, 14, 37]*/
if (_imopVarPre378) {
/*[13, 14, 37]*/
/*[13, 14, 37]*/
_imopVarPre379 = (rsdnm[2] < tolrsd[2]);
/*[13, 14, 37]*/
/*[13, 14, 37]*/
if (_imopVarPre379) {
/*[13, 14, 37]*/
/*[13, 14, 37]*/
_imopVarPre380 = (rsdnm[3] < tolrsd[3]);
/*[13, 14, 37]*/
/*[13, 14, 37]*/
if (_imopVarPre380) {
/*[13, 14, 37]*/
/*[13, 14, 37]*/
_imopVarPre380 = (rsdnm[4] < tolrsd[4]);
}
/*[13, 14, 37]*/
_imopVarPre379 = _imopVarPre380;
}
/*[13, 14, 37]*/
_imopVarPre378 = _imopVarPre379;
}
/*[13, 14, 37]*/
_imopVarPre377 = _imopVarPre378;
}
/*[13, 14, 37]*/
/*[13, 14, 37]*/
if (_imopVarPre377) {
/*[13, 14, 37]*/
/*[13, 14, 37]*/
exit(1);
/*[13, 14, 37]*/
}
}
}
}
/*[13, 14]*/
timer_stop(1);
/*[13, 14]*/
/*[13, 14]*/
maxtime = timer_read(1);
/*[13, 14]*/
/*[]*/
error();
/*[]*/
/*[]*/
pintgr();
/*[]*/
/*[]*/
int *_imopVarPre144;
/*[]*/
char *_imopVarPre145;
/*[]*/
_imopVarPre144 = &verified;
/*[]*/
_imopVarPre145 = &class;
/*[]*/
verify(rsdnm, errnm, frc, _imopVarPre145, _imopVarPre144);
/*[]*/
/*[]*/
mflops = (double) itmax * (1984.77 * (double) nx0 * (double) ny0 * (double) nz0 - 10923.3 * (((double) (nx0 + ny0 + nz0) / 3.0) * ((double) (nx0 + ny0 + nz0) / 3.0)) + 27770.9 * (double) (nx0 + ny0 + nz0) / 3.0 - 144010.0) / (maxtime * 1000000.0);
/*[]*/
c_print_results("LU", class, nx0, ny0, nz0, itmax, nthreads, maxtime, mflops, " floating point", verified, "3.0 structured", "21 Jul 2017", "gcc", "gcc", "(none)", "-I../common", "-O3 -fopenmp", "-O3 -fopenmp", "(none)");
/*[]*/
}
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*
 * blts: block lower-triangular solve for one k-plane of the SSOR iteration.
 *
 * Solves  (D + omega*L) * v' = v  restricted to plane k, where L is the
 * strictly lower-triangular coupling in the k (ldz), j (ldy) and i (ldx)
 * directions and D is the 5x5 block diagonal (d).  v is updated in place.
 *
 * Parameters:
 *   nx, ny, nz        grid extents (nx, ny, nz unused directly here beyond
 *                     the loop bounds passed in; kept for interface symmetry)
 *   k                 index of the k-plane being solved
 *   omega             SSOR relaxation factor
 *   v                 solution/RHS field, updated in place on plane k
 *   ldz, ldy, ldx     5x5 lower-triangular coupling blocks per (i,j)
 *   d                 5x5 diagonal blocks per (i,j)
 *   ist..iend,
 *   jst..jend         interior index ranges swept by this routine
 *   nx0, ny0          global problem sizes (unused here; interface symmetry)
 *
 * Concurrency: must be called inside an OpenMP parallel region.  The second
 * i-loop is a software pipeline: row i depends on row i-1 (through the
 * ldx * v[i-1][j][k] term), so threads hand off rows via the shared `flag`
 * array (declared elsewhere in this file) using spin-waits plus
 * `#pragma omp flush(flag)`.  NOTE(review): `flag` is plain (non-atomic)
 * shared state; this is the classic NPB-2.3/3.0 OpenMP idiom and relies on
 * flush semantics — do not reorder the flag stores or remove the flushes.
 */
static void blts(int nx, int ny , int nz , int k , double omega , double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5] , double ldz[12][12][5][5] , double ldy[12][12][5][5] , double ldx[12][12][5][5] , double d[12][12][5][5] , int ist , int iend , int jst , int jend , int nx0 , int ny0) {
    int i;
    int j;
    int m;
    double tmp;
    double tmp1;
    double tmat[5][5];  /* scratch copy of the 5x5 diagonal block, destroyed by elimination */

    /* Phase 1: subtract the k-direction (lower plane) coupling.  Only reads
     * plane k-1, which is already final, so rows are fully independent. */
    #pragma omp for nowait schedule(static)
    for (i = ist; i <= iend; i++) {
        for (j = jst; j <= jend; j++) {
            for (m = 0; m < 5; m++) {
                v[i][j][k][m] = v[i][j][k][m] - omega * (ldz[i][j][m][0] * v[i][j][k - 1][0] + ldz[i][j][m][1] * v[i][j][k - 1][1] + ldz[i][j][m][2] * v[i][j][k - 1][2] + ldz[i][j][m][3] * v[i][j][k - 1][3] + ldz[i][j][m][4] * v[i][j][k - 1][4]);
            }
        }
    }

    /* Phase 2: pipelined sweep over i.  schedule(static) guarantees the same
     * thread-to-row mapping as phase 1, which the flag protocol depends on. */
    #pragma omp for nowait schedule(static)
    for (i = ist; i <= iend; i++) {
        if (i != ist) {
            /* Wait until the owner of row i-1 has finished it. */
            while (flag[i - 1] == 0) {
                #pragma omp flush(flag)
                ;
            }
        }
        if (i != iend) {
            /* Wait until the consumer of row i has drained the previous handoff. */
            while (flag[i] == 1) {
                #pragma omp flush(flag)
                ;
            }
        }
        for (j = jst; j <= jend; j++) {
            /* Subtract the j- and i-direction lower couplings; v[i-1][j][k]
             * is valid because of the flag wait above. */
            for (m = 0; m < 5; m++) {
                v[i][j][k][m] = v[i][j][k][m] - omega * (ldy[i][j][m][0] * v[i][j - 1][k][0] + ldx[i][j][m][0] * v[i - 1][j][k][0] + ldy[i][j][m][1] * v[i][j - 1][k][1] + ldx[i][j][m][1] * v[i - 1][j][k][1] + ldy[i][j][m][2] * v[i][j - 1][k][2] + ldx[i][j][m][2] * v[i - 1][j][k][2] + ldy[i][j][m][3] * v[i][j - 1][k][3] + ldx[i][j][m][3] * v[i - 1][j][k][3] + ldy[i][j][m][4] * v[i][j - 1][k][4] + ldx[i][j][m][4] * v[i - 1][j][k][4]);
            }
            /* Copy the diagonal block so the LU factorization below can
             * overwrite it without touching d. */
            for (m = 0; m < 5; m++) {
                tmat[m][0] = d[i][j][m][0];
                tmat[m][1] = d[i][j][m][1];
                tmat[m][2] = d[i][j][m][2];
                tmat[m][3] = d[i][j][m][3];
                tmat[m][4] = d[i][j][m][4];
            }
            /* Unpivoted Gaussian elimination of the 5x5 system
             * tmat * x = v[i][j][k], fully unrolled.  Forward elimination
             * first (columns 0..3), updating the RHS in lockstep. */
            tmp1 = 1.0 / tmat[0][0];
            tmp = tmp1 * tmat[1][0];
            tmat[1][1] = tmat[1][1] - tmp * tmat[0][1];
            tmat[1][2] = tmat[1][2] - tmp * tmat[0][2];
            tmat[1][3] = tmat[1][3] - tmp * tmat[0][3];
            tmat[1][4] = tmat[1][4] - tmp * tmat[0][4];
            v[i][j][k][1] = v[i][j][k][1] - v[i][j][k][0] * tmp;
            tmp = tmp1 * tmat[2][0];
            tmat[2][1] = tmat[2][1] - tmp * tmat[0][1];
            tmat[2][2] = tmat[2][2] - tmp * tmat[0][2];
            tmat[2][3] = tmat[2][3] - tmp * tmat[0][3];
            tmat[2][4] = tmat[2][4] - tmp * tmat[0][4];
            v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][0] * tmp;
            tmp = tmp1 * tmat[3][0];
            tmat[3][1] = tmat[3][1] - tmp * tmat[0][1];
            tmat[3][2] = tmat[3][2] - tmp * tmat[0][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[0][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[0][4];
            v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][0] * tmp;
            tmp = tmp1 * tmat[4][0];
            tmat[4][1] = tmat[4][1] - tmp * tmat[0][1];
            tmat[4][2] = tmat[4][2] - tmp * tmat[0][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[0][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[0][4];
            v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][0] * tmp;
            tmp1 = 1.0 / tmat[1][1];
            tmp = tmp1 * tmat[2][1];
            tmat[2][2] = tmat[2][2] - tmp * tmat[1][2];
            tmat[2][3] = tmat[2][3] - tmp * tmat[1][3];
            tmat[2][4] = tmat[2][4] - tmp * tmat[1][4];
            v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][1] * tmp;
            tmp = tmp1 * tmat[3][1];
            tmat[3][2] = tmat[3][2] - tmp * tmat[1][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[1][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[1][4];
            v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][1] * tmp;
            tmp = tmp1 * tmat[4][1];
            tmat[4][2] = tmat[4][2] - tmp * tmat[1][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[1][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[1][4];
            v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][1] * tmp;
            tmp1 = 1.0 / tmat[2][2];
            tmp = tmp1 * tmat[3][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[2][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[2][4];
            v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][2] * tmp;
            tmp = tmp1 * tmat[4][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[2][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[2][4];
            v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][2] * tmp;
            tmp1 = 1.0 / tmat[3][3];
            tmp = tmp1 * tmat[4][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[3][4];
            v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][3] * tmp;
            /* Back substitution: solve from component 4 up to 0. */
            v[i][j][k][4] = v[i][j][k][4] / tmat[4][4];
            v[i][j][k][3] = v[i][j][k][3] - tmat[3][4] * v[i][j][k][4];
            v[i][j][k][3] = v[i][j][k][3] / tmat[3][3];
            v[i][j][k][2] = v[i][j][k][2] - tmat[2][3] * v[i][j][k][3] - tmat[2][4] * v[i][j][k][4];
            v[i][j][k][2] = v[i][j][k][2] / tmat[2][2];
            v[i][j][k][1] = v[i][j][k][1] - tmat[1][2] * v[i][j][k][2] - tmat[1][3] * v[i][j][k][3] - tmat[1][4] * v[i][j][k][4];
            v[i][j][k][1] = v[i][j][k][1] / tmat[1][1];
            v[i][j][k][0] = v[i][j][k][0] - tmat[0][1] * v[i][j][k][1] - tmat[0][2] * v[i][j][k][2] - tmat[0][3] * v[i][j][k][3] - tmat[0][4] * v[i][j][k][4];
            v[i][j][k][0] = v[i][j][k][0] / tmat[0][0];
        }
        /* Hand off: consume the predecessor's token and publish our own. */
        if (i != ist) {
            flag[i - 1] = 0;
        }
        if (i != iend) {
            flag[i] = 1;
        }
        #pragma omp flush(flag)
    }
}
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*
 * buts: block upper-triangular solve for one k-plane of the SSOR iteration.
 *
 * Mirror image of blts(): solves  (D + omega*U) * dv = tv  on plane k, where
 * U is the strictly upper-triangular coupling in the k (udz), j (udy) and
 * i (udx) directions, then applies the correction v -= dv in place.  All
 * sweeps therefore run BACKWARD (iend..ist, jend..jst) and read the already
 * final plane k+1 / row i+1 / column j+1.
 *
 * Parameters:
 *   nx, ny, nz        grid extents (unused directly; interface symmetry)
 *   k                 index of the k-plane being solved
 *   omega             SSOR relaxation factor
 *   v                 solution field, corrected in place on plane k
 *   tv                per-(i,j) 5-vector scratch holding the RHS being solved
 *   d                 5x5 diagonal blocks per (i,j)
 *   udx, udy, udz     5x5 upper-triangular coupling blocks per (i,j)
 *   ist..iend,
 *   jst..jend         interior index ranges swept by this routine
 *   nx0, ny0          global problem sizes (unused here; interface symmetry)
 *
 * Concurrency: must be called inside an OpenMP parallel region.  Row i
 * depends on row i+1 (udx * v[i+1][j][k]), so the backward i-loop is
 * pipelined with the shared `flag` array (declared elsewhere) via spin-waits
 * plus `#pragma omp flush(flag)` — same protocol as blts() with the roles of
 * i-1/i+1 swapped.  NOTE(review): relies on flush semantics over non-atomic
 * shared flags; do not reorder the flag stores or drop the flushes.
 */
static void buts(int nx, int ny , int nz , int k , double omega , double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5] , double tv[12][12][5] , double d[12][12][5][5] , double udx[12][12][5][5] , double udy[12][12][5][5] , double udz[12][12][5][5] , int ist , int iend , int jst , int jend , int nx0 , int ny0) {
    int i;
    int j;
    int m;
    double tmp;
    double tmp1;
    double tmat[5][5];  /* scratch copy of the 5x5 diagonal block, destroyed by elimination */

    /* Phase 1: k-direction (upper plane) coupling into tv.  Reads only the
     * already-final plane k+1, so rows are independent. */
    #pragma omp for nowait schedule(static)
    for (i = iend; i >= ist; i--) {
        for (j = jend; j >= jst; j--) {
            for (m = 0; m < 5; m++) {
                tv[i][j][m] = omega * (udz[i][j][m][0] * v[i][j][k + 1][0] + udz[i][j][m][1] * v[i][j][k + 1][1] + udz[i][j][m][2] * v[i][j][k + 1][2] + udz[i][j][m][3] * v[i][j][k + 1][3] + udz[i][j][m][4] * v[i][j][k + 1][4]);
            }
        }
    }

    /* Phase 2: pipelined backward sweep over i.  schedule(static) keeps the
     * thread-to-row mapping identical to phase 1, as the flag protocol needs. */
    #pragma omp for nowait schedule(static)
    for (i = iend; i >= ist; i--) {
        if (i != iend) {
            /* Wait until the owner of row i+1 has finished it. */
            while (flag[i + 1] == 0) {
                #pragma omp flush(flag)
                ;
            }
        }
        if (i != ist) {
            /* Wait until the consumer of row i has drained the previous handoff. */
            while (flag[i] == 1) {
                #pragma omp flush(flag)
                ;
            }
        }
        for (j = jend; j >= jst; j--) {
            /* Add the j- and i-direction upper couplings; v[i+1][j][k] is
             * valid because of the flag wait above. */
            for (m = 0; m < 5; m++) {
                tv[i][j][m] = tv[i][j][m] + omega * (udy[i][j][m][0] * v[i][j + 1][k][0] + udx[i][j][m][0] * v[i + 1][j][k][0] + udy[i][j][m][1] * v[i][j + 1][k][1] + udx[i][j][m][1] * v[i + 1][j][k][1] + udy[i][j][m][2] * v[i][j + 1][k][2] + udx[i][j][m][2] * v[i + 1][j][k][2] + udy[i][j][m][3] * v[i][j + 1][k][3] + udx[i][j][m][3] * v[i + 1][j][k][3] + udy[i][j][m][4] * v[i][j + 1][k][4] + udx[i][j][m][4] * v[i + 1][j][k][4]);
            }
            /* Copy the diagonal block so the elimination can overwrite it. */
            for (m = 0; m < 5; m++) {
                tmat[m][0] = d[i][j][m][0];
                tmat[m][1] = d[i][j][m][1];
                tmat[m][2] = d[i][j][m][2];
                tmat[m][3] = d[i][j][m][3];
                tmat[m][4] = d[i][j][m][4];
            }
            /* Unpivoted Gaussian elimination of tmat * dv = tv[i][j],
             * fully unrolled; RHS (tv) updated in lockstep. */
            tmp1 = 1.0 / tmat[0][0];
            tmp = tmp1 * tmat[1][0];
            tmat[1][1] = tmat[1][1] - tmp * tmat[0][1];
            tmat[1][2] = tmat[1][2] - tmp * tmat[0][2];
            tmat[1][3] = tmat[1][3] - tmp * tmat[0][3];
            tmat[1][4] = tmat[1][4] - tmp * tmat[0][4];
            tv[i][j][1] = tv[i][j][1] - tv[i][j][0] * tmp;
            tmp = tmp1 * tmat[2][0];
            tmat[2][1] = tmat[2][1] - tmp * tmat[0][1];
            tmat[2][2] = tmat[2][2] - tmp * tmat[0][2];
            tmat[2][3] = tmat[2][3] - tmp * tmat[0][3];
            tmat[2][4] = tmat[2][4] - tmp * tmat[0][4];
            tv[i][j][2] = tv[i][j][2] - tv[i][j][0] * tmp;
            tmp = tmp1 * tmat[3][0];
            tmat[3][1] = tmat[3][1] - tmp * tmat[0][1];
            tmat[3][2] = tmat[3][2] - tmp * tmat[0][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[0][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[0][4];
            tv[i][j][3] = tv[i][j][3] - tv[i][j][0] * tmp;
            tmp = tmp1 * tmat[4][0];
            tmat[4][1] = tmat[4][1] - tmp * tmat[0][1];
            tmat[4][2] = tmat[4][2] - tmp * tmat[0][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[0][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[0][4];
            tv[i][j][4] = tv[i][j][4] - tv[i][j][0] * tmp;
            tmp1 = 1.0 / tmat[1][1];
            tmp = tmp1 * tmat[2][1];
            tmat[2][2] = tmat[2][2] - tmp * tmat[1][2];
            tmat[2][3] = tmat[2][3] - tmp * tmat[1][3];
            tmat[2][4] = tmat[2][4] - tmp * tmat[1][4];
            tv[i][j][2] = tv[i][j][2] - tv[i][j][1] * tmp;
            tmp = tmp1 * tmat[3][1];
            tmat[3][2] = tmat[3][2] - tmp * tmat[1][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[1][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[1][4];
            tv[i][j][3] = tv[i][j][3] - tv[i][j][1] * tmp;
            tmp = tmp1 * tmat[4][1];
            tmat[4][2] = tmat[4][2] - tmp * tmat[1][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[1][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[1][4];
            tv[i][j][4] = tv[i][j][4] - tv[i][j][1] * tmp;
            tmp1 = 1.0 / tmat[2][2];
            tmp = tmp1 * tmat[3][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[2][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[2][4];
            tv[i][j][3] = tv[i][j][3] - tv[i][j][2] * tmp;
            tmp = tmp1 * tmat[4][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[2][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[2][4];
            tv[i][j][4] = tv[i][j][4] - tv[i][j][2] * tmp;
            tmp1 = 1.0 / tmat[3][3];
            tmp = tmp1 * tmat[4][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[3][4];
            tv[i][j][4] = tv[i][j][4] - tv[i][j][3] * tmp;
            /* Back substitution: recover dv from component 4 down to 0. */
            tv[i][j][4] = tv[i][j][4] / tmat[4][4];
            tv[i][j][3] = tv[i][j][3] - tmat[3][4] * tv[i][j][4];
            tv[i][j][3] = tv[i][j][3] / tmat[3][3];
            tv[i][j][2] = tv[i][j][2] - tmat[2][3] * tv[i][j][3] - tmat[2][4] * tv[i][j][4];
            tv[i][j][2] = tv[i][j][2] / tmat[2][2];
            tv[i][j][1] = tv[i][j][1] - tmat[1][2] * tv[i][j][2] - tmat[1][3] * tv[i][j][3] - tmat[1][4] * tv[i][j][4];
            tv[i][j][1] = tv[i][j][1] / tmat[1][1];
            tv[i][j][0] = tv[i][j][0] - tmat[0][1] * tv[i][j][1] - tmat[0][2] * tv[i][j][2] - tmat[0][3] * tv[i][j][3] - tmat[0][4] * tv[i][j][4];
            tv[i][j][0] = tv[i][j][0] / tmat[0][0];
            /* Apply the correction to the solution field. */
            v[i][j][k][0] = v[i][j][k][0] - tv[i][j][0];
            v[i][j][k][1] = v[i][j][k][1] - tv[i][j][1];
            v[i][j][k][2] = v[i][j][k][2] - tv[i][j][2];
            v[i][j][k][3] = v[i][j][k][3] - tv[i][j][3];
            v[i][j][k][4] = v[i][j][k][4] - tv[i][j][4];
        }
        /* Hand off: consume the successor's token and publish our own. */
        if (i != iend) {
            flag[i + 1] = 0;
        }
        if (i != ist) {
            flag[i] = 1;
        }
        #pragma omp flush(flag)
    }
}
/*[]*/
/*
 * domain: initialize the subdomain extents and interior loop bounds.
 *
 * Copies the global problem sizes (nx0, ny0, nz0) into the working extents
 * (nx, ny, nz), validates that each extent lies in [4, 12], and sets the
 * interior sweep ranges ist..iend / jst..jend.  Exits the program with
 * status 1 and a diagnostic message when the extents are out of range.
 */
static void domain() {
    nx = nx0;
    ny = ny0;
    nz = nz0;
    /* Each dimension must be at least 4 for the 5-point stencils to fit. */
    if (nx < 4 || ny < 4 || nz < 4) {
        printf(" SUBDOMAIN SIZE IS TOO SMALL - \n" " ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n" " SO THAT NX, NY AND NZ ARE GREATER THAN OR EQUAL\n" " TO 4 THEY ARE CURRENTLY%3d%3d%3d\n", nx, ny, nz);
        exit(1);
    }
    /* Each dimension must fit within the compiled-in array bound (12). */
    if (nx > 12 || ny > 12 || nz > 12) {
        printf(" SUBDOMAIN SIZE IS TOO LARGE - \n" " ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n" " SO THAT NX, NY AND NZ ARE LESS THAN OR EQUAL TO \n" " ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY. THEY ARE\n" " CURRENTLY%4d%4d%4d\n", nx, ny, nz);
        exit(1);
    }
    /* Interior ranges exclude the boundary layers. */
    ist = 1;
    iend = nx - 2;
    jst = 1;
    jend = ny - 2;
}
/*[]*/
/*
 * erhs: build the steady-state forcing term frct for the LU benchmark.
 *
 * Inside one OpenMP parallel region this routine:
 *   1. zeroes frct;
 *   2. fills rsd with the exact-solution polynomial (coefficients ce);
 *   3. for each coordinate direction (xi, eta, zeta) computes the
 *      convective fluxes into the shared scratch array flux, then
 *      accumulates second-order flux differences, viscous terms and
 *      fourth-order artificial dissipation (dsspm) into frct.
 *
 * The worksharing loops use "nowait"; explicit barriers separate the
 * phases because flux is overwritten per direction and each difference
 * phase reads flux/rsd values produced by other threads in the
 * preceding phase.  Do not reorder loops or move the barriers.
 */
static void erhs() {
#pragma omp parallel
    {
        int i;
        int j;
        int k;
        int m;
        int iglob;
        int jglob;
        int L1;
        int L2;
        int ist1;
        int iend1;
        int jst1;
        int jend1;
        double dsspm;
        double xi;
        double eta;
        double zeta;
        double q;
        double u21;
        double u31;
        double u41;
        double tmp;
        double u21i;
        double u31i;
        double u41i;
        double u51i;
        double u21j;
        double u31j;
        double u41j;
        double u51j;
        double u21k;
        double u31k;
        double u41k;
        double u51k;
        double u21im1;
        double u31im1;
        double u41im1;
        double u51im1;
        double u21jm1;
        double u31jm1;
        double u41jm1;
        double u51jm1;
        double u21km1;
        double u31km1;
        double u41km1;
        double u51km1;
        /* Local copy of the global dissipation coefficient. */
        dsspm = dssp;

        /* Phase 1: clear the forcing array. */
#pragma omp for nowait
        for (i = 0; i < nx; i++) {
            for (j = 0; j < ny; j++) {
                for (k = 0; k < nz; k++) {
                    for (m = 0; m < 5; m++) {
                        frct[i][j][k][m] = 0.0;
                    }
                }
            }
        }

        /* Phase 2: rsd := exact solution, a degree-4 polynomial in the
           normalized coordinates (xi, eta, zeta) with coefficients ce. */
#pragma omp for nowait
        for (i = 0; i < nx; i++) {
            iglob = i;
            xi = ((double) iglob) / (nx0 - 1);
            for (j = 0; j < ny; j++) {
                jglob = j;
                eta = ((double) jglob) / (ny0 - 1);
                for (k = 0; k < nz; k++) {
                    zeta = ((double) k) / (nz - 1);
                    for (m = 0; m < 5; m++) {
                        rsd[i][j][k][m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta;
                    }
                }
            }
        }

        /* All of rsd must be written before any flux is computed from it. */
// #pragma omp dummyFlush BARRIER_START
#pragma omp barrier

        /* ---- xi-direction ---- */
        L1 = 0;
        L2 = nx - 1;
        /* Phase 3: convective flux in the i-direction (u21 = x-velocity). */
#pragma omp for nowait
        for (i = L1; i <= L2; i++) {
            for (j = jst; j <= jend; j++) {
                for (k = 1; k < nz - 1; k++) {
                    flux[i][j][k][0] = rsd[i][j][k][1];
                    u21 = rsd[i][j][k][1] / rsd[i][j][k][0];
                    q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];
                    flux[i][j][k][1] = rsd[i][j][k][1] * u21 + 0.40e+00 * (rsd[i][j][k][4] - q);
                    flux[i][j][k][2] = rsd[i][j][k][2] * u21;
                    flux[i][j][k][3] = rsd[i][j][k][3] * u21;
                    flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u21;
                }
            }
        }

        /* flux[i +/- 1] is read below, so the whole flux array must be ready. */
// #pragma omp dummyFlush BARRIER_START
#pragma omp barrier

        /* Phase 4: i-direction flux differences, viscous terms and
           fourth-order dissipation, accumulated into frct. */
#pragma omp for nowait
        for (j = jst; j <= jend; j++) {
            for (k = 1; k <= nz - 2; k++) {
                /* Second-order central difference of the convective flux. */
                for (i = ist; i <= iend; i++) {
                    for (m = 0; m < 5; m++) {
                        frct[i][j][k][m] = frct[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]);
                    }
                }
                /* Viscous flux at the i-1/2 faces; overwrites flux[..][1..4]. */
                for (i = ist; i <= L2; i++) {
                    tmp = 1.0 / rsd[i][j][k][0];
                    u21i = tmp * rsd[i][j][k][1];
                    u31i = tmp * rsd[i][j][k][2];
                    u41i = tmp * rsd[i][j][k][3];
                    u51i = tmp * rsd[i][j][k][4];
                    tmp = 1.0 / rsd[i - 1][j][k][0];
                    u21im1 = tmp * rsd[i - 1][j][k][1];
                    u31im1 = tmp * rsd[i - 1][j][k][2];
                    u41im1 = tmp * rsd[i - 1][j][k][3];
                    u51im1 = tmp * rsd[i - 1][j][k][4];
                    flux[i][j][k][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1);
                    flux[i][j][k][2] = tx3 * (u31i - u31im1);
                    flux[i][j][k][3] = tx3 * (u41i - u41im1);
                    flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * ((u21i * u21i + u31i * u31i + u41i * u41i) - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + (1.0 / 6.0) * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);
                }
                /* Add viscous and second-difference terms. */
                for (i = ist; i <= iend; i++) {
                    frct[i][j][k][0] = frct[i][j][k][0] + dx1 * tx1 * (rsd[i - 1][j][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i + 1][j][k][0]);
                    frct[i][j][k][1] = frct[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (rsd[i - 1][j][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i + 1][j][k][1]);
                    frct[i][j][k][2] = frct[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (rsd[i - 1][j][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i + 1][j][k][2]);
                    frct[i][j][k][3] = frct[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (rsd[i - 1][j][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i + 1][j][k][3]);
                    frct[i][j][k][4] = frct[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (rsd[i - 1][j][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i + 1][j][k][4]);
                }
                /* Fourth-order dissipation: one-sided stencils at i = 1, 2 ... */
                for (m = 0; m < 5; m++) {
                    frct[1][j][k][m] = frct[1][j][k][m] - dsspm * (+5.0 * rsd[1][j][k][m] - 4.0 * rsd[2][j][k][m] + rsd[3][j][k][m]);
                    frct[2][j][k][m] = frct[2][j][k][m] - dsspm * (-4.0 * rsd[1][j][k][m] + 6.0 * rsd[2][j][k][m] - 4.0 * rsd[3][j][k][m] + rsd[4][j][k][m]);
                }
                /* ... the full 5-point stencil in the interior ... */
                ist1 = 3;
                iend1 = nx - 4;
                for (i = ist1; i <= iend1; i++) {
                    for (m = 0; m < 5; m++) {
                        frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i - 2][j][k][m] - 4.0 * rsd[i - 1][j][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i + 1][j][k][m] + rsd[i + 2][j][k][m]);
                    }
                }
                /* ... and one-sided stencils at i = nx-3, nx-2. */
                for (m = 0; m < 5; m++) {
                    frct[nx - 3][j][k][m] = frct[nx - 3][j][k][m] - dsspm * (rsd[nx - 5][j][k][m] - 4.0 * rsd[nx - 4][j][k][m] + 6.0 * rsd[nx - 3][j][k][m] - 4.0 * rsd[nx - 2][j][k][m]);
                    frct[nx - 2][j][k][m] = frct[nx - 2][j][k][m] - dsspm * (rsd[nx - 4][j][k][m] - 4.0 * rsd[nx - 3][j][k][m] + 5.0 * rsd[nx - 2][j][k][m]);
                }
            }
        }

        /* flux is about to be reused for the eta direction. */
// #pragma omp dummyFlush BARRIER_START
#pragma omp barrier

        /* ---- eta-direction (same structure as xi, u31 = y-velocity) ---- */
        L1 = 0;
        L2 = ny - 1;
#pragma omp for nowait
        for (i = ist; i <= iend; i++) {
            for (j = L1; j <= L2; j++) {
                for (k = 1; k <= nz - 2; k++) {
                    flux[i][j][k][0] = rsd[i][j][k][2];
                    u31 = rsd[i][j][k][2] / rsd[i][j][k][0];
                    q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];
                    flux[i][j][k][1] = rsd[i][j][k][1] * u31;
                    flux[i][j][k][2] = rsd[i][j][k][2] * u31 + 0.40e+00 * (rsd[i][j][k][4] - q);
                    flux[i][j][k][3] = rsd[i][j][k][3] * u31;
                    flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u31;
                }
            }
        }

// #pragma omp dummyFlush BARRIER_START
#pragma omp barrier

        /* eta-direction differences, viscous terms and dissipation. */
#pragma omp for nowait
        for (i = ist; i <= iend; i++) {
            for (k = 1; k <= nz - 2; k++) {
                for (j = jst; j <= jend; j++) {
                    for (m = 0; m < 5; m++) {
                        frct[i][j][k][m] = frct[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]);
                    }
                }
                for (j = jst; j <= L2; j++) {
                    tmp = 1.0 / rsd[i][j][k][0];
                    u21j = tmp * rsd[i][j][k][1];
                    u31j = tmp * rsd[i][j][k][2];
                    u41j = tmp * rsd[i][j][k][3];
                    u51j = tmp * rsd[i][j][k][4];
                    tmp = 1.0 / rsd[i][j - 1][k][0];
                    u21jm1 = tmp * rsd[i][j - 1][k][1];
                    u31jm1 = tmp * rsd[i][j - 1][k][2];
                    u41jm1 = tmp * rsd[i][j - 1][k][3];
                    u51jm1 = tmp * rsd[i][j - 1][k][4];
                    flux[i][j][k][1] = ty3 * (u21j - u21jm1);
                    flux[i][j][k][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1);
                    flux[i][j][k][3] = ty3 * (u41j - u41jm1);
                    flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * ((u21j * u21j + u31j * u31j + u41j * u41j) - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + (1.0 / 6.0) * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);
                }
                for (j = jst; j <= jend; j++) {
                    frct[i][j][k][0] = frct[i][j][k][0] + dy1 * ty1 * (rsd[i][j - 1][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j + 1][k][0]);
                    frct[i][j][k][1] = frct[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (rsd[i][j - 1][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j + 1][k][1]);
                    frct[i][j][k][2] = frct[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (rsd[i][j - 1][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j + 1][k][2]);
                    frct[i][j][k][3] = frct[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (rsd[i][j - 1][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j + 1][k][3]);
                    frct[i][j][k][4] = frct[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (rsd[i][j - 1][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j + 1][k][4]);
                }
                /* One-sided dissipation stencils at j = 1, 2. */
                for (m = 0; m < 5; m++) {
                    frct[i][1][k][m] = frct[i][1][k][m] - dsspm * (+5.0 * rsd[i][1][k][m] - 4.0 * rsd[i][2][k][m] + rsd[i][3][k][m]);
                    frct[i][2][k][m] = frct[i][2][k][m] - dsspm * (-4.0 * rsd[i][1][k][m] + 6.0 * rsd[i][2][k][m] - 4.0 * rsd[i][3][k][m] + rsd[i][4][k][m]);
                }
                jst1 = 3;
                jend1 = ny - 4;
                for (j = jst1; j <= jend1; j++) {
                    for (m = 0; m < 5; m++) {
                        frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j - 2][k][m] - 4.0 * rsd[i][j - 1][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j + 1][k][m] + rsd[i][j + 2][k][m]);
                    }
                }
                /* One-sided dissipation stencils at j = ny-3, ny-2. */
                for (m = 0; m < 5; m++) {
                    frct[i][ny - 3][k][m] = frct[i][ny - 3][k][m] - dsspm * (rsd[i][ny - 5][k][m] - 4.0 * rsd[i][ny - 4][k][m] + 6.0 * rsd[i][ny - 3][k][m] - 4.0 * rsd[i][ny - 2][k][m]);
                    frct[i][ny - 2][k][m] = frct[i][ny - 2][k][m] - dsspm * (rsd[i][ny - 4][k][m] - 4.0 * rsd[i][ny - 3][k][m] + 5.0 * rsd[i][ny - 2][k][m]);
                }
            }
        }

// #pragma omp dummyFlush BARRIER_START
#pragma omp barrier

        /* ---- zeta-direction (u41 = z-velocity); here both loops of a
           column are local to one thread, so flux and its differences
           live in the same worksharing chunk and no barrier is needed
           between them. ---- */
#pragma omp for nowait
        for (i = ist; i <= iend; i++) {
            for (j = jst; j <= jend; j++) {
                for (k = 0; k <= nz - 1; k++) {
                    flux[i][j][k][0] = rsd[i][j][k][3];
                    u41 = rsd[i][j][k][3] / rsd[i][j][k][0];
                    q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];
                    flux[i][j][k][1] = rsd[i][j][k][1] * u41;
                    flux[i][j][k][2] = rsd[i][j][k][2] * u41;
                    flux[i][j][k][3] = rsd[i][j][k][3] * u41 + 0.40e+00 * (rsd[i][j][k][4] - q);
                    flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u41;
                }
                for (k = 1; k <= nz - 2; k++) {
                    for (m = 0; m < 5; m++) {
                        frct[i][j][k][m] = frct[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]);
                    }
                }
                for (k = 1; k <= nz - 1; k++) {
                    tmp = 1.0 / rsd[i][j][k][0];
                    u21k = tmp * rsd[i][j][k][1];
                    u31k = tmp * rsd[i][j][k][2];
                    u41k = tmp * rsd[i][j][k][3];
                    u51k = tmp * rsd[i][j][k][4];
                    tmp = 1.0 / rsd[i][j][k - 1][0];
                    u21km1 = tmp * rsd[i][j][k - 1][1];
                    u31km1 = tmp * rsd[i][j][k - 1][2];
                    u41km1 = tmp * rsd[i][j][k - 1][3];
                    u51km1 = tmp * rsd[i][j][k - 1][4];
                    flux[i][j][k][1] = tz3 * (u21k - u21km1);
                    flux[i][j][k][2] = tz3 * (u31k - u31km1);
                    flux[i][j][k][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1);
                    flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * ((u21k * u21k + u31k * u31k + u41k * u41k) - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + (1.0 / 6.0) * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);
                }
                for (k = 1; k <= nz - 2; k++) {
                    frct[i][j][k][0] = frct[i][j][k][0] + dz1 * tz1 * (rsd[i][j][k + 1][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j][k - 1][0]);
                    frct[i][j][k][1] = frct[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (rsd[i][j][k + 1][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j][k - 1][1]);
                    frct[i][j][k][2] = frct[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (rsd[i][j][k + 1][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j][k - 1][2]);
                    frct[i][j][k][3] = frct[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (rsd[i][j][k + 1][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j][k - 1][3]);
                    frct[i][j][k][4] = frct[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (rsd[i][j][k + 1][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j][k - 1][4]);
                }
                /* One-sided dissipation stencils at k = 1, 2. */
                for (m = 0; m < 5; m++) {
                    frct[i][j][1][m] = frct[i][j][1][m] - dsspm * (+5.0 * rsd[i][j][1][m] - 4.0 * rsd[i][j][2][m] + rsd[i][j][3][m]);
                    frct[i][j][2][m] = frct[i][j][2][m] - dsspm * (-4.0 * rsd[i][j][1][m] + 6.0 * rsd[i][j][2][m] - 4.0 * rsd[i][j][3][m] + rsd[i][j][4][m]);
                }
                for (k = 3; k <= nz - 4; k++) {
                    for (m = 0; m < 5; m++) {
                        frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j][k - 2][m] - 4.0 * rsd[i][j][k - 1][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j][k + 1][m] + rsd[i][j][k + 2][m]);
                    }
                }
                /* One-sided dissipation stencils at k = nz-3, nz-2. */
                for (m = 0; m < 5; m++) {
                    frct[i][j][nz - 3][m] = frct[i][j][nz - 3][m] - dsspm * (rsd[i][j][nz - 5][m] - 4.0 * rsd[i][j][nz - 4][m] + 6.0 * rsd[i][j][nz - 3][m] - 4.0 * rsd[i][j][nz - 2][m]);
                    frct[i][j][nz - 2][m] = frct[i][j][nz - 2][m] - dsspm * (rsd[i][j][nz - 4][m] - 4.0 * rsd[i][j][nz - 3][m] + 5.0 * rsd[i][j][nz - 2][m]);
                }
            }
        }
    }
}
/*[]*/
/*
 * error: compute the RMS difference between the computed solution u and
 * the exact solution over the interior grid points, storing the five
 * per-component norms in the global array errnm.
 *
 * Reads globals: u, ist/iend, jst/jend, nz, nx0, ny0, nz0.
 * Writes global: errnm[0..4].
 */
static void error() {
    int i;
    int j;
    int k;
    int m;
    double diff;
    double uexact[5];

    /* Start from zero accumulators. */
    for (m = 0; m < 5; m++) {
        errnm[m] = 0.0;
    }

    /* Accumulate squared pointwise errors over the interior. */
    for (i = ist; i <= iend; i++) {
        for (j = jst; j <= jend; j++) {
            for (k = 1; k <= nz - 2; k++) {
                exact(i, j, k, uexact);
                for (m = 0; m < 5; m++) {
                    diff = uexact[m] - u[i][j][k][m];
                    errnm[m] = errnm[m] + diff * diff;
                }
            }
        }
    }

    /* Normalize by the interior point count and take the square root. */
    for (m = 0; m < 5; m++) {
        errnm[m] = sqrt(errnm[m] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)));
    }
}
/*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/
/*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/
/*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/
/*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/
/*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/
/*
 * exact: evaluate the exact-solution polynomial at grid point (i, j, k).
 *
 * The solution is a degree-4 polynomial in the normalized coordinates
 * xi = i/(nx0-1), eta = j/(ny0-1), zeta = k/(nz-1), with coefficient
 * table ce[m][0..12] laid out as
 *   1, xi, eta, zeta, xi^2, eta^2, zeta^2, ..., xi^4, eta^4, zeta^4.
 * The five components are written into u000ijk[0..4].
 *
 * Powers are built by repeated multiplication and summed left-to-right,
 * matching the original expression order bit-for-bit.
 */
static void exact(int i, int j, int k, double u000ijk[5]) {
    int m;
    int q;
    double p[13];   /* basis values in ce's column order */

    p[0] = 1.0;
    p[1] = ((double) i) / (nx0 - 1);   /* xi */
    p[2] = ((double) j) / (ny0 - 1);   /* eta */
    p[3] = ((double) k) / (nz - 1);    /* zeta */
    /* p[q] = p[q-3] * base, where the base cycles xi, eta, zeta. */
    for (q = 4; q < 13; q++) {
        p[q] = p[q - 3] * p[(q - 4) % 3 + 1];
    }

    for (m = 0; m < 5; m++) {
        double s = 0.0;
        for (q = 0; q < 13; q++) {
            s += ce[m][q] * p[q];
        }
        u000ijk[m] = s;
    }
}
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*
 * jacld: form the block-diagonal matrix d and the lower-triangular
 * coupling blocks a, b, c for plane k of the implicit solve.
 *
 * For each interior (i, j) the routine fills four 5x5 blocks:
 *   d[i][j][*][*] - diagonal block, built from u[i][j][k];
 *   a[i][j][*][*] - coupling to the (i, j, k-1) neighbor;
 *   b[i][j][*][*] - coupling to the (i, j-1, k) neighbor;
 *   c[i][j][*][*] - coupling to the (i-1, j, k) neighbor.
 *
 * tmp1/tmp2/tmp3 hold 1/rho and its powers and are deliberately
 * recomputed before each block from that block's own u entry - keep the
 * statement order if editing.  Runs inside an enclosing OpenMP parallel
 * region ("omp for nowait"); each thread writes disjoint i rows.
 */
static void jacld(int k) {
    int i;
    int j;
    double r43;
    double c1345;
    double c34;
    double tmp1;
    double tmp2;
    double tmp3;
    /* Constants: r43 = 4/3; c1345 and c34 combine the viscosity-related
       coefficients used throughout the Jacobian entries. */
    r43 = (4.0 / 3.0);
    c1345 = 1.40e+00 * 1.00e-01 * 1.00e+00 * 1.40e+00;
    c34 = 1.00e-01 * 1.00e+00;
#pragma omp for nowait schedule(static)
    for (i = ist; i <= iend; i++) {
        for (j = jst; j <= jend; j++) {
            /* --- diagonal block d, from u[i][j][k] --- */
            tmp1 = 1.0 / u[i][j][k][0];
            tmp2 = tmp1 * tmp1;
            tmp3 = tmp1 * tmp2;
            d[i][j][0][0] = 1.0 + dt * 2.0 * (tx1 * dx1 + ty1 * dy1 + tz1 * dz1);
            d[i][j][0][1] = 0.0;
            d[i][j][0][2] = 0.0;
            d[i][j][0][3] = 0.0;
            d[i][j][0][4] = 0.0;
            d[i][j][1][0] = dt * 2.0 * (tx1 * (-r43 * c34 * tmp2 * u[i][j][k][1]) + ty1 * (-c34 * tmp2 * u[i][j][k][1]) + tz1 * (-c34 * tmp2 * u[i][j][k][1]));
            d[i][j][1][1] = 1.0 + dt * 2.0 * (tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx2 + ty1 * dy2 + tz1 * dz2);
            d[i][j][1][2] = 0.0;
            d[i][j][1][3] = 0.0;
            d[i][j][1][4] = 0.0;
            d[i][j][2][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][2]) + ty1 * (-r43 * c34 * tmp2 * u[i][j][k][2]) + tz1 * (-c34 * tmp2 * u[i][j][k][2]));
            d[i][j][2][1] = 0.0;
            d[i][j][2][2] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx3 + ty1 * dy3 + tz1 * dz3);
            d[i][j][2][3] = 0.0;
            d[i][j][2][4] = 0.0;
            d[i][j][3][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][3]) + ty1 * (-c34 * tmp2 * u[i][j][k][3]) + tz1 * (-r43 * c34 * tmp2 * u[i][j][k][3]));
            d[i][j][3][1] = 0.0;
            d[i][j][3][2] = 0.0;
            d[i][j][3][3] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1) + dt * 2.0 * (tx1 * dx4 + ty1 * dy4 + tz1 * dz4);
            d[i][j][3][4] = 0.0;
            d[i][j][4][0] = dt * 2.0 * (tx1 * (-(r43 * c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]) + ty1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]) + tz1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]));
            d[i][j][4][1] = dt * 2.0 * (tx1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][1] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][1] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][1]);
            d[i][j][4][2] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][2] + ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][2] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][2]);
            d[i][j][4][3] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][3]);
            d[i][j][4][4] = 1.0 + dt * 2.0 * (tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1) + dt * 2.0 * (tx1 * dx5 + ty1 * dy5 + tz1 * dz5);
            /* --- block a, coupling to (i, j, k-1) --- */
            tmp1 = 1.0 / u[i][j][k - 1][0];
            tmp2 = tmp1 * tmp1;
            tmp3 = tmp1 * tmp2;
            a[i][j][0][0] = -dt * tz1 * dz1;
            a[i][j][0][1] = 0.0;
            a[i][j][0][2] = 0.0;
            a[i][j][0][3] = -dt * tz2;
            a[i][j][0][4] = 0.0;
            a[i][j][1][0] = -dt * tz2 * (-(u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][1]);
            a[i][j][1][1] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2;
            a[i][j][1][2] = 0.0;
            a[i][j][1][3] = -dt * tz2 * (u[i][j][k - 1][1] * tmp1);
            a[i][j][1][4] = 0.0;
            a[i][j][2][0] = -dt * tz2 * (-(u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][2]);
            a[i][j][2][1] = 0.0;
            a[i][j][2][2] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (c34 * tmp1) - dt * tz1 * dz3;
            a[i][j][2][3] = -dt * tz2 * (u[i][j][k - 1][2] * tmp1);
            a[i][j][2][4] = 0.0;
            a[i][j][3][0] = -dt * tz2 * (-(u[i][j][k - 1][3] * tmp1) * (u[i][j][k - 1][3] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (-r43 * c34 * tmp2 * u[i][j][k - 1][3]);
            a[i][j][3][1] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][1] * tmp1));
            a[i][j][3][2] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][2] * tmp1));
            a[i][j][3][3] = -dt * tz2 * (2.0 - 0.40e+00) * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (r43 * c34 * tmp1) - dt * tz1 * dz4;
            a[i][j][3][4] = -dt * tz2 * 0.40e+00;
            a[i][j][4][0] = -dt * tz2 * ((0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2 - 1.40e+00 * (u[i][j][k - 1][4] * tmp1)) * (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k - 1][1] * u[i][j][k - 1][1]) - (c34 - c1345) * tmp3 * (u[i][j][k - 1][2] * u[i][j][k - 1][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k - 1][3] * u[i][j][k - 1][3]) - c1345 * tmp2 * u[i][j][k - 1][4]);
            a[i][j][4][1] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][1];
            a[i][j][4][2] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][2];
            a[i][j][4][3] = -dt * tz2 * (1.40e+00 * (u[i][j][k - 1][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + 3.0 * u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k - 1][3];
            a[i][j][4][4] = -dt * tz2 * (1.40e+00 * (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5;
            /* --- block b, coupling to (i, j-1, k) --- */
            tmp1 = 1.0 / u[i][j - 1][k][0];
            tmp2 = tmp1 * tmp1;
            tmp3 = tmp1 * tmp2;
            b[i][j][0][0] = -dt * ty1 * dy1;
            b[i][j][0][1] = 0.0;
            b[i][j][0][2] = -dt * ty2;
            b[i][j][0][3] = 0.0;
            b[i][j][0][4] = 0.0;
            b[i][j][1][0] = -dt * ty2 * (-(u[i][j - 1][k][1] * u[i][j - 1][k][2]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][1]);
            b[i][j][1][1] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy2;
            b[i][j][1][2] = -dt * ty2 * (u[i][j - 1][k][1] * tmp1);
            b[i][j][1][3] = 0.0;
            b[i][j][1][4] = 0.0;
            b[i][j][2][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * tmp1) * (u[i][j - 1][k][2] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (-r43 * c34 * tmp2 * u[i][j - 1][k][2]);
            b[i][j][2][1] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][1] * tmp1));
            b[i][j][2][2] = -dt * ty2 * ((2.0 - 0.40e+00) * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * (r43 * c34 * tmp1) - dt * ty1 * dy3;
            b[i][j][2][3] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][3] * tmp1));
            b[i][j][2][4] = -dt * ty2 * 0.40e+00;
            b[i][j][3][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][3]);
            b[i][j][3][1] = 0.0;
            b[i][j][3][2] = -dt * ty2 * (u[i][j - 1][k][3] * tmp1);
            b[i][j][3][3] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy4;
            b[i][j][3][4] = 0.0;
            b[i][j][4][0] = -dt * ty2 * ((0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2 - 1.40e+00 * (u[i][j - 1][k][4] * tmp1)) * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * (-(c34 - c1345) * tmp3 * (((u[i][j - 1][k][1]) * (u[i][j - 1][k][1]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j - 1][k][2]) * (u[i][j - 1][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j - 1][k][3]) * (u[i][j - 1][k][3]))) - c1345 * tmp2 * u[i][j - 1][k][4]);
            b[i][j][4][1] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 1][k][2]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][1];
            b[i][j][4][2] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + 3.0 * u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j - 1][k][2];
            b[i][j][4][3] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][3];
            b[i][j][4][4] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5;
            /* --- block c, coupling to (i-1, j, k) --- */
            tmp1 = 1.0 / u[i - 1][j][k][0];
            tmp2 = tmp1 * tmp1;
            tmp3 = tmp1 * tmp2;
            c[i][j][0][0] = -dt * tx1 * dx1;
            c[i][j][0][1] = -dt * tx2;
            c[i][j][0][2] = 0.0;
            c[i][j][0][3] = 0.0;
            c[i][j][0][4] = 0.0;
            c[i][j][1][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * tmp1) * (u[i - 1][j][k][1] * tmp1) + 0.40e+00 * 0.50 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-r43 * c34 * tmp2 * u[i - 1][j][k][1]);
            c[i][j][1][1] = -dt * tx2 * ((2.0 - 0.40e+00) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (r43 * c34 * tmp1) - dt * tx1 * dx2;
            c[i][j][1][2] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][2] * tmp1));
            c[i][j][1][3] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][3] * tmp1));
            c[i][j][1][4] = -dt * tx2 * 0.40e+00;
            c[i][j][2][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][2]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][2]);
            c[i][j][2][1] = -dt * tx2 * (u[i - 1][j][k][2] * tmp1);
            c[i][j][2][2] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx3;
            c[i][j][2][3] = 0.0;
            c[i][j][2][4] = 0.0;
            c[i][j][3][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][3]);
            c[i][j][3][1] = -dt * tx2 * (u[i - 1][j][k][3] * tmp1);
            c[i][j][3][2] = 0.0;
            c[i][j][3][3] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx4;
            c[i][j][3][4] = 0.0;
            c[i][j][4][0] = -dt * tx2 * ((0.40e+00 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2 - 1.40e+00 * (u[i - 1][j][k][4] * tmp1)) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (-(r43 * c34 - c1345) * tmp3 * (((u[i - 1][j][k][1]) * (u[i - 1][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i - 1][j][k][2]) * (u[i - 1][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i - 1][j][k][3]) * (u[i - 1][j][k][3]))) - c1345 * tmp2 * u[i - 1][j][k][4]);
            c[i][j][4][1] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][4] * tmp1) - 0.50 * 0.40e+00 * ((3.0 * u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2)) - dt * tx1 * (r43 * c34 - c1345) * tmp2 * u[i - 1][j][k][1];
            c[i][j][4][2] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][2] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 1][j][k][2];
            c[i][j][4][3] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][3] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 1][j][k][3];
            c[i][j][4][4] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5;
        }
    }
}
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*
 * jacu: assemble the block Jacobian matrices used by the upper-triangular
 * (backward) sweep of the SSOR solver, for one k-plane of the grid.
 *
 * For every point (i,j) in [ist..iend] x [jst..jend] it fills four 5x5
 * blocks (first two indices select the point, last two the matrix entry):
 *   d[i][j][.][.] - diagonal block, linearised about u[i][j][k]
 *   a[i][j][.][.] - coupling to the (i+1) neighbour, built from u[i+1][j][k]
 *   b[i][j][.][.] - coupling to the (j+1) neighbour, built from u[i][j+1][k]
 *   c[i][j][.][.] - coupling to the (k+1) neighbour, built from u[i][j][k+1]
 * (the forward-sweep counterpart jacld, defined earlier in this file, uses
 * the i-1 / j-1 / k-1 neighbours with mirrored signs on the tx2/ty2/tz2
 * convection terms).
 *
 * Reads globals: u, dt, the metric factors tx1/tx2, ty1/ty2, tz1/tz2, the
 * dissipation coefficients dx1..dx5, dy1..dy5, dz1..dz5, and the loop
 * bounds ist/iend/jst/jend.  Writes globals: a, b, c, d.
 *
 * Must be called from inside an OpenMP parallel region: the i-loop is a
 * worksharing "omp for", and every temporary is function-local, hence
 * private to the calling thread.
 */
static void jacu(int k) {
    int i;
    int j;
    double r43;    /* 4/3, the viscous-stress coefficient on the sweep direction */
    double c1345;  /* product of the NPB constants C1*C3*C4*C5 (1.4*0.1*1.0*1.4) */
    double c34;    /* C3*C4 (0.1*1.0) */
    double tmp1;   /* 1/rho at the point currently being linearised */
    double tmp2;   /* tmp1^2 */
    double tmp3;   /* tmp1^3 */
    r43 = (4.0 / 3.0);
    /* NOTE(review): the literals 1.40e+00 / 1.00e-01 / 1.00e+00 / 0.40e+00
     * below appear to be the inlined NPB constants C1/C5, C3, C4 and C2
     * respectively - confirm against the benchmark's applu.h. */
    c1345 = 1.40e+00 * 1.00e-01 * 1.00e+00 * 1.40e+00;
    c34 = 1.00e-01 * 1.00e+00;
    /* Loops run backwards (iend..ist, jend..jst) to match the order in
     * which the upper-triangular solve consumes these blocks. */
#pragma omp for nowait schedule(static)
    for (i = iend; i >= ist; i--) {
        for (j = jend; j >= jst; j--) {
            /* ---- diagonal block d: d/du of the implicit operator at (i,j,k) ---- */
            tmp1 = 1.0 / u[i][j][k][0];
            tmp2 = tmp1 * tmp1;
            tmp3 = tmp1 * tmp2;
            d[i][j][0][0] = 1.0 + dt * 2.0 * (tx1 * dx1 + ty1 * dy1 + tz1 * dz1);
            d[i][j][0][1] = 0.0;
            d[i][j][0][2] = 0.0;
            d[i][j][0][3] = 0.0;
            d[i][j][0][4] = 0.0;
            d[i][j][1][0] = dt * 2.0 * (tx1 * (-r43 * c34 * tmp2 * u[i][j][k][1]) + ty1 * (-c34 * tmp2 * u[i][j][k][1]) + tz1 * (-c34 * tmp2 * u[i][j][k][1]));
            d[i][j][1][1] = 1.0 + dt * 2.0 * (tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx2 + ty1 * dy2 + tz1 * dz2);
            d[i][j][1][2] = 0.0;
            d[i][j][1][3] = 0.0;
            d[i][j][1][4] = 0.0;
            d[i][j][2][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][2]) + ty1 * (-r43 * c34 * tmp2 * u[i][j][k][2]) + tz1 * (-c34 * tmp2 * u[i][j][k][2]));
            d[i][j][2][1] = 0.0;
            d[i][j][2][2] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx3 + ty1 * dy3 + tz1 * dz3);
            d[i][j][2][3] = 0.0;
            d[i][j][2][4] = 0.0;
            d[i][j][3][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][3]) + ty1 * (-c34 * tmp2 * u[i][j][k][3]) + tz1 * (-r43 * c34 * tmp2 * u[i][j][k][3]));
            d[i][j][3][1] = 0.0;
            d[i][j][3][2] = 0.0;
            d[i][j][3][3] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1) + dt * 2.0 * (tx1 * dx4 + ty1 * dy4 + tz1 * dz4);
            d[i][j][3][4] = 0.0;
            /* energy row: note r43 multiplies the component aligned with each direction */
            d[i][j][4][0] = dt * 2.0 * (tx1 * (-(r43 * c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]) + ty1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]) + tz1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]));
            d[i][j][4][1] = dt * 2.0 * (tx1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][1] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][1] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][1]);
            d[i][j][4][2] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][2] + ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][2] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][2]);
            d[i][j][4][3] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][3]);
            d[i][j][4][4] = 1.0 + dt * 2.0 * (tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1) + dt * 2.0 * (tx1 * dx5 + ty1 * dy5 + tz1 * dz5);
            /* ---- block a: coupling to the (i+1,j,k) neighbour ---- */
            tmp1 = 1.0 / u[i + 1][j][k][0];
            tmp2 = tmp1 * tmp1;
            tmp3 = tmp1 * tmp2;
            a[i][j][0][0] = -dt * tx1 * dx1;
            a[i][j][0][1] = dt * tx2;
            a[i][j][0][2] = 0.0;
            a[i][j][0][3] = 0.0;
            a[i][j][0][4] = 0.0;
            a[i][j][1][0] = dt * tx2 * (-(u[i + 1][j][k][1] * tmp1) * (u[i + 1][j][k][1] * tmp1) + 0.40e+00 * 0.50 * (u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2) - dt * tx1 * (-r43 * c34 * tmp2 * u[i + 1][j][k][1]);
            a[i][j][1][1] = dt * tx2 * ((2.0 - 0.40e+00) * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * (r43 * c34 * tmp1) - dt * tx1 * dx2;
            a[i][j][1][2] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][2] * tmp1));
            a[i][j][1][3] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][3] * tmp1));
            a[i][j][1][4] = dt * tx2 * 0.40e+00;
            a[i][j][2][0] = dt * tx2 * (-(u[i + 1][j][k][1] * u[i + 1][j][k][2]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i + 1][j][k][2]);
            a[i][j][2][1] = dt * tx2 * (u[i + 1][j][k][2] * tmp1);
            a[i][j][2][2] = dt * tx2 * (u[i + 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx3;
            a[i][j][2][3] = 0.0;
            a[i][j][2][4] = 0.0;
            a[i][j][3][0] = dt * tx2 * (-(u[i + 1][j][k][1] * u[i + 1][j][k][3]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i + 1][j][k][3]);
            a[i][j][3][1] = dt * tx2 * (u[i + 1][j][k][3] * tmp1);
            a[i][j][3][2] = 0.0;
            a[i][j][3][3] = dt * tx2 * (u[i + 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx4;
            a[i][j][3][4] = 0.0;
            a[i][j][4][0] = dt * tx2 * ((0.40e+00 * (u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2 - 1.40e+00 * (u[i + 1][j][k][4] * tmp1)) * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * (-(r43 * c34 - c1345) * tmp3 * (((u[i + 1][j][k][1]) * (u[i + 1][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i + 1][j][k][2]) * (u[i + 1][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i + 1][j][k][3]) * (u[i + 1][j][k][3]))) - c1345 * tmp2 * u[i + 1][j][k][4]);
            a[i][j][4][1] = dt * tx2 * (1.40e+00 * (u[i + 1][j][k][4] * tmp1) - 0.50 * 0.40e+00 * ((3.0 * u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2)) - dt * tx1 * (r43 * c34 - c1345) * tmp2 * u[i + 1][j][k][1];
            a[i][j][4][2] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][2] * u[i + 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i + 1][j][k][2];
            a[i][j][4][3] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][3] * u[i + 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i + 1][j][k][3];
            a[i][j][4][4] = dt * tx2 * (1.40e+00 * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5;
            /* ---- block b: coupling to the (i,j+1,k) neighbour ---- */
            tmp1 = 1.0 / u[i][j + 1][k][0];
            tmp2 = tmp1 * tmp1;
            tmp3 = tmp1 * tmp2;
            b[i][j][0][0] = -dt * ty1 * dy1;
            b[i][j][0][1] = 0.0;
            b[i][j][0][2] = dt * ty2;
            b[i][j][0][3] = 0.0;
            b[i][j][0][4] = 0.0;
            b[i][j][1][0] = dt * ty2 * (-(u[i][j + 1][k][1] * u[i][j + 1][k][2]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j + 1][k][1]);
            b[i][j][1][1] = dt * ty2 * (u[i][j + 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy2;
            b[i][j][1][2] = dt * ty2 * (u[i][j + 1][k][1] * tmp1);
            b[i][j][1][3] = 0.0;
            b[i][j][1][4] = 0.0;
            b[i][j][2][0] = dt * ty2 * (-(u[i][j + 1][k][2] * tmp1) * (u[i][j + 1][k][2] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j + 1][k][1] * u[i][j + 1][k][1] + u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2)) - dt * ty1 * (-r43 * c34 * tmp2 * u[i][j + 1][k][2]);
            b[i][j][2][1] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][1] * tmp1));
            b[i][j][2][2] = dt * ty2 * ((2.0 - 0.40e+00) * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * (r43 * c34 * tmp1) - dt * ty1 * dy3;
            b[i][j][2][3] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][3] * tmp1));
            b[i][j][2][4] = dt * ty2 * 0.40e+00;
            b[i][j][3][0] = dt * ty2 * (-(u[i][j + 1][k][2] * u[i][j + 1][k][3]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j + 1][k][3]);
            b[i][j][3][1] = 0.0;
            b[i][j][3][2] = dt * ty2 * (u[i][j + 1][k][3] * tmp1);
            b[i][j][3][3] = dt * ty2 * (u[i][j + 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy4;
            b[i][j][3][4] = 0.0;
            b[i][j][4][0] = dt * ty2 * ((0.40e+00 * (u[i][j + 1][k][1] * u[i][j + 1][k][1] + u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2 - 1.40e+00 * (u[i][j + 1][k][4] * tmp1)) * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * (-(c34 - c1345) * tmp3 * (((u[i][j + 1][k][1]) * (u[i][j + 1][k][1]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j + 1][k][2]) * (u[i][j + 1][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j + 1][k][3]) * (u[i][j + 1][k][3]))) - c1345 * tmp2 * u[i][j + 1][k][4]);
            b[i][j][4][1] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][1] * u[i][j + 1][k][2]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j + 1][k][1];
            b[i][j][4][2] = dt * ty2 * (1.40e+00 * (u[i][j + 1][k][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j + 1][k][1] * u[i][j + 1][k][1] + 3.0 * u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2)) - dt * ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j + 1][k][2];
            b[i][j][4][3] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][2] * u[i][j + 1][k][3]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j + 1][k][3];
            b[i][j][4][4] = dt * ty2 * (1.40e+00 * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5;
            /* ---- block c: coupling to the (i,j,k+1) neighbour ---- */
            tmp1 = 1.0 / u[i][j][k + 1][0];
            tmp2 = tmp1 * tmp1;
            tmp3 = tmp1 * tmp2;
            c[i][j][0][0] = -dt * tz1 * dz1;
            c[i][j][0][1] = 0.0;
            c[i][j][0][2] = 0.0;
            c[i][j][0][3] = dt * tz2;
            c[i][j][0][4] = 0.0;
            c[i][j][1][0] = dt * tz2 * (-(u[i][j][k + 1][1] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k + 1][1]);
            c[i][j][1][1] = dt * tz2 * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2;
            c[i][j][1][2] = 0.0;
            c[i][j][1][3] = dt * tz2 * (u[i][j][k + 1][1] * tmp1);
            c[i][j][1][4] = 0.0;
            c[i][j][2][0] = dt * tz2 * (-(u[i][j][k + 1][2] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k + 1][2]);
            c[i][j][2][1] = 0.0;
            c[i][j][2][2] = dt * tz2 * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * (c34 * tmp1) - dt * tz1 * dz3;
            c[i][j][2][3] = dt * tz2 * (u[i][j][k + 1][2] * tmp1);
            c[i][j][2][4] = 0.0;
            c[i][j][3][0] = dt * tz2 * (-(u[i][j][k + 1][3] * tmp1) * (u[i][j][k + 1][3] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2)) - dt * tz1 * (-r43 * c34 * tmp2 * u[i][j][k + 1][3]);
            c[i][j][3][1] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][1] * tmp1));
            c[i][j][3][2] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][2] * tmp1));
            c[i][j][3][3] = dt * tz2 * (2.0 - 0.40e+00) * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * (r43 * c34 * tmp1) - dt * tz1 * dz4;
            c[i][j][3][4] = dt * tz2 * 0.40e+00;
            c[i][j][4][0] = dt * tz2 * ((0.40e+00 * (u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2 - 1.40e+00 * (u[i][j][k + 1][4] * tmp1)) * (u[i][j][k + 1][3] * tmp1)) - dt * tz1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k + 1][1]) * (u[i][j][k + 1][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k + 1][2]) * (u[i][j][k + 1][2]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k + 1][3]) * (u[i][j][k + 1][3]))) - c1345 * tmp2 * u[i][j][k + 1][4]);
            c[i][j][4][1] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][1] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k + 1][1];
            c[i][j][4][2] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][2] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k + 1][2];
            c[i][j][4][3] = dt * tz2 * (1.40e+00 * (u[i][j][k + 1][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + 3.0 * u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2)) - dt * tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k + 1][3];
            c[i][j][4][4] = dt * tz2 * (1.40e+00 * (u[i][j][k + 1][3] * tmp1)) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5;
        }
    }
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
static void l2norm(int nx0, int ny0 , int nz0 , int ist , int iend , int jst , int jend , double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5] , double sum[5]) {
/*[]*/
/*[]*/
int i;
/*[]*/
int j;
/*[]*/
int k;
/*[]*/
int m;
/*[]*/
double sum0 = 0.0;
/*[]*/
double sum1 = 0.0;
/*[]*/
double sum2 = 0.0;
/*[]*/
double sum3 = 0.0;
/*[]*/
double sum4 = 0.0;
/*[]*/
#pragma omp single nowait
{
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (m = 0; m < 5; m++) {
/*[]*/
/*[]*/
sum[m] = 0.0;
}
}
/*[]*/
#pragma omp for nowait
/*[]*/
/*[]*/
/*[]*/
for (i = ist; i <= iend; i++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (j = jst; j <= jend; j++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (k = 1; k <= nz0 - 2; k++) {
/*[]*/
/*[]*/
sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0];
/*[]*/
sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1];
/*[]*/
sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2];
/*[]*/
sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3];
/*[]*/
sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4];
}
}
}
/*[]*/
// #pragma omp dummyFlush CRITICAL_START
/*[]*/
#pragma omp critical
{
/*[]*/
/*[]*/
sum[0] += sum0;
/*[]*/
sum[1] += sum1;
/*[]*/
sum[2] += sum2;
/*[]*/
sum[3] += sum3;
/*[]*/
sum[4] += sum4;
}
/*[]*/
// #pragma omp dummyFlush CRITICAL_END
/*[]*/
#pragma omp single nowait
{
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (m = 0; m < 5; m++) {
/*[]*/
/*[]*/
double _imopVarPre154;
/*[]*/
double _imopVarPre155;
/*[]*/
_imopVarPre154 = sum[m] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2));
/*[]*/
_imopVarPre155 = sqrt(_imopVarPre154);
/*[]*/
/*[]*/
sum[m] = _imopVarPre155;
}
}
}
/*[]*/
/*
 * pintgr: compute the verification surface integral "frc".
 *
 * Evaluates phi = 0.4 * (E - 0.5*(m1^2+m2^2+m3^2)/rho) - for an ideal gas
 * with C2 = 0.4 this is the pressure - on three pairs of faces of the
 * subregion [ibeg..ifin] x [jbeg..jfin] x [ki1..ki2], integrates each pair
 * with the trapezoidal rule, and stores 0.25*(frc1+frc2+frc3) in the
 * global "frc".
 *
 * Reads globals: u, nx, ny, ii1, ii2, ji1, ji2, ki1, ki2, dxi, deta, dzeta.
 * Writes global: frc.  Purely sequential (no OpenMP worksharing here).
 *
 * NOTE(review): the iglob/jglob bookkeeping and the _imopVarPre* variables
 * are machine-generated expansions of short-circuit && conditions,
 * apparently left over from a distributed-memory version where local and
 * global indices differed; in this single-image version iglob == i and
 * jglob == j throughout - confirm against the original NPB source.
 */
static void pintgr() {
    int i;
    int j;
    int k;
    int ibeg;   /* first/last i of the integration window */
    int ifin;
    int ifin1;  /* ifin minus one cell when the window touches ii2 */
    int jbeg;   /* first/last j of the integration window */
    int jfin;
    int jfin1;  /* jfin minus one cell when the window touches ji2 */
    int iglob;
    int iglob1;
    int iglob2;
    int jglob;
    int jglob1;
    int jglob2;
    double phi1[12 + 2][12 + 2];  /* phi sampled on the "low" face of a pair */
    double phi2[12 + 2][12 + 2];  /* phi sampled on the "high" face of a pair */
    double frc1;  /* integral over the k = ki1 / k = ki2 face pair */
    double frc2;  /* integral over the j = jbeg / j = jfin face pair */
    double frc3;  /* integral over the i = ibeg / i = ifin face pair */
    /* ---- clip [ii1..ii2] against the local i-range [0..nx-1] ---- */
    ibeg = nx;
    ifin = 0;
    iglob1 = -1;
    iglob2 = nx - 1;
    /* _imopVarPre157 == (iglob1 >= ii1 && iglob2 < ii2 + nx) */
    int _imopVarPre157;
    _imopVarPre157 = iglob1 >= ii1;
    if (_imopVarPre157) {
        _imopVarPre157 = iglob2 < ii2 + nx;
    }
    if (_imopVarPre157) {
        ibeg = 0;
    }
    int _imopVarPre159;
    _imopVarPre159 = iglob1 >= ii1 - nx;
    if (_imopVarPre159) {
        _imopVarPre159 = iglob2 <= ii2;
    }
    if (_imopVarPre159) {
        ifin = nx;
    }
    int _imopVarPre161;
    _imopVarPre161 = ii1 >= iglob1;
    if (_imopVarPre161) {
        _imopVarPre161 = ii1 <= iglob2;
    }
    if (_imopVarPre161) {
        ibeg = ii1;
    }
    int _imopVarPre163;
    _imopVarPre163 = ii2 >= iglob1;
    if (_imopVarPre163) {
        _imopVarPre163 = ii2 <= iglob2;
    }
    if (_imopVarPre163) {
        ifin = ii2;
    }
    /* ---- clip [ji1..ji2] against the local j-range [0..ny-1] ---- */
    jbeg = ny;
    jfin = -1;
    jglob1 = 0;
    jglob2 = ny - 1;
    int _imopVarPre165;
    _imopVarPre165 = jglob1 >= ji1;
    if (_imopVarPre165) {
        _imopVarPre165 = jglob2 < ji2 + ny;
    }
    if (_imopVarPre165) {
        jbeg = 0;
    }
    int _imopVarPre167;
    _imopVarPre167 = jglob1 > ji1 - ny;
    if (_imopVarPre167) {
        _imopVarPre167 = jglob2 <= ji2;
    }
    if (_imopVarPre167) {
        jfin = ny;
    }
    int _imopVarPre169;
    _imopVarPre169 = ji1 >= jglob1;
    if (_imopVarPre169) {
        _imopVarPre169 = ji1 <= jglob2;
    }
    if (_imopVarPre169) {
        jbeg = ji1;
    }
    int _imopVarPre171;
    _imopVarPre171 = ji2 >= jglob1;
    if (_imopVarPre171) {
        _imopVarPre171 = ji2 <= jglob2;
    }
    if (_imopVarPre171) {
        jfin = ji2;
    }
    /* trapezoid loops below run to ifin1/jfin1 = last-cell index */
    ifin1 = ifin;
    jfin1 = jfin;
    if (ifin1 == ii2) {
        ifin1 = ifin - 1;
    }
    if (jfin1 == ji2) {
        jfin1 = jfin - 1;
    }
    /* ---- face pair 1: k = ki1 (phi1) and k = ki2 (phi2) ---- */
    for (i = 0; i <= 12 + 1; i++) {
        for (k = 0; k <= 12 + 1; k++) {
            phi1[i][k] = 0.0;
            phi2[i][k] = 0.0;
        }
    }
    for (i = ibeg; i <= ifin; i++) {
        iglob = i;
        for (j = jbeg; j <= jfin; j++) {
            jglob = j;
            k = ki1;
            phi1[i][j] = 0.40e+00 * (u[i][j][k][4] - 0.50 * (((u[i][j][k][1]) * (u[i][j][k][1])) + ((u[i][j][k][2]) * (u[i][j][k][2])) + ((u[i][j][k][3]) * (u[i][j][k][3]))) / u[i][j][k][0]);
            k = ki2;
            phi2[i][j] = 0.40e+00 * (u[i][j][k][4] - 0.50 * (((u[i][j][k][1]) * (u[i][j][k][1])) + ((u[i][j][k][2]) * (u[i][j][k][2])) + ((u[i][j][k][3]) * (u[i][j][k][3]))) / u[i][j][k][0]);
        }
    }
    /* 2-D trapezoidal rule: each cell contributes the sum of its 4 corners
     * on both faces, scaled afterwards by the cell area dxi*deta */
    frc1 = 0.0;
    for (i = ibeg; i <= ifin1; i++) {
        for (j = jbeg; j <= jfin1; j++) {
            frc1 = frc1 + (phi1[i][j] + phi1[i + 1][j] + phi1[i][j + 1] + phi1[i + 1][j + 1] + phi2[i][j] + phi2[i + 1][j] + phi2[i][j + 1] + phi2[i + 1][j + 1]);
        }
    }
    frc1 = dxi * deta * frc1;
    /* ---- face pair 2: j = jbeg (phi1) and j = jfin (phi2) ---- */
    for (i = 0; i <= 12 + 1; i++) {
        for (k = 0; k <= 12 + 1; k++) {
            phi1[i][k] = 0.0;
            phi2[i][k] = 0.0;
        }
    }
    jglob = jbeg;
    if (jglob == ji1) {
        for (i = ibeg; i <= ifin; i++) {
            iglob = i;
            for (k = ki1; k <= ki2; k++) {
                phi1[i][k] = 0.40e+00 * (u[i][jbeg][k][4] - 0.50 * (((u[i][jbeg][k][1]) * (u[i][jbeg][k][1])) + ((u[i][jbeg][k][2]) * (u[i][jbeg][k][2])) + ((u[i][jbeg][k][3]) * (u[i][jbeg][k][3]))) / u[i][jbeg][k][0]);
            }
        }
    }
    jglob = jfin;
    if (jglob == ji2) {
        for (i = ibeg; i <= ifin; i++) {
            iglob = i;
            for (k = ki1; k <= ki2; k++) {
                phi2[i][k] = 0.40e+00 * (u[i][jfin][k][4] - 0.50 * (((u[i][jfin][k][1]) * (u[i][jfin][k][1])) + ((u[i][jfin][k][2]) * (u[i][jfin][k][2])) + ((u[i][jfin][k][3]) * (u[i][jfin][k][3]))) / u[i][jfin][k][0]);
            }
        }
    }
    frc2 = 0.0;
    for (i = ibeg; i <= ifin1; i++) {
        for (k = ki1; k <= ki2 - 1; k++) {
            frc2 = frc2 + (phi1[i][k] + phi1[i + 1][k] + phi1[i][k + 1] + phi1[i + 1][k + 1] + phi2[i][k] + phi2[i + 1][k] + phi2[i][k + 1] + phi2[i + 1][k + 1]);
        }
    }
    frc2 = dxi * dzeta * frc2;
    /* ---- face pair 3: i = ibeg (phi1) and i = ifin (phi2) ---- */
    for (i = 0; i <= 12 + 1; i++) {
        for (k = 0; k <= 12 + 1; k++) {
            phi1[i][k] = 0.0;
            phi2[i][k] = 0.0;
        }
    }
    iglob = ibeg;
    if (iglob == ii1) {
        for (j = jbeg; j <= jfin; j++) {
            jglob = j;
            for (k = ki1; k <= ki2; k++) {
                phi1[j][k] = 0.40e+00 * (u[ibeg][j][k][4] - 0.50 * (((u[ibeg][j][k][1]) * (u[ibeg][j][k][1])) + ((u[ibeg][j][k][2]) * (u[ibeg][j][k][2])) + ((u[ibeg][j][k][3]) * (u[ibeg][j][k][3]))) / u[ibeg][j][k][0]);
            }
        }
    }
    iglob = ifin;
    if (iglob == ii2) {
        for (j = jbeg; j <= jfin; j++) {
            jglob = j;
            for (k = ki1; k <= ki2; k++) {
                phi2[j][k] = 0.40e+00 * (u[ifin][j][k][4] - 0.50 * (((u[ifin][j][k][1]) * (u[ifin][j][k][1])) + ((u[ifin][j][k][2]) * (u[ifin][j][k][2])) + ((u[ifin][j][k][3]) * (u[ifin][j][k][3]))) / u[ifin][j][k][0]);
            }
        }
    }
    frc3 = 0.0;
    for (j = jbeg; j <= jfin1; j++) {
        for (k = ki1; k <= ki2 - 1; k++) {
            frc3 = frc3 + (phi1[j][k] + phi1[j + 1][k] + phi1[j][k + 1] + phi1[j + 1][k + 1] + phi2[j][k] + phi2[j + 1][k] + phi2[j][k + 1] + phi2[j + 1][k + 1]);
        }
    }
    frc3 = deta * dzeta * frc3;
    /* combine the three pairs; the factor 0.25 completes the trapezoid
     * weighting (each corner sum above counted every corner once) */
    frc = 0.25 * (frc1 + frc2 + frc3);
}
/*[]*/
/*
 * lu_skip_input_line: consume characters up to and including the next
 * newline.  Unlike the previously open-coded `while (c != '\n')` loops,
 * this also stops at EOF, so a truncated "inputlu.data" (or one without a
 * final newline) can no longer hang the benchmark in an infinite loop.
 */
static void lu_skip_input_line(FILE *fp) {
    int ch;
    do {
        ch = fgetc(fp);
    } while (ch != '\n' && ch != EOF);
}

/*
 * read_input: load the benchmark control parameters into file-scope
 * globals.
 *
 * If "inputlu.data" exists it supplies, in this order (each value group
 * preceded by comment lines that are skipped):
 *   ipr, inorm      - print control / norm-print interval flags
 *   itmax           - number of SSOR iterations
 *   dt              - time step
 *   omega           - SSOR relaxation factor
 *   tolrsd[0..4]    - residual tolerances, one per equation
 *   nx0, ny0, nz0   - problem dimensions
 * If the file is absent, the built-in defaults below are used instead.
 *
 * A malformed entry is reported on stderr but otherwise ignored (the
 * affected globals keep their previous values), matching the original
 * best-effort behaviour; the size validation below still catches
 * out-of-range dimensions and exits with status 1.
 */
static void read_input() {
    FILE *fp;
    printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version" " - LU Benchmark\n\n");
    fp = fopen("inputlu.data", "r");
    if (fp != ((void *) 0)) {
        printf(" Reading from input file inputlu.data\n");
        /* two comment lines, then ipr and inorm */
        lu_skip_input_line(fp);
        lu_skip_input_line(fp);
        if (fscanf(fp, "%d%d", &ipr, &inorm) != 2) {
            fprintf(stderr, " read_input: malformed ipr/inorm entry in inputlu.data\n");
        }
        /* remainder of the data line plus two comment lines, then itmax */
        lu_skip_input_line(fp);
        lu_skip_input_line(fp);
        lu_skip_input_line(fp);
        if (fscanf(fp, "%d", &itmax) != 1) {
            fprintf(stderr, " read_input: malformed itmax entry in inputlu.data\n");
        }
        lu_skip_input_line(fp);
        lu_skip_input_line(fp);
        lu_skip_input_line(fp);
        if (fscanf(fp, "%lf", &dt) != 1) {
            fprintf(stderr, " read_input: malformed dt entry in inputlu.data\n");
        }
        lu_skip_input_line(fp);
        lu_skip_input_line(fp);
        lu_skip_input_line(fp);
        if (fscanf(fp, "%lf", &omega) != 1) {
            fprintf(stderr, " read_input: malformed omega entry in inputlu.data\n");
        }
        lu_skip_input_line(fp);
        lu_skip_input_line(fp);
        lu_skip_input_line(fp);
        if (fscanf(fp, "%lf%lf%lf%lf%lf", &tolrsd[0], &tolrsd[1], &tolrsd[2], &tolrsd[3], &tolrsd[4]) != 5) {
            fprintf(stderr, " read_input: malformed tolrsd entry in inputlu.data\n");
        }
        lu_skip_input_line(fp);
        lu_skip_input_line(fp);
        lu_skip_input_line(fp);
        if (fscanf(fp, "%d%d%d", &nx0, &ny0, &nz0) != 3) {
            fprintf(stderr, " read_input: malformed grid size entry in inputlu.data\n");
        }
        lu_skip_input_line(fp);
        fclose(fp);
    } else {
        /* built-in defaults (12^3 grid, 50 iterations) */
        ipr = 1;
        inorm = 50;
        itmax = 50;
        dt = 0.5;
        omega = 1.2;
        tolrsd[0] = 1.0e-8;
        tolrsd[1] = 1.0e-8;
        tolrsd[2] = 1.0e-8;
        tolrsd[3] = 1.0e-8;
        tolrsd[4] = 1.0e-8;
        nx0 = 12;
        ny0 = 12;
        nz0 = 12;
    }
    /* validate the grid size: each dimension must be in [4..12], where
     * 12 is the compiled-in array extent (ISIZ1/2/3 of this build) */
    if (nx0 < 4 || ny0 < 4 || nz0 < 4) {
        printf(" PROBLEM SIZE IS TOO SMALL - \n" " SET EACH OF NX, NY AND NZ AT LEAST EQUAL TO 5\n");
        exit(1);
    }
    if (nx0 > 12 || ny0 > 12 || nz0 > 12) {
        printf(" PROBLEM SIZE IS TOO LARGE - \n" " NX, NY AND NZ SHOULD BE EQUAL TO \n" " ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY\n");
        exit(1);
    }
    printf(" Size: %3dx%3dx%3d\n", nx0, ny0, nz0);
    printf(" Iterations: %3d\n", itmax);
}
/*[]*/
static void rhs() {
/*[]*/
/*[]*/
int i;
/*[]*/
int j;
/*[]*/
int k;
/*[]*/
int m;
/*[]*/
int L1;
/*[]*/
int L2;
/*[]*/
int ist1;
/*[]*/
int iend1;
/*[]*/
int jst1;
/*[]*/
int jend1;
/*[]*/
double q;
/*[]*/
double u21;
/*[]*/
double u31;
/*[]*/
double u41;
/*[]*/
double tmp;
/*[]*/
double u21i;
/*[]*/
double u31i;
/*[]*/
double u41i;
/*[]*/
double u51i;
/*[]*/
double u21j;
/*[]*/
double u31j;
/*[]*/
double u41j;
/*[]*/
double u51j;
/*[]*/
double u21k;
/*[]*/
double u31k;
/*[]*/
double u41k;
/*[]*/
double u51k;
/*[]*/
double u21im1;
/*[]*/
double u31im1;
/*[]*/
double u41im1;
/*[]*/
double u51im1;
/*[]*/
double u21jm1;
/*[]*/
double u31jm1;
/*[]*/
double u41jm1;
/*[]*/
double u51jm1;
/*[]*/
double u21km1;
/*[]*/
double u31km1;
/*[]*/
double u41km1;
/*[]*/
double u51km1;
/*[]*/
#pragma omp for nowait
/*[]*/
/*[]*/
/*[]*/
for (i = 0; i <= nx - 1; i++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (j = 0; j <= ny - 1; j++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (k = 0; k <= nz - 1; k++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (m = 0; m < 5; m++) {
/*[]*/
/*[]*/
rsd[i][j][k][m] = -frct[i][j][k][m];
}
}
}
}
/*[]*/
L1 = 0;
/*[]*/
L2 = nx - 1;
/*[]*/
#pragma omp for nowait
/*[]*/
/*[]*/
/*[]*/
for (i = L1; i <= L2; i++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (j = jst; j <= jend; j++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (k = 1; k <= nz - 2; k++) {
/*[]*/
/*[]*/
flux[i][j][k][0] = u[i][j][k][1];
/*[]*/
u21 = u[i][j][k][1] / u[i][j][k][0];
/*[]*/
q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0];
/*[]*/
flux[i][j][k][1] = u[i][j][k][1] * u21 + 0.40e+00 * (u[i][j][k][4] - q);
/*[]*/
flux[i][j][k][2] = u[i][j][k][2] * u21;
/*[]*/
flux[i][j][k][3] = u[i][j][k][3] * u21;
/*[]*/
flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u21;
}
}
}
/*[]*/
#pragma omp for nowait
/*[]*/
/*[]*/
/*[]*/
for (j = jst; j <= jend; j++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (k = 1; k <= nz - 2; k++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (i = ist; i <= iend; i++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (m = 0; m < 5; m++) {
/*[]*/
/*[]*/
rsd[i][j][k][m] = rsd[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]);
}
}
/*[]*/
L2 = nx - 1;
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (i = ist; i <= L2; i++) {
/*[]*/
/*[]*/
tmp = 1.0 / u[i][j][k][0];
/*[]*/
u21i = tmp * u[i][j][k][1];
/*[]*/
u31i = tmp * u[i][j][k][2];
/*[]*/
u41i = tmp * u[i][j][k][3];
/*[]*/
u51i = tmp * u[i][j][k][4];
/*[]*/
tmp = 1.0 / u[i - 1][j][k][0];
/*[]*/
u21im1 = tmp * u[i - 1][j][k][1];
/*[]*/
u31im1 = tmp * u[i - 1][j][k][2];
/*[]*/
u41im1 = tmp * u[i - 1][j][k][3];
/*[]*/
u51im1 = tmp * u[i - 1][j][k][4];
/*[]*/
flux[i][j][k][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1);
/*[]*/
flux[i][j][k][2] = tx3 * (u31i - u31im1);
/*[]*/
flux[i][j][k][3] = tx3 * (u41i - u41im1);
/*[]*/
flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (i = ist; i <= iend; i++) {
/*[]*/
/*[]*/
rsd[i][j][k][0] = rsd[i][j][k][0] + dx1 * tx1 * (u[i - 1][j][k][0] - 2.0 * u[i][j][k][0] + u[i + 1][j][k][0]);
/*[]*/
rsd[i][j][k][1] = rsd[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (u[i - 1][j][k][1] - 2.0 * u[i][j][k][1] + u[i + 1][j][k][1]);
/*[]*/
rsd[i][j][k][2] = rsd[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (u[i - 1][j][k][2] - 2.0 * u[i][j][k][2] + u[i + 1][j][k][2]);
/*[]*/
rsd[i][j][k][3] = rsd[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (u[i - 1][j][k][3] - 2.0 * u[i][j][k][3] + u[i + 1][j][k][3]);
/*[]*/
rsd[i][j][k][4] = rsd[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (u[i - 1][j][k][4] - 2.0 * u[i][j][k][4] + u[i + 1][j][k][4]);
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (m = 0; m < 5; m++) {
/*[]*/
/*[]*/
rsd[1][j][k][m] = rsd[1][j][k][m] - dssp * (+5.0 * u[1][j][k][m] - 4.0 * u[2][j][k][m] + u[3][j][k][m]);
/*[]*/
rsd[2][j][k][m] = rsd[2][j][k][m] - dssp * (-4.0 * u[1][j][k][m] + 6.0 * u[2][j][k][m] - 4.0 * u[3][j][k][m] + u[4][j][k][m]);
}
/*[]*/
ist1 = 3;
/*[]*/
iend1 = nx - 4;
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (i = ist1; i <= iend1; i++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (m = 0; m < 5; m++) {
/*[]*/
/*[]*/
rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);
}
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (m = 0; m < 5; m++) {
/*[]*/
/*[]*/
rsd[nx - 3][j][k][m] = rsd[nx - 3][j][k][m] - dssp * (u[nx - 5][j][k][m] - 4.0 * u[nx - 4][j][k][m] + 6.0 * u[nx - 3][j][k][m] - 4.0 * u[nx - 2][j][k][m]);
/*[]*/
rsd[nx - 2][j][k][m] = rsd[nx - 2][j][k][m] - dssp * (u[nx - 4][j][k][m] - 4.0 * u[nx - 3][j][k][m] + 5.0 * u[nx - 2][j][k][m]);
}
}
}
/*[]*/
L1 = 0;
/*[]*/
L2 = ny - 1;
/*[]*/
#pragma omp for nowait
/*[]*/
/*[]*/
/*[]*/
for (i = ist; i <= iend; i++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (j = L1; j <= L2; j++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (k = 1; k <= nz - 2; k++) {
/*[]*/
/*[]*/
flux[i][j][k][0] = u[i][j][k][2];
/*[]*/
u31 = u[i][j][k][2] / u[i][j][k][0];
/*[]*/
q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0];
/*[]*/
flux[i][j][k][1] = u[i][j][k][1] * u31;
/*[]*/
flux[i][j][k][2] = u[i][j][k][2] * u31 + 0.40e+00 * (u[i][j][k][4] - q);
/*[]*/
flux[i][j][k][3] = u[i][j][k][3] * u31;
/*[]*/
flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u31;
}
}
}
/*[]*/
#pragma omp for nowait
/*[]*/
/*[]*/
/*[]*/
for (i = ist; i <= iend; i++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (k = 1; k <= nz - 2; k++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (j = jst; j <= jend; j++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (m = 0; m < 5; m++) {
/*[]*/
/*[]*/
rsd[i][j][k][m] = rsd[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]);
}
}
/*[]*/
L2 = ny - 1;
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (j = jst; j <= L2; j++) {
/*[]*/
/*[]*/
tmp = 1.0 / u[i][j][k][0];
/*[]*/
u21j = tmp * u[i][j][k][1];
/*[]*/
u31j = tmp * u[i][j][k][2];
/*[]*/
u41j = tmp * u[i][j][k][3];
/*[]*/
u51j = tmp * u[i][j][k][4];
/*[]*/
tmp = 1.0 / u[i][j - 1][k][0];
/*[]*/
u21jm1 = tmp * u[i][j - 1][k][1];
/*[]*/
u31jm1 = tmp * u[i][j - 1][k][2];
/*[]*/
u41jm1 = tmp * u[i][j - 1][k][3];
/*[]*/
u51jm1 = tmp * u[i][j - 1][k][4];
/*[]*/
flux[i][j][k][1] = ty3 * (u21j - u21jm1);
/*[]*/
flux[i][j][k][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1);
/*[]*/
flux[i][j][k][3] = ty3 * (u41j - u41jm1);
/*[]*/
flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (j = jst; j <= jend; j++) {
/*[]*/
/*[]*/
rsd[i][j][k][0] = rsd[i][j][k][0] + dy1 * ty1 * (u[i][j - 1][k][0] - 2.0 * u[i][j][k][0] + u[i][j + 1][k][0]);
/*[]*/
rsd[i][j][k][1] = rsd[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (u[i][j - 1][k][1] - 2.0 * u[i][j][k][1] + u[i][j + 1][k][1]);
/*[]*/
rsd[i][j][k][2] = rsd[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (u[i][j - 1][k][2] - 2.0 * u[i][j][k][2] + u[i][j + 1][k][2]);
/*[]*/
rsd[i][j][k][3] = rsd[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (u[i][j - 1][k][3] - 2.0 * u[i][j][k][3] + u[i][j + 1][k][3]);
/*[]*/
rsd[i][j][k][4] = rsd[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (u[i][j - 1][k][4] - 2.0 * u[i][j][k][4] + u[i][j + 1][k][4]);
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (m = 0; m < 5; m++) {
/*[]*/
/*[]*/
rsd[i][1][k][m] = rsd[i][1][k][m] - dssp * (+5.0 * u[i][1][k][m] - 4.0 * u[i][2][k][m] + u[i][3][k][m]);
/*[]*/
rsd[i][2][k][m] = rsd[i][2][k][m] - dssp * (-4.0 * u[i][1][k][m] + 6.0 * u[i][2][k][m] - 4.0 * u[i][3][k][m] + u[i][4][k][m]);
}
/*[]*/
jst1 = 3;
/*[]*/
jend1 = ny - 4;
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (j = jst1; j <= jend1; j++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (m = 0; m < 5; m++) {
/*[]*/
/*[]*/
rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);
}
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (m = 0; m < 5; m++) {
/*[]*/
/*[]*/
rsd[i][ny - 3][k][m] = rsd[i][ny - 3][k][m] - dssp * (u[i][ny - 5][k][m] - 4.0 * u[i][ny - 4][k][m] + 6.0 * u[i][ny - 3][k][m] - 4.0 * u[i][ny - 2][k][m]);
/*[]*/
rsd[i][ny - 2][k][m] = rsd[i][ny - 2][k][m] - dssp * (u[i][ny - 4][k][m] - 4.0 * u[i][ny - 3][k][m] + 5.0 * u[i][ny - 2][k][m]);
}
}
}
/*[]*/
#pragma omp for nowait
/*[]*/
/*[]*/
/*[]*/
for (i = ist; i <= iend; i++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (j = jst; j <= jend; j++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (k = 0; k <= nz - 1; k++) {
/*[]*/
/*[]*/
flux[i][j][k][0] = u[i][j][k][3];
/*[]*/
u41 = u[i][j][k][3] / u[i][j][k][0];
/*[]*/
q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0];
/*[]*/
flux[i][j][k][1] = u[i][j][k][1] * u41;
/*[]*/
flux[i][j][k][2] = u[i][j][k][2] * u41;
/*[]*/
flux[i][j][k][3] = u[i][j][k][3] * u41 + 0.40e+00 * (u[i][j][k][4] - q);
/*[]*/
flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u41;
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (k = 1; k <= nz - 2; k++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (m = 0; m < 5; m++) {
/*[]*/
/*[]*/
rsd[i][j][k][m] = rsd[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]);
}
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (k = 1; k <= nz - 1; k++) {
/*[]*/
/*[]*/
tmp = 1.0 / u[i][j][k][0];
/*[]*/
u21k = tmp * u[i][j][k][1];
/*[]*/
u31k = tmp * u[i][j][k][2];
/*[]*/
u41k = tmp * u[i][j][k][3];
/*[]*/
u51k = tmp * u[i][j][k][4];
/*[]*/
tmp = 1.0 / u[i][j][k - 1][0];
/*[]*/
u21km1 = tmp * u[i][j][k - 1][1];
/*[]*/
u31km1 = tmp * u[i][j][k - 1][2];
/*[]*/
u41km1 = tmp * u[i][j][k - 1][3];
/*[]*/
u51km1 = tmp * u[i][j][k - 1][4];
/*[]*/
flux[i][j][k][1] = tz3 * (u21k - u21km1);
/*[]*/
flux[i][j][k][2] = tz3 * (u31k - u31km1);
/*[]*/
flux[i][j][k][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1);
/*[]*/
flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (k = 1; k <= nz - 2; k++) {
/*[]*/
/*[]*/
rsd[i][j][k][0] = rsd[i][j][k][0] + dz1 * tz1 * (u[i][j][k - 1][0] - 2.0 * u[i][j][k][0] + u[i][j][k + 1][0]);
/*[]*/
rsd[i][j][k][1] = rsd[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (u[i][j][k - 1][1] - 2.0 * u[i][j][k][1] + u[i][j][k + 1][1]);
/*[]*/
rsd[i][j][k][2] = rsd[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (u[i][j][k - 1][2] - 2.0 * u[i][j][k][2] + u[i][j][k + 1][2]);
/*[]*/
rsd[i][j][k][3] = rsd[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (u[i][j][k - 1][3] - 2.0 * u[i][j][k][3] + u[i][j][k + 1][3]);
/*[]*/
rsd[i][j][k][4] = rsd[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (u[i][j][k - 1][4] - 2.0 * u[i][j][k][4] + u[i][j][k + 1][4]);
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (m = 0; m < 5; m++) {
/*[]*/
/*[]*/
rsd[i][j][1][m] = rsd[i][j][1][m] - dssp * (+5.0 * u[i][j][1][m] - 4.0 * u[i][j][2][m] + u[i][j][3][m]);
/*[]*/
rsd[i][j][2][m] = rsd[i][j][2][m] - dssp * (-4.0 * u[i][j][1][m] + 6.0 * u[i][j][2][m] - 4.0 * u[i][j][3][m] + u[i][j][4][m]);
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (k = 3; k <= nz - 4; k++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (m = 0; m < 5; m++) {
/*[]*/
/*[]*/
rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);
}
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (m = 0; m < 5; m++) {
/*[]*/
/*[]*/
rsd[i][j][nz - 3][m] = rsd[i][j][nz - 3][m] - dssp * (u[i][j][nz - 5][m] - 4.0 * u[i][j][nz - 4][m] + 6.0 * u[i][j][nz - 3][m] - 4.0 * u[i][j][nz - 2][m]);
/*[]*/
rsd[i][j][nz - 2][m] = rsd[i][j][nz - 2][m] - dssp * (u[i][j][nz - 4][m] - 4.0 * u[i][j][nz - 3][m] + 5.0 * u[i][j][nz - 2][m]);
}
}
}
}
/*[]*/
static void setbv() {
    /* Set the boundary values of the dependent variable u on all six
       faces of the grid from the exact solution.  Each face is handled
       by its own worksharing loop; barriers separate the faces exactly
       as in the original phase structure. */
    #pragma omp parallel
    {
        int i, j, k;
        int iglob, jglob;

        /* Faces zeta = 0 and zeta = 1 (k = 0 and k = nz-1). */
        #pragma omp for nowait
        for (i = 0; i < nx; i++) {
            iglob = i;
            for (j = 0; j < ny; j++) {
                jglob = j;
                exact(iglob, jglob, 0, &u[i][j][0][0]);
                exact(iglob, jglob, nz - 1, &u[i][j][nz - 1][0]);
            }
        }

        #pragma omp barrier

        /* Face eta = 0 (j = 0). */
        #pragma omp for nowait
        for (i = 0; i < nx; i++) {
            iglob = i;
            for (k = 0; k < nz; k++) {
                exact(iglob, 0, k, &u[i][0][k][0]);
            }
        }

        #pragma omp barrier

        /* Face eta = 1: the exact() argument uses the global extent ny0
           while the array index uses the local extent ny (as in the
           original code). */
        #pragma omp for nowait
        for (i = 0; i < nx; i++) {
            iglob = i;
            for (k = 0; k < nz; k++) {
                exact(iglob, ny0 - 1, k, &u[i][ny - 1][k][0]);
            }
        }

        #pragma omp barrier

        /* Face xi = 0 (i = 0). */
        #pragma omp for nowait
        for (j = 0; j < ny; j++) {
            jglob = j;
            for (k = 0; k < nz; k++) {
                exact(0, jglob, k, &u[0][j][k][0]);
            }
        }

        #pragma omp barrier

        /* Face xi = 1: global extent nx0 as argument, local index nx-1. */
        #pragma omp for nowait
        for (j = 0; j < ny; j++) {
            jglob = j;
            for (k = 0; k < nz; k++) {
                exact(nx0 - 1, jglob, k, &u[nx - 1][j][k][0]);
            }
        }
    }
}
/*[]*/
static void setcoeff() {
    /* Initialize the global coefficients used by the LU solver:
       grid spacings, their derived t-factors, index bounds, artificial
       dissipation terms, and the ce[][] coefficient table of the exact
       solution. */

    /* Grid spacings (based on the global grid sizes). */
    dxi = 1.0 / (nx0 - 1);
    deta = 1.0 / (ny0 - 1);
    dzeta = 1.0 / (nz0 - 1);

    /* Derived factors: 1/h^2, 1/(2h), 1/h per direction. */
    tx1 = 1.0 / (dxi * dxi);
    tx2 = 1.0 / (2.0 * dxi);
    tx3 = 1.0 / dxi;
    ty1 = 1.0 / (deta * deta);
    ty2 = 1.0 / (2.0 * deta);
    ty3 = 1.0 / deta;
    tz1 = 1.0 / (dzeta * dzeta);
    tz2 = 1.0 / (2.0 * dzeta);
    tz3 = 1.0 / dzeta;

    /* Index bounds for the verification norms. */
    ii1 = 1;
    ii2 = nx0 - 2;
    ji1 = 1;
    ji2 = ny0 - 3;
    ki1 = 2;
    ki2 = nz0 - 2;

    /* Diffusion coefficients, identical within each direction. */
    dx1 = 0.75;
    dx2 = dx1;
    dx3 = dx1;
    dx4 = dx1;
    dx5 = dx1;
    dy1 = 0.75;
    dy2 = dy1;
    dy3 = dy1;
    dy4 = dy1;
    dy5 = dy1;
    dz1 = 1.00;
    dz2 = dz1;
    dz3 = dz1;
    dz4 = dz1;
    dz5 = dz1;

    /* Fourth-order dissipation: dssp = max(dx1, dy1, dz1) / 4. */
    {
        double dmax = dx1;
        if (dy1 > dmax) {
            dmax = dy1;
        }
        if (dz1 > dmax) {
            dmax = dz1;
        }
        dssp = dmax / 4.0;
    }

    /* Coefficients of the exact solution, copied row by row. */
    {
        static const double ce_init[5][13] = {
            { 2.0, 0.0, 0.0, 4.0, 5.0, 3.0, 5.0e-01,
              2.0e-02, 1.0e-02, 3.0e-02, 5.0e-01, 4.0e-01, 3.0e-01 },
            { 1.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0,
              1.0e-02, 3.0e-02, 2.0e-02, 4.0e-01, 3.0e-01, 5.0e-01 },
            { 2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 3.0,
              4.0e-02, 3.0e-02, 5.0e-02, 3.0e-01, 5.0e-01, 4.0e-01 },
            { 2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 3.0,
              3.0e-02, 5.0e-02, 4.0e-02, 2.0e-01, 1.0e-01, 3.0e-01 },
            { 5.0, 4.0, 3.0, 2.0, 1.0e-01, 4.0e-01, 3.0e-01,
              5.0e-02, 4.0e-02, 3.0e-02, 1.0e-01, 3.0e-01, 2.0e-01 }
        };
        int r;
        int c_idx;
        for (r = 0; r < 5; r++) {
            for (c_idx = 0; c_idx < 13; c_idx++) {
                ce[r][c_idx] = ce_init[r][c_idx];
            }
        }
    }
}
/*[]*/
static void setiv() {
    /* Set the initial values of u at all interior grid points as a
       tri-linear blend of the exact solution evaluated on the six
       boundary faces. */
    #pragma omp parallel
    {
        int i, j, k, m;
        int iglob, jglob;
        double xi, eta, zeta;
        double pxi, peta, pzeta;
        /* Exact solution on the two faces of each direction, for the
           current (i, j, k). */
        double ue_1jk[5];
        double ue_nx0jk[5];
        double ue_i1k[5];
        double ue_iny0k[5];
        double ue_ij1[5];
        double ue_ijnz[5];

        #pragma omp for nowait
        for (j = 0; j < ny; j++) {
            jglob = j;
            for (k = 1; k < nz - 1; k++) {
                zeta = ((double) k) / (nz - 1);
                /* Skip the eta boundaries (set by setbv). */
                if (jglob != 0 && jglob != ny0 - 1) {
                    eta = ((double) jglob) / (ny0 - 1);
                    for (i = 0; i < nx; i++) {
                        iglob = i;
                        /* Skip the xi boundaries as well. */
                        if (iglob != 0 && iglob != nx0 - 1) {
                            xi = ((double) iglob) / (nx0 - 1);
                            exact(0, jglob, k, ue_1jk);
                            exact(nx0 - 1, jglob, k, ue_nx0jk);
                            exact(iglob, 0, k, ue_i1k);
                            exact(iglob, ny0 - 1, k, ue_iny0k);
                            exact(iglob, jglob, 0, ue_ij1);
                            exact(iglob, jglob, nz - 1, ue_ijnz);
                            for (m = 0; m < 5; m++) {
                                /* Linear interpolation along each axis,
                                   then the inclusion-exclusion blend. */
                                pxi = (1.0 - xi) * ue_1jk[m] + xi * ue_nx0jk[m];
                                peta = (1.0 - eta) * ue_i1k[m] + eta * ue_iny0k[m];
                                pzeta = (1.0 - zeta) * ue_ij1[m] + zeta * ue_ijnz[m];
                                u[i][j][k][m] = pxi + peta + pzeta
                                              - pxi * peta - peta * pzeta - pzeta * pxi
                                              + pxi * peta * pzeta;
                            }
                        }
                    }
                }
            }
        }
    }
}
/*[]*/
static void ssor() {
/*[]*/
/*[]*/
int i;
/*[]*/
int j;
/*[]*/
int k;
/*[]*/
int m;
/*[]*/
int istep;
/*[]*/
double tmp;
/*[]*/
double delunm[5];
/*[]*/
double tv[12][12][5];
/*[]*/
tmp = 1.0 / (omega * (2.0 - omega));
/*[55]*/
#pragma omp parallel private(i, j, k, m)
{
/*[55]*/
/*[55]*/
#pragma omp for nowait
/*[55]*/
/*[55]*/
/*[55]*/
for (i = 0; i < 12; i++) {
/*[55]*/
/*[55]*/
/*[55]*/
/*[55]*/
/*[55]*/
for (j = 0; j < 12; j++) {
/*[55]*/
/*[55]*/
/*[55]*/
/*[55]*/
/*[55]*/
for (k = 0; k < 5; k++) {
/*[55]*/
/*[55]*/
/*[55]*/
/*[55]*/
/*[55]*/
for (m = 0; m < 5; m++) {
/*[55]*/
/*[55]*/
a[i][j][k][m] = 0.0;
/*[55]*/
b[i][j][k][m] = 0.0;
/*[55]*/
c[i][j][k][m] = 0.0;
/*[55]*/
d[i][j][k][m] = 0.0;
}
}
}
}
}
/*[56]*/
#pragma omp parallel
{
/*[56]*/
/*[56]*/
int i_imopVarPre84;
/*[56]*/
int j_imopVarPre85;
/*[56]*/
int k_imopVarPre86;
/*[56]*/
int m_imopVarPre87;
/*[56]*/
int L1;
/*[56]*/
int L2;
/*[56]*/
int ist1;
/*[56]*/
int iend1;
/*[56]*/
int jst1;
/*[56]*/
int jend1;
/*[56]*/
double q;
/*[56]*/
double u21;
/*[56]*/
double u31;
/*[56]*/
double u41;
/*[56]*/
double tmp_imopVarPre88;
/*[56]*/
double u21i;
/*[56]*/
double u31i;
/*[56]*/
double u41i;
/*[56]*/
double u51i;
/*[56]*/
double u21j;
/*[56]*/
double u31j;
/*[56]*/
double u41j;
/*[56]*/
double u51j;
/*[56]*/
double u21k;
/*[56]*/
double u31k;
/*[56]*/
double u41k;
/*[56]*/
double u51k;
/*[56]*/
double u21im1;
/*[56]*/
double u31im1;
/*[56]*/
double u41im1;
/*[56]*/
double u51im1;
/*[56]*/
double u21jm1;
/*[56]*/
double u31jm1;
/*[56]*/
double u41jm1;
/*[56]*/
double u51jm1;
/*[56]*/
double u21km1;
/*[56]*/
double u31km1;
/*[56]*/
double u41km1;
/*[56]*/
double u51km1;
/*[56]*/
#pragma omp for nowait
/*[56]*/
/*[56]*/
/*[56]*/
for (i_imopVarPre84 = 0; i_imopVarPre84 <= nx - 1; i_imopVarPre84++) {
/*[56]*/
/*[56]*/
/*[56]*/
/*[56]*/
/*[56]*/
for (j_imopVarPre85 = 0; j_imopVarPre85 <= ny - 1; j_imopVarPre85++) {
/*[56]*/
/*[56]*/
/*[56]*/
/*[56]*/
/*[56]*/
for (k_imopVarPre86 = 0; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) {
/*[56]*/
/*[56]*/
/*[56]*/
/*[56]*/
/*[56]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[56]*/
/*[56]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = -frct[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87];
}
}
}
}
/*[56]*/
L1 = 0;
/*[56]*/
L2 = nx - 1;
/*[56]*/
#pragma omp for nowait
/*[56]*/
/*[56]*/
/*[56]*/
for (i_imopVarPre84 = L1; i_imopVarPre84 <= L2; i_imopVarPre84++) {
/*[56]*/
/*[56]*/
/*[56]*/
/*[56]*/
/*[56]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) {
/*[56]*/
/*[56]*/
/*[56]*/
/*[56]*/
/*[56]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[56]*/
/*[56]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1];
/*[56]*/
u21 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[56]*/
q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[56]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u21 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q);
/*[56]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u21;
/*[56]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u21;
/*[56]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u21;
}
}
}
/*[56]*/
// #pragma omp dummyFlush BARRIER_START
/*[56]*/
#pragma omp barrier
/*[57]*/
#pragma omp for nowait
/*[57]*/
/*[57]*/
/*[57]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) {
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) {
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[57]*/
/*[57]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - tx2 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - flux[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
}
}
/*[57]*/
L2 = nx - 1;
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= L2; i_imopVarPre84++) {
/*[57]*/
/*[57]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[57]*/
u21i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1];
/*[57]*/
u31i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2];
/*[57]*/
u41i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3];
/*[57]*/
u51i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4];
/*[57]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][0];
/*[57]*/
u21im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][1];
/*[57]*/
u31im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][2];
/*[57]*/
u41im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][3];
/*[57]*/
u51im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][4];
/*[57]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1);
/*[57]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = tx3 * (u31i - u31im1);
/*[57]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = tx3 * (u41i - u41im1);
/*[57]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);
}
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) {
/*[57]*/
/*[57]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dx1 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][0]);
/*[57]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dx2 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][1]);
/*[57]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dx3 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][2]);
/*[57]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dx4 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][3]);
/*[57]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dx5 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][4]);
}
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[57]*/
/*[57]*/
rsd[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (+5.0 * u[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
/*[57]*/
rsd[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (-4.0 * u[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
}
/*[57]*/
ist1 = 3;
/*[57]*/
iend1 = nx - 4;
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
for (i_imopVarPre84 = ist1; i_imopVarPre84 <= iend1; i_imopVarPre84++) {
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[57]*/
/*[57]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84 - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84 + 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
}
}
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[57]*/
/*[57]*/
rsd[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[nx - 5][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
/*[57]*/
rsd[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[nx - 4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 5.0 * u[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
}
}
}
/*[57]*/
// #pragma omp dummyFlush BARRIER_START
/*[57]*/
#pragma omp barrier
/*[58]*/
L1 = 0;
/*[58]*/
L2 = ny - 1;
/*[58]*/
#pragma omp for nowait
/*[58]*/
/*[58]*/
/*[58]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) {
/*[58]*/
/*[58]*/
/*[58]*/
/*[58]*/
/*[58]*/
for (j_imopVarPre85 = L1; j_imopVarPre85 <= L2; j_imopVarPre85++) {
/*[58]*/
/*[58]*/
/*[58]*/
/*[58]*/
/*[58]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[58]*/
/*[58]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2];
/*[58]*/
u31 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[58]*/
q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[58]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u31;
/*[58]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u31 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q);
/*[58]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u31;
/*[58]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u31;
}
}
}
/*[58]*/
// #pragma omp dummyFlush BARRIER_START
/*[58]*/
#pragma omp barrier
/*[59]*/
#pragma omp for nowait
/*[59]*/
/*[59]*/
/*[59]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) {
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) {
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[59]*/
/*[59]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - ty2 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][m_imopVarPre87] - flux[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][m_imopVarPre87]);
}
}
/*[59]*/
L2 = ny - 1;
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= L2; j_imopVarPre85++) {
/*[59]*/
/*[59]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[59]*/
u21j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1];
/*[59]*/
u31j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2];
/*[59]*/
u41j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3];
/*[59]*/
u51j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4];
/*[59]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][0];
/*[59]*/
u21jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][1];
/*[59]*/
u31jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][2];
/*[59]*/
u41jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][3];
/*[59]*/
u51jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][4];
/*[59]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = ty3 * (u21j - u21jm1);
/*[59]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1);
/*[59]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = ty3 * (u41j - u41jm1);
/*[59]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);
}
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) {
/*[59]*/
/*[59]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dy1 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][0]);
/*[59]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dy2 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][1]);
/*[59]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dy3 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][2]);
/*[59]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dy4 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][3]);
/*[59]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dy5 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][4]);
}
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[59]*/
/*[59]*/
rsd[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] - dssp * (+5.0 * u[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][3][k_imopVarPre86][m_imopVarPre87]);
/*[59]*/
rsd[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] - dssp * (-4.0 * u[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][3][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][4][k_imopVarPre86][m_imopVarPre87]);
}
/*[59]*/
jst1 = 3;
/*[59]*/
jend1 = ny - 4;
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
for (j_imopVarPre85 = jst1; j_imopVarPre85 <= jend1; j_imopVarPre85++) {
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[59]*/
/*[59]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85 - 2][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85 + 2][k_imopVarPre86][m_imopVarPre87]);
}
}
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[59]*/
/*[59]*/
rsd[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][ny - 5][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 4][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87]);
/*[59]*/
rsd[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][ny - 4][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] + 5.0 * u[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87]);
}
}
}
/*[59]*/
// #pragma omp dummyFlush BARRIER_START
/*[59]*/
#pragma omp barrier
/*[60]*/
#pragma omp for nowait
/*[60]*/
/*[60]*/
/*[60]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) {
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) {
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
for (k_imopVarPre86 = 0; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) {
/*[60]*/
/*[60]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3];
/*[60]*/
u41 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[60]*/
q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[60]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u41;
/*[60]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u41;
/*[60]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u41 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q);
/*[60]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u41;
}
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[60]*/
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - tz2 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][m_imopVarPre87] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][m_imopVarPre87]);
}
}
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) {
/*[60]*/
/*[60]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[60]*/
u21k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1];
/*[60]*/
u31k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2];
/*[60]*/
u41k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3];
/*[60]*/
u51k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4];
/*[60]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][0];
/*[60]*/
u21km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][1];
/*[60]*/
u31km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][2];
/*[60]*/
u41km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][3];
/*[60]*/
u51km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][4];
/*[60]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = tz3 * (u21k - u21km1);
/*[60]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = tz3 * (u31k - u31km1);
/*[60]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1);
/*[60]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);
}
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[60]*/
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dz1 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][0]);
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dz2 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][1]);
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dz3 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][2]);
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dz4 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][3]);
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dz5 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][4]);
}
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[60]*/
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] - dssp * (+5.0 * u[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][3][m_imopVarPre87]);
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] - dssp * (-4.0 * u[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][3][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][4][m_imopVarPre87]);
}
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
for (k_imopVarPre86 = 3; k_imopVarPre86 <= nz - 4; k_imopVarPre86++) {
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[60]*/
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 2][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 2][m_imopVarPre87]);
}
}
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[60]*/
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][nz - 5][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 4][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87]);
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][nz - 4][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] + 5.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87]);
}
}
}
}
/*[61]*/
/*
 * Compute the L2 norm of the residual array rsd into rsdnm:
 *   rsdnm[m] = sqrt( (1/N) * sum over interior points of rsd[i][j][k][m]^2 )
 * where N = (nx0-2)*(ny0-2)*(nz0-2) (number of interior grid points;
 * nx0/ny0/nz0 are presumably the full problem-grid dimensions set
 * elsewhere -- TODO confirm against the setup code).
 * This is the NPB "l2norm" routine inlined as an OpenMP region:
 * each thread accumulates private partial sums (sum0..sum4), which
 * are then combined under a critical section, and finally one thread
 * applies the divide-and-sqrt.
 */
#pragma omp parallel
{
/*[61]*/
/*[61]*/
/* v aliases the 4-D residual array rsd; the [12/2*2+1] extents are the
 * compile-time class-S array bounds of the benchmark. */
double ( *v )[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/*[61]*/
/* sum aliases the 5-element output vector rsdnm (one norm per equation). */
double *sum;
/*[61]*/
v = rsd;
/*[61]*/
sum = rsdnm;
/*[61]*/
int i_imopVarPre75;
/*[61]*/
int j_imopVarPre76;
/*[61]*/
int k_imopVarPre77;
/*[61]*/
int m_imopVarPre78;
/*[61]*/
/* Per-thread partial sums, one per conserved variable m = 0..4. */
double sum0 = 0.0;
/*[61]*/
double sum1 = 0.0;
/*[61]*/
double sum2 = 0.0;
/*[61]*/
double sum3 = 0.0;
/*[61]*/
double sum4 = 0.0;
/*[61]*/
/* One thread zeroes the shared accumulators before the reduction. */
#pragma omp single nowait
{
/*[61]*/
/*[61]*/
/*[61]*/
/*[61]*/
/*[61]*/
for (m_imopVarPre78 = 0; m_imopVarPre78 < 5; m_imopVarPre78++) {
/*[61]*/
/*[61]*/
sum[m_imopVarPre78] = 0.0;
}
}
/*[61]*/
// #pragma omp dummyFlush BARRIER_START
/*[61]*/
/* Barrier: all threads must see sum[] zeroed before accumulating. */
#pragma omp barrier
/*[62]*/
/* Work-shared sweep over interior points; bounds ist..iend / jst..jend
 * are presumably the per-process interior index ranges -- TODO confirm. */
#pragma omp for nowait
/*[62]*/
/*[62]*/
/*[62]*/
for (i_imopVarPre75 = ist; i_imopVarPre75 <= iend; i_imopVarPre75++) {
/*[62]*/
/*[62]*/
/*[62]*/
/*[62]*/
/*[62]*/
for (j_imopVarPre76 = jst; j_imopVarPre76 <= jend; j_imopVarPre76++) {
/*[62]*/
/*[62]*/
/*[62]*/
/*[62]*/
/*[62]*/
for (k_imopVarPre77 = 1; k_imopVarPre77 <= nz0 - 2; k_imopVarPre77++) {
/*[62]*/
/*[62]*/
sum0 = sum0 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][0] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][0];
/*[62]*/
sum1 = sum1 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][1] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][1];
/*[62]*/
sum2 = sum2 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][2] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][2];
/*[62]*/
sum3 = sum3 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][3] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][3];
/*[62]*/
sum4 = sum4 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][4] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][4];
}
}
}
/*[62]*/
// #pragma omp dummyFlush CRITICAL_START
/*[62]*/
/* Merge this thread's partial sums into the shared accumulators. */
#pragma omp critical
{
/*[62]*/
/*[62]*/
sum[0] += sum0;
/*[62]*/
sum[1] += sum1;
/*[62]*/
sum[2] += sum2;
/*[62]*/
sum[3] += sum3;
/*[62]*/
sum[4] += sum4;
}
/*[62]*/
// #pragma omp dummyFlush CRITICAL_END
/*[62]*/
// #pragma omp dummyFlush BARRIER_START
/*[62]*/
/* Barrier: all partial sums must be merged before the final sqrt. */
#pragma omp barrier
/*[63]*/
/* One thread finishes the norm: sum[m] = sqrt(sum[m] / N). */
#pragma omp single nowait
{
/*[63]*/
/*[63]*/
/*[63]*/
/*[63]*/
/*[63]*/
for (m_imopVarPre78 = 0; m_imopVarPre78 < 5; m_imopVarPre78++) {
/*[63]*/
/*[63]*/
double _imopVarPre154;
/*[63]*/
double _imopVarPre155;
/*[63]*/
_imopVarPre154 = sum[m_imopVarPre78] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2));
/*[63]*/
_imopVarPre155 = sqrt(_imopVarPre154);
/*[63]*/
/*[63]*/
sum[m_imopVarPre78] = _imopVarPre155;
}
}
}
/*[]*/
/* Reset and start timer slot 1 (NPB utility timers, presumably timing
 * the main SSOR iteration loop below -- TODO confirm where it is read). */
timer_clear(1);
/*[]*/
/*[]*/
timer_start(1);
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (istep = 1; istep <= itmax; istep++) {
/*[]*/
/*[]*/
int _imopVarPre372;
/*[]*/
int _imopVarPre370;
/*[]*/
int _imopVarPre371;
/*[]*/
_imopVarPre370 = istep % 20 == 0;
/*[]*/
/*[]*/
if (!_imopVarPre370) {
/*[]*/
/*[]*/
_imopVarPre371 = istep == itmax;
/*[]*/
/*[]*/
if (!_imopVarPre371) {
/*[]*/
/*[]*/
_imopVarPre371 = istep == 1;
}
/*[]*/
_imopVarPre370 = _imopVarPre371;
}
/*[]*/
/*[]*/
if (_imopVarPre370) {
/*[]*/
/*[]*/
#pragma omp master
{
/*[]*/
/*[]*/
printf(" Time step %4d\n", istep);
/*[]*/
}
}
/*[64]*/
/*
 * NOTE(review): "istep" appears in the private(...) clause, so each
 * thread's copy is UNINITIALIZED on entry to this parallel region, yet
 * istep is read later inside the region (e.g. "istep % inorm" guards
 * the norm computation). Reading an uninitialized private copy is
 * undefined behavior. It should most likely be firstprivate(istep) or
 * simply shared -- TODO confirm against the reference NPB LU ssor.c.
 */
#pragma omp parallel private(istep, i, j, k, m)
{
/*[64]*/
/*[64]*/
int _imopVarPre377;
/*[64]*/
int _imopVarPre378;
/*[64]*/
int _imopVarPre379;
/*[64]*/
int _imopVarPre380;
/*[64]*/
#pragma omp for nowait
/*[64]*/
/*[64]*/
/*[64]*/
for (i = ist; i <= iend; i++) {
/*[64]*/
/*[64]*/
/*[64]*/
/*[64]*/
/*[64]*/
for (j = jst; j <= jend; j++) {
/*[64]*/
/*[64]*/
/*[64]*/
/*[64]*/
/*[64]*/
for (k = 1; k <= nz - 2; k++) {
/*[64]*/
/*[64]*/
/*[64]*/
/*[64]*/
/*[64]*/
for (m = 0; m < 5; m++) {
/*[64]*/
/*[64]*/
rsd[i][j][k][m] = dt * rsd[i][j][k][m];
}
}
}
}
/*[64]*/
// #pragma omp dummyFlush BARRIER_START
/*[64]*/
#pragma omp barrier
/*[41]*/
/*[41]*/
/*[41]*/
/*[41]*/
/*
 * SSOR forward sweep: for each k-plane from bottom to top, form the
 * lower-triangular Jacobian blocks (jacld) and perform the block
 * lower-triangular solve (blts) on rsd.
 */
for (k = 1; k <= nz - 2; k++) {
/*[41]*/
/*[41]*/
jacld(k);
/*[41]*/
/*[41]*/
blts(nx, ny, nz, k, omega, rsd, a, b, c, d, ist, iend, jst, jend, nx0, ny0);
/*[41]*/
}
/*[41]*/
// #pragma omp dummyFlush BARRIER_START
/*[41]*/
/* Barrier: the lower solve must complete before the upper sweep reads rsd. */
#pragma omp barrier
/*[42]*/
/*[42]*/
/*[42]*/
/*[42]*/
/*
 * SSOR backward sweep: for each k-plane from top to bottom, form the
 * upper-triangular Jacobian blocks (jacu) and perform the block
 * upper-triangular solve (buts) on rsd (tv is scratch storage).
 */
for (k = nz - 2; k >= 1; k--) {
/*[42]*/
/*[42]*/
jacu(k);
/*[42]*/
/*[42]*/
buts(nx, ny, nz, k, omega, rsd, tv, d, a, b, c, ist, iend, jst, jend, nx0, ny0);
/*[42]*/
}
/*[42]*/
// #pragma omp dummyFlush BARRIER_START
/*[42]*/
#pragma omp barrier
/*[65]*/
#pragma omp for nowait
/*[65]*/
/*[65]*/
/*[65]*/
for (i = ist; i <= iend; i++) {
/*[65]*/
/*[65]*/
/*[65]*/
/*[65]*/
/*[65]*/
for (j = jst; j <= jend; j++) {
/*[65]*/
/*[65]*/
/*[65]*/
/*[65]*/
/*[65]*/
for (k = 1; k <= nz - 2; k++) {
/*[65]*/
/*[65]*/
/*[65]*/
/*[65]*/
/*[65]*/
for (m = 0; m < 5; m++) {
/*[65]*/
/*[65]*/
u[i][j][k][m] = u[i][j][k][m] + tmp * rsd[i][j][k][m];
}
}
}
}
/*[65]*/
/*[65]*/
if (istep % inorm == 0) {
/*[65]*/
/*[65]*/
double ( *v )[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/*[65]*/
double *sum;
/*[65]*/
v = rsd;
/*[65]*/
sum = delunm;
/*[65]*/
int i_imopVarPre89;
/*[65]*/
int j_imopVarPre90;
/*[65]*/
int k_imopVarPre91;
/*[65]*/
int m_imopVarPre92;
/*[65]*/
double sum0 = 0.0;
/*[65]*/
double sum1 = 0.0;
/*[65]*/
double sum2 = 0.0;
/*[65]*/
double sum3 = 0.0;
/*[65]*/
double sum4 = 0.0;
/*[65]*/
#pragma omp single nowait
{
/*[65]*/
/*[65]*/
/*[65]*/
/*[65]*/
/*[65]*/
for (m_imopVarPre92 = 0; m_imopVarPre92 < 5; m_imopVarPre92++) {
/*[65]*/
/*[65]*/
sum[m_imopVarPre92] = 0.0;
}
}
/*[65]*/
// #pragma omp dummyFlush BARRIER_START
/*[65]*/
#pragma omp barrier
/*[66]*/
#pragma omp for nowait
/*[66]*/
/*[66]*/
/*[66]*/
for (i_imopVarPre89 = ist; i_imopVarPre89 <= iend; i_imopVarPre89++) {
/*[66]*/
/*[66]*/
/*[66]*/
/*[66]*/
/*[66]*/
for (j_imopVarPre90 = jst; j_imopVarPre90 <= jend; j_imopVarPre90++) {
/*[66]*/
/*[66]*/
/*[66]*/
/*[66]*/
/*[66]*/
for (k_imopVarPre91 = 1; k_imopVarPre91 <= nz0 - 2; k_imopVarPre91++) {
/*[66]*/
/*[66]*/
sum0 = sum0 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][0] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][0];
/*[66]*/
sum1 = sum1 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][1] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][1];
/*[66]*/
sum2 = sum2 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][2] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][2];
/*[66]*/
sum3 = sum3 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][3] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][3];
/*[66]*/
sum4 = sum4 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][4] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][4];
}
}
}
/*[66]*/
// #pragma omp dummyFlush CRITICAL_START
/*[66]*/
#pragma omp critical
{
/*[66]*/
/*[66]*/
sum[0] += sum0;
/*[66]*/
sum[1] += sum1;
/*[66]*/
sum[2] += sum2;
/*[66]*/
sum[3] += sum3;
/*[66]*/
sum[4] += sum4;
}
/*[66]*/
// #pragma omp dummyFlush CRITICAL_END
/*[66]*/
// #pragma omp dummyFlush BARRIER_START
/*[66]*/
#pragma omp barrier
/*[67]*/
#pragma omp single nowait
{
/*[67]*/
/*[67]*/
/*[67]*/
/*[67]*/
/*[67]*/
for (m_imopVarPre92 = 0; m_imopVarPre92 < 5; m_imopVarPre92++) {
/*[67]*/
/*[67]*/
double _imopVarPre154;
/*[67]*/
double _imopVarPre155;
/*[67]*/
_imopVarPre154 = sum[m_imopVarPre92] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2));
/*[67]*/
_imopVarPre155 = sqrt(_imopVarPre154);
/*[67]*/
/*[67]*/
sum[m_imopVarPre92] = _imopVarPre155;
}
}
/*[67]*/
// #pragma omp dummyFlush BARRIER_START
/*[67]*/
#pragma omp barrier
/*[68]*/
// #pragma omp dummyFlush BARRIER_START
/*[68]*/
#pragma omp barrier
}
/*[65, 69]*/
// #pragma omp dummyFlush BARRIER_START
/*[65, 69]*/
#pragma omp barrier
/*[66, 70]*/
int i_imopVarPre79;
/*[66, 70]*/
int j_imopVarPre80;
/*[66, 70]*/
int k_imopVarPre81;
/*[66, 70]*/
int m_imopVarPre82;
/*[66, 70]*/
int L1;
/*[66, 70]*/
int L2;
/*[66, 70]*/
int ist1;
/*[66, 70]*/
int iend1;
/*[66, 70]*/
int jst1;
/*[66, 70]*/
int jend1;
/*[66, 70]*/
double q;
/*[66, 70]*/
double u21;
/*[66, 70]*/
double u31;
/*[66, 70]*/
double u41;
/*[66, 70]*/
double tmp_imopVarPre83;
/*[66, 70]*/
double u21i;
/*[66, 70]*/
double u31i;
/*[66, 70]*/
double u41i;
/*[66, 70]*/
double u51i;
/*[66, 70]*/
double u21j;
/*[66, 70]*/
double u31j;
/*[66, 70]*/
double u41j;
/*[66, 70]*/
double u51j;
/*[66, 70]*/
double u21k;
/*[66, 70]*/
double u31k;
/*[66, 70]*/
double u41k;
/*[66, 70]*/
double u51k;
/*[66, 70]*/
double u21im1;
/*[66, 70]*/
double u31im1;
/*[66, 70]*/
double u41im1;
/*[66, 70]*/
double u51im1;
/*[66, 70]*/
double u21jm1;
/*[66, 70]*/
double u31jm1;
/*[66, 70]*/
double u41jm1;
/*[66, 70]*/
double u51jm1;
/*[66, 70]*/
double u21km1;
/*[66, 70]*/
double u31km1;
/*[66, 70]*/
double u41km1;
/*[66, 70]*/
double u51km1;
/*[66, 70]*/
#pragma omp for nowait
/*[66, 70]*/
/*[66, 70]*/
/*[66, 70]*/
for (i_imopVarPre79 = 0; i_imopVarPre79 <= nx - 1; i_imopVarPre79++) {
/*[66, 70]*/
/*[66, 70]*/
/*[66, 70]*/
/*[66, 70]*/
/*[66, 70]*/
for (j_imopVarPre80 = 0; j_imopVarPre80 <= ny - 1; j_imopVarPre80++) {
/*[66, 70]*/
/*[66, 70]*/
/*[66, 70]*/
/*[66, 70]*/
/*[66, 70]*/
for (k_imopVarPre81 = 0; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) {
/*[66, 70]*/
/*[66, 70]*/
/*[66, 70]*/
/*[66, 70]*/
/*[66, 70]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[66, 70]*/
/*[66, 70]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = -frct[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82];
}
}
}
}
/*[66, 70]*/
// #pragma omp dummyFlush BARRIER_START
/*[66, 70]*/
#pragma omp barrier
/*[67, 71]*/
L1 = 0;
/*[67, 71]*/
L2 = nx - 1;
/*[67, 71]*/
#pragma omp for nowait
/*[67, 71]*/
/*[67, 71]*/
/*[67, 71]*/
for (i_imopVarPre79 = L1; i_imopVarPre79 <= L2; i_imopVarPre79++) {
/*[67, 71]*/
/*[67, 71]*/
/*[67, 71]*/
/*[67, 71]*/
/*[67, 71]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) {
/*[67, 71]*/
/*[67, 71]*/
/*[67, 71]*/
/*[67, 71]*/
/*[67, 71]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[67, 71]*/
/*[67, 71]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1];
/*[67, 71]*/
u21 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[67, 71]*/
q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[67, 71]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u21 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q);
/*[67, 71]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u21;
/*[67, 71]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u21;
/*[67, 71]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u21;
}
}
}
/*[67, 71]*/
// #pragma omp dummyFlush BARRIER_START
/*[67, 71]*/
#pragma omp barrier
/*[68, 72]*/
#pragma omp for nowait
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) {
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) {
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[68, 72]*/
/*[68, 72]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - tx2 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - flux[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
}
}
/*[68, 72]*/
L2 = nx - 1;
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= L2; i_imopVarPre79++) {
/*[68, 72]*/
/*[68, 72]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[68, 72]*/
u21i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1];
/*[68, 72]*/
u31i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2];
/*[68, 72]*/
u41i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3];
/*[68, 72]*/
u51i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4];
/*[68, 72]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][0];
/*[68, 72]*/
u21im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][1];
/*[68, 72]*/
u31im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][2];
/*[68, 72]*/
u41im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][3];
/*[68, 72]*/
u51im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][4];
/*[68, 72]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1);
/*[68, 72]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = tx3 * (u31i - u31im1);
/*[68, 72]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = tx3 * (u41i - u41im1);
/*[68, 72]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);
}
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) {
/*[68, 72]*/
/*[68, 72]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dx1 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][0]);
/*[68, 72]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dx2 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][1]);
/*[68, 72]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dx3 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][2]);
/*[68, 72]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dx4 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][3]);
/*[68, 72]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dx5 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][4]);
}
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[68, 72]*/
/*[68, 72]*/
rsd[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (+5.0 * u[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
/*[68, 72]*/
rsd[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (-4.0 * u[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
}
/*[68, 72]*/
ist1 = 3;
/*[68, 72]*/
iend1 = nx - 4;
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
for (i_imopVarPre79 = ist1; i_imopVarPre79 <= iend1; i_imopVarPre79++) {
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[68, 72]*/
/*[68, 72]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79 - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79 + 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
}
}
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[68, 72]*/
/*[68, 72]*/
rsd[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[nx - 5][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
/*[68, 72]*/
rsd[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[nx - 4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 5.0 * u[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
}
}
}
/*[68, 72]*/
// #pragma omp dummyFlush BARRIER_START
/*[68, 72]*/
#pragma omp barrier
/*[69, 73]*/
L1 = 0;
/*[69, 73]*/
L2 = ny - 1;
/*[69, 73]*/
#pragma omp for nowait
/*[69, 73]*/
/*[69, 73]*/
/*[69, 73]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) {
/*[69, 73]*/
/*[69, 73]*/
/*[69, 73]*/
/*[69, 73]*/
/*[69, 73]*/
for (j_imopVarPre80 = L1; j_imopVarPre80 <= L2; j_imopVarPre80++) {
/*[69, 73]*/
/*[69, 73]*/
/*[69, 73]*/
/*[69, 73]*/
/*[69, 73]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[69, 73]*/
/*[69, 73]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2];
/*[69, 73]*/
u31 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[69, 73]*/
q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[69, 73]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u31;
/*[69, 73]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u31 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q);
/*[69, 73]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u31;
/*[69, 73]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u31;
}
}
}
/*[69, 73]*/
// #pragma omp dummyFlush BARRIER_START
/*[69, 73]*/
#pragma omp barrier
/*[70, 74]*/
#pragma omp for nowait
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) {
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) {
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[70, 74]*/
/*[70, 74]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - ty2 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][m_imopVarPre82] - flux[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][m_imopVarPre82]);
}
}
/*[70, 74]*/
L2 = ny - 1;
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= L2; j_imopVarPre80++) {
/*[70, 74]*/
/*[70, 74]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[70, 74]*/
u21j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1];
/*[70, 74]*/
u31j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2];
/*[70, 74]*/
u41j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3];
/*[70, 74]*/
u51j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4];
/*[70, 74]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][0];
/*[70, 74]*/
u21jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][1];
/*[70, 74]*/
u31jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][2];
/*[70, 74]*/
u41jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][3];
/*[70, 74]*/
u51jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][4];
/*[70, 74]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = ty3 * (u21j - u21jm1);
/*[70, 74]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1);
/*[70, 74]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = ty3 * (u41j - u41jm1);
/*[70, 74]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);
}
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) {
/*[70, 74]*/
/*[70, 74]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dy1 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][0]);
/*[70, 74]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dy2 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][1]);
/*[70, 74]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dy3 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][2]);
/*[70, 74]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dy4 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][3]);
/*[70, 74]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dy5 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][4]);
}
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[70, 74]*/
/*[70, 74]*/
rsd[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] - dssp * (+5.0 * u[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][3][k_imopVarPre81][m_imopVarPre82]);
/*[70, 74]*/
rsd[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] - dssp * (-4.0 * u[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][3][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][4][k_imopVarPre81][m_imopVarPre82]);
}
/*[70, 74]*/
jst1 = 3;
/*[70, 74]*/
jend1 = ny - 4;
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
for (j_imopVarPre80 = jst1; j_imopVarPre80 <= jend1; j_imopVarPre80++) {
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[70, 74]*/
/*[70, 74]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80 - 2][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80 + 2][k_imopVarPre81][m_imopVarPre82]);
}
}
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[70, 74]*/
/*[70, 74]*/
rsd[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][ny - 5][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 4][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82]);
/*[70, 74]*/
rsd[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][ny - 4][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] + 5.0 * u[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82]);
}
}
}
/*[70, 74]*/
// #pragma omp dummyFlush BARRIER_START
/*[70, 74]*/
#pragma omp barrier
/*[71, 75]*/
#pragma omp for nowait
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) {
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) {
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (k_imopVarPre81 = 0; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) {
/*[71, 75]*/
/*[71, 75]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3];
/*[71, 75]*/
u41 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[71, 75]*/
q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[71, 75]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u41;
/*[71, 75]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u41;
/*[71, 75]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u41 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q);
/*[71, 75]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u41;
}
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[71, 75]*/
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - tz2 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][m_imopVarPre82] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][m_imopVarPre82]);
}
}
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) {
/*[71, 75]*/
/*[71, 75]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[71, 75]*/
u21k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1];
/*[71, 75]*/
u31k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2];
/*[71, 75]*/
u41k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3];
/*[71, 75]*/
u51k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4];
/*[71, 75]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][0];
/*[71, 75]*/
u21km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][1];
/*[71, 75]*/
u31km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][2];
/*[71, 75]*/
u41km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][3];
/*[71, 75]*/
u51km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][4];
/*[71, 75]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = tz3 * (u21k - u21km1);
/*[71, 75]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = tz3 * (u31k - u31km1);
/*[71, 75]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1);
/*[71, 75]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);
}
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[71, 75]*/
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dz1 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][0]);
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dz2 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][1]);
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dz3 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][2]);
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dz4 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][3]);
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dz5 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][4]);
}
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[71, 75]*/
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] - dssp * (+5.0 * u[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][3][m_imopVarPre82]);
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] - dssp * (-4.0 * u[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][3][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][4][m_imopVarPre82]);
}
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (k_imopVarPre81 = 3; k_imopVarPre81 <= nz - 4; k_imopVarPre81++) {
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[71, 75]*/
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 2][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 2][m_imopVarPre82]);
}
}
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[71, 75]*/
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][nz - 5][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 4][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82]);
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][nz - 4][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] + 5.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82]);
}
}
}
/*[71, 75]*/
// #pragma omp dummyFlush BARRIER_START
/*[71, 75]*/
#pragma omp barrier
/*[72, 76]*/
// #pragma omp dummyFlush BARRIER_START
/*[72, 76]*/
#pragma omp barrier
/*[73, 77]*/
#pragma omp master
{
/*[73, 77]*/
/*[73, 77]*/
_imopVarPre372 = (istep % inorm == 0);
/*[73, 77]*/
/*[73, 77]*/
if (!_imopVarPre372) {
/*[73, 77]*/
/*[73, 77]*/
_imopVarPre372 = (istep == itmax);
}
}
/*[73, 77]*/
// #pragma omp dummyFlush BARRIER_START
/*[73, 77]*/
#pragma omp barrier
/*[74]*/
/*[74]*/
if (_imopVarPre372) {
/*[74]*/
/*[74]*/
double ( *v )[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/*[74]*/
double *sum;
/*[74]*/
v = rsd;
/*[74]*/
sum = rsdnm;
/*[74]*/
int i_imopVarPre93;
/*[74]*/
int j_imopVarPre94;
/*[74]*/
int k_imopVarPre95;
/*[74]*/
int m_imopVarPre96;
/*[74]*/
double sum0 = 0.0;
/*[74]*/
double sum1 = 0.0;
/*[74]*/
double sum2 = 0.0;
/*[74]*/
double sum3 = 0.0;
/*[74]*/
double sum4 = 0.0;
/*[74]*/
#pragma omp single nowait
{
/*[74]*/
/*[74]*/
/*[74]*/
/*[74]*/
/*[74]*/
for (m_imopVarPre96 = 0; m_imopVarPre96 < 5; m_imopVarPre96++) {
/*[74]*/
/*[74]*/
sum[m_imopVarPre96] = 0.0;
}
}
/*[74]*/
// #pragma omp dummyFlush BARRIER_START
/*[74]*/
#pragma omp barrier
/*[75]*/
#pragma omp for nowait
/*[75]*/
/*[75]*/
/*[75]*/
for (i_imopVarPre93 = ist; i_imopVarPre93 <= iend; i_imopVarPre93++) {
/*[75]*/
/*[75]*/
/*[75]*/
/*[75]*/
/*[75]*/
for (j_imopVarPre94 = jst; j_imopVarPre94 <= jend; j_imopVarPre94++) {
/*[75]*/
/*[75]*/
/*[75]*/
/*[75]*/
/*[75]*/
for (k_imopVarPre95 = 1; k_imopVarPre95 <= nz0 - 2; k_imopVarPre95++) {
/*[75]*/
/*[75]*/
sum0 = sum0 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][0] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][0];
/*[75]*/
sum1 = sum1 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][1] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][1];
/*[75]*/
sum2 = sum2 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][2] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][2];
/*[75]*/
sum3 = sum3 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][3] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][3];
/*[75]*/
sum4 = sum4 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][4] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][4];
}
}
}
/*[75]*/
// #pragma omp dummyFlush CRITICAL_START
/*[75]*/
#pragma omp critical
{
/*[75]*/
/*[75]*/
sum[0] += sum0;
/*[75]*/
sum[1] += sum1;
/*[75]*/
sum[2] += sum2;
/*[75]*/
sum[3] += sum3;
/*[75]*/
sum[4] += sum4;
}
/*[75]*/
// #pragma omp dummyFlush CRITICAL_END
/*[75]*/
// #pragma omp dummyFlush BARRIER_START
/*[75]*/
#pragma omp barrier
/*[76]*/
#pragma omp single nowait
{
/*[76]*/
/*[76]*/
/*[76]*/
/*[76]*/
/*[76]*/
for (m_imopVarPre96 = 0; m_imopVarPre96 < 5; m_imopVarPre96++) {
/*[76]*/
/*[76]*/
double _imopVarPre154;
/*[76]*/
double _imopVarPre155;
/*[76]*/
_imopVarPre154 = sum[m_imopVarPre96] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2));
/*[76]*/
_imopVarPre155 = sqrt(_imopVarPre154);
/*[76]*/
/*[76]*/
sum[m_imopVarPre96] = _imopVarPre155;
}
}
}
/*[74, 76]*/
// #pragma omp dummyFlush BARRIER_START
/*[74, 76]*/
#pragma omp barrier
/*[75, 77]*/
#pragma omp master
{
/*[75, 77]*/
/*[75, 77]*/
_imopVarPre377 = (rsdnm[0] < tolrsd[0]);
/*[75, 77]*/
/*[75, 77]*/
if (_imopVarPre377) {
/*[75, 77]*/
/*[75, 77]*/
_imopVarPre378 = (rsdnm[1] < tolrsd[1]);
/*[75, 77]*/
/*[75, 77]*/
if (_imopVarPre378) {
/*[75, 77]*/
/*[75, 77]*/
_imopVarPre379 = (rsdnm[2] < tolrsd[2]);
/*[75, 77]*/
/*[75, 77]*/
if (_imopVarPre379) {
/*[75, 77]*/
/*[75, 77]*/
_imopVarPre380 = (rsdnm[3] < tolrsd[3]);
/*[75, 77]*/
/*[75, 77]*/
if (_imopVarPre380) {
/*[75, 77]*/
/*[75, 77]*/
_imopVarPre380 = (rsdnm[4] < tolrsd[4]);
}
/*[75, 77]*/
_imopVarPre379 = _imopVarPre380;
}
/*[75, 77]*/
_imopVarPre378 = _imopVarPre379;
}
/*[75, 77]*/
_imopVarPre377 = _imopVarPre378;
}
/*[75, 77]*/
/*[75, 77]*/
if (_imopVarPre377) {
/*[75, 77]*/
/*[75, 77]*/
exit(1);
/*[75, 77]*/
}
}
}
}
/*[]*/
timer_stop(1);
/*[]*/
/*[]*/
maxtime = timer_read(1);
/*[]*/
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
static void verify(double xcr[5], double xce[5] , double xci , char *class , boolean *verified) {
    /*
     * Verify the LU benchmark results.
     *
     * Compares the computed residual norms (xcr[0..4]), solution-error norms
     * (xce[0..4]) and surface integral (xci) against stored reference values
     * for the standard problem classes, prints a verification report, and
     * sets:
     *   *class    -> 'S','W','A','B','C' on a class match, 'U' otherwise
     *   *verified -> 1 if every quantity is within epsilon, else 0
     *
     * The five duplicated class branches of the original code are collapsed
     * into one reference table: every class uses a cubic grid
     * (nx0 == ny0 == nz0 == n) and a fixed iteration count.
     * Reads globals nx0, ny0, nz0, itmax, dt.
     */
    struct class_ref {
        int n;              /* required grid size: nx0 == ny0 == nz0 == n */
        int iterations;     /* required itmax                             */
        char cls;           /* class letter                               */
        double dtref;       /* reference time step                        */
        double xcrref[5];   /* reference residual norms                   */
        double xceref[5];   /* reference solution-error norms             */
        double xciref;      /* reference surface integral                 */
    };
    static const struct class_ref refs[] = {
        { 12, 50, 'S', 5.0e-1,
          { 1.6196343210976702e-02, 2.1976745164821318e-03, 1.5179927653399185e-03,
            1.5029584435994323e-03, 3.4264073155896461e-02 },
          { 6.4223319957960924e-04, 8.4144342047347926e-05, 5.8588269616485186e-05,
            5.8474222595157350e-05, 1.3103347914111294e-03 },
          7.8418928865937083 },
        { 33, 300, 'W', 1.5e-3,
          { 0.1236511638192e+02, 0.1317228477799e+01, 0.2550120713095e+01,
            0.2326187750252e+01, 0.2826799444189e+02 },
          { 0.4867877144216, 0.5064652880982e-01, 0.9281818101960e-01,
            0.8570126542733e-01, 0.1084277417792e+01 },
          0.1161399311023e+02 },
        { 64, 250, 'A', 2.0e+0,
          { 7.7902107606689367e+02, 6.3402765259692870e+01, 1.9499249727292479e+02,
            1.7845301160418537e+02, 1.8384760349464247e+03 },
          { 2.9964085685471943e+01, 2.8194576365003349, 7.3473412698774742,
            6.7139225687777051, 7.0715315688392578e+01 },
          2.6030925604886277e+01 },
        { 102, 250, 'B', 2.0e+0,
          { 3.5532672969982736e+03, 2.6214750795310692e+02, 8.8333721850952190e+02,
            7.7812774739425265e+02, 7.3087969592545314e+03 },
          { 1.1401176380212709e+02, 8.1098963655421574, 2.8480597317698308e+01,
            2.5905394567832939e+01, 2.6054907504857413e+02 },
          4.7887162703308227e+01 },
        { 162, 250, 'C', 2.0e+0,
          { 1.03766980323537846e+04, 8.92212458801008552e+02, 2.56238814582660871e+03,
            2.19194343857831427e+03, 1.78078057261061185e+04 },
          { 2.15986399716949279e+02, 1.55789559239863600e+01, 5.41318863077207766e+01,
            4.82262643154045421e+01, 4.55902910043250358e+02 },
          6.66404553572181300e+01 },
    };
    double xcrref[5];
    double xceref[5];
    double xciref;
    double xcrdif[5];
    double xcedif[5];
    double xcidif;
    double epsilon = 1.0e-08;   /* relative-error tolerance */
    double dtref = 0.0;         /* defensively initialized; only read once a class matched */
    int m;
    int t;
    *class = 'U';
    *verified = 1;
    /* Defaults of 1.0 keep the relative-difference divisions below safe
       when no reference class matches. */
    for (m = 0; m < 5; m++) {
        xcrref[m] = 1.0;
        xceref[m] = 1.0;
    }
    xciref = 1.0;
    /* Identify the problem class from the global grid size and iteration
       count; on a match, load its reference values. */
    for (t = 0; t < (int) (sizeof refs / sizeof refs[0]); t++) {
        if (nx0 == refs[t].n && ny0 == refs[t].n && nz0 == refs[t].n
                && itmax == refs[t].iterations) {
            *class = refs[t].cls;
            dtref = refs[t].dtref;
            for (m = 0; m < 5; m++) {
                xcrref[m] = refs[t].xcrref[m];
                xceref[m] = refs[t].xceref[m];
            }
            xciref = refs[t].xciref;
            break;
        }
    }
    if (*class == 'U') {
        /* Unknown configuration: nothing to verify against. */
        *verified = 0;
    }
    /* Relative differences against the reference values. */
    for (m = 0; m < 5; m++) {
        xcrdif[m] = fabs((xcr[m] - xcrref[m]) / xcrref[m]);
        xcedif[m] = fabs((xce[m] - xceref[m]) / xceref[m]);
    }
    xcidif = fabs((xci - xciref) / xciref);
    if (*class != 'U') {
        printf("\n Verification being performed for class %1c\n", *class);
        printf(" Accuracy setting for epsilon = %20.13e\n", epsilon);
        /* A mismatched time step invalidates the whole comparison. */
        if (fabs(dt - dtref) > epsilon) {
            *verified = 0;
            *class = 'U';
            printf(" DT does not match the reference value of %15.8e\n", dtref);
        }
    } else {
        printf(" Unknown class\n");
    }
    if (*class != 'U') {
        printf(" Comparison of RMS-norms of residual\n");
    } else {
        printf(" RMS-norms of residual\n");
    }
    for (m = 0; m < 5; m++) {
        if (*class == 'U') {
            printf(" %2d %20.13e\n", m, xcr[m]);
        } else if (xcrdif[m] > epsilon) {
            *verified = 0;
            printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n", m, xcr[m], xcrref[m], xcrdif[m]);
        } else {
            printf(" %2d %20.13e%20.13e%20.13e\n", m, xcr[m], xcrref[m], xcrdif[m]);
        }
    }
    if (*class != 'U') {
        printf(" Comparison of RMS-norms of solution error\n");
    } else {
        printf(" RMS-norms of solution error\n");
    }
    for (m = 0; m < 5; m++) {
        if (*class == 'U') {
            printf(" %2d %20.13e\n", m, xce[m]);
        } else if (xcedif[m] > epsilon) {
            *verified = 0;
            printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n", m, xce[m], xceref[m], xcedif[m]);
        } else {
            printf(" %2d %20.13e%20.13e%20.13e\n", m, xce[m], xceref[m], xcedif[m]);
        }
    }
    if (*class != 'U') {
        printf(" Comparison of surface integral\n");
    } else {
        printf(" Surface integral\n");
    }
    if (*class == 'U') {
        printf(" %20.13e\n", xci);
    } else if (xcidif > epsilon) {
        *verified = 0;
        printf(" FAILURE: %20.13e%20.13e%20.13e\n", xci, xciref, xcidif);
    } else {
        printf(" %20.13e%20.13e%20.13e\n", xci, xciref, xcidif);
    }
    if (*class == 'U') {
        printf(" No reference values provided\n");
        printf(" No verification performed\n");
    } else if (*verified) {
        printf(" Verification Successful\n");
    } else {
        printf(" Verification failed\n");
    }
}
|
generated-funcs.c | // Check that the CHECK lines are generated for clang-generated functions
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fopenmp %s -emit-llvm -o - | FileCheck --check-prefix=OMP %s
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu %s -emit-llvm -o - | FileCheck --check-prefix=NOOMP %s
// Shared test data for the two OpenMP loops below.
// NOTE(review): `double A[size]` with a const int bound is a clang
// extension in C (not an integer constant expression) -- presumably
// intentional for this compiler test; confirm before reusing elsewhere.
const int size = 1024 * 1024 * 32;
double A[size];
void foo(void);
// Entry point: zero-fills A with an OpenMP-parallelized loop, then calls
// foo(). The loop shape is deliberately minimal -- this file exists to
// check that CHECK lines are generated for the clang-outlined OpenMP
// helper functions, so the code must stay in canonical loop form.
int main() {
int i = 0;
#pragma omp parallel for
for (i = 0; i < size; ++i) {
A[i] = 0.0;
}
foo();
return 0;
}
// Second OpenMP-outlined function: fills A with 1.0. Exists so the test
// covers CHECK-line generation for more than one compiler-generated
// outlined function.
void foo(void) {
int i = 0;
#pragma omp parallel for
for (i = 0; i < size; ++i) {
A[i] = 1.0;
}
}
|
reduce_to_width_mex.c | #include "mex.h"
#include <immintrin.h>
#include "simd_guard.h"
//
// For compiling instructions, see big_plot.compile()
//
// Flags:
// ENABLE_SIMD
//Status
//-----------
//1) Parallel min and max across threads
//2) Starts at an arbitrary index into the data (for processing subsets)
//3) All classes supported
//4) Most of SIMD is implemented ...
/* Normalize the build flag ENABLE_SIMD into a 0/1 constant that can be
 * used in ordinary C expressions (not just #ifdef). */
#ifdef ENABLE_SIMD
#define SIMD_ENABLED 1
#else
#define SIMD_ENABLED 0
#endif
/* Portable in-macro pragma: MSVC uses __pragma(x), C99 compilers use
 * _Pragma("x"). */
#ifdef _MSC_VER
#define PRAGMA __pragma
#else
#define PRAGMA _Pragma
#endif
//200203 - VS2017
mwSize getScalarInput(const mxArray *input, int input_number){
    //
    // Convert a scalar double mxArray input to mwSize.
    //
    // Inputs
    // -------
    // input : must be a non-empty double array; its first element is used
    // input_number : 1 based
    //     Used for error reporting
    //
    // Errors (via mexErrMsgIdAndTxt, which does not return) if the input
    // is not of class double or is empty.
    if (!mxIsClass(input,"double")){
        mexErrMsgIdAndTxt("SL:reduce_to_width:input_class_type",
            "Input #%d type needs to be double",input_number);
    }
    // mxGetScalar on an empty array is undefined behavior per the MEX API
    // docs; reject empties explicitly rather than returning garbage.
    if (mxGetNumberOfElements(input) == 0){
        mexErrMsgIdAndTxt("SL:reduce_to_width:input_empty",
            "Input #%d must be non-empty",input_number);
    }
    double temp = mxGetScalar(input);
    return (mwSize) temp;
}
//=========================================================================
//=========================================================================
/* Declare typed cursors for one reduction pass:
 * - p_input_data_fixed / p_output_data_fixed anchor the start of the input
 *   matrix (prhs[0]) and of a freshly mxMalloc'd output buffer of
 *   n_chans * n_outputs_per_chan elements
 * - p_input_data / p_output_data are the moving copies advanced by the
 *   other macros below */
#define INIT_POINTERS(TYPE) \
TYPE *p_input_data_fixed = (TYPE *)mxGetData(prhs[0]); \
TYPE *p_input_data = p_input_data_fixed; \
TYPE *p_output_data_fixed = (TYPE *)mxMalloc(sizeof(TYPE)*n_chans*n_outputs_per_chan); \
TYPE *p_output_data = p_output_data_fixed;
/* When pad_with_endpoints is set: copy each channel's first and last input
 * sample into the first and last output slots (so Matlab keeps the x-axis
 * limits stable), then reset the cursors and position them for the main
 * min/max loop (optionally offset by start_index when process_subset).
 * Not type specific. Comments below stay inside the macro body, so they
 * must remain C-style and precede each continuation backslash. */
#define GRAB_OUTSIDE_POINTS \
/*Initialize the first and last values of the output - not class specific*/ \
/*---------------------------------------------------------------------*/ \
/*We keep the first and last values if we are not plotting everything*/ \
/* - If we don't do this Matlab can mess with the x-axes limits*/ \
/*We need to loop through each channel and assign:*/ \
/* 1) The first data point in each channel to the first output value*/ \
/* 2) The last data point in each channel to the last output value*/ \
/* */ \
/* - This is not class specific*/ \
/* - Ideally we could make this optional for streaming*/ \
if (pad_with_endpoints){ \
for (mwSize iChan = 0; iChan < n_chans; iChan++){ \
/*Store first data point to output*/ \
/* I had *p_output_data = 0 to reduce seek memory */ \
/* but this causes problems when edges are visible */ \
*p_output_data = *p_input_data; \
 \
/*Advance input and output pointers to end of column*/ \
p_output_data += (n_outputs_per_chan-1); \
p_input_data += (n_samples_data-1); \
 \
/*Store last data point*/ \
*p_output_data = *p_input_data; \
 \
/*Roll over to the next channel*/ \
/*1st sample of next is 1 more than last sample of current*/ \
++p_input_data; \
++p_output_data; \
} \
 \
/*Adjust pointers for next section*/ \
/*------------------------------------------------*/ \
/*Resetting to initial position*/ \
p_output_data = p_output_data_fixed; \
p_input_data = p_input_data_fixed; \
 \
/*Move output beyond first point (logged above)*/ \
++p_output_data; \
 \
if (process_subset){ \
p_input_data = p_input_data + start_index; \
} \
}
//This splitting was added for testing ...
//My preprocessor skills are not that great so I copy/pasted
//everything. I'm not sure if I could reduce redundancy
/* Select one of three INIT_MAIN_LOOP variants (OpenMP+SIMD, OpenMP only,
 * serial). The loop bodies are identical; only the pragma differs.
 *
 * FIX: the original guard was misspelled `ENABLE_OPNEMP_SIMD`, so the
 * documented `ENABLE_OPENMP_SIMD` flag could never select the SIMD branch.
 * Both spellings are accepted here so any existing build script that used
 * the misspelled flag keeps working. The OpenMP-only guard is also
 * hardened to defined(), so `-DENABLE_OPENMP` with an empty value does not
 * break preprocessing. */
#if defined(ENABLE_OPENMP_SIMD) || defined(ENABLE_OPNEMP_SIMD)
//OpenMP enabled
//-----------------------------------------------------------------
#define INIT_MAIN_LOOP(type) \
/*#pragma omp parallel for simd collapse(2)*/ \
PRAGMA("omp parallel for simd collapse(2)") \
for (mwSize iChan = 0; iChan < n_chans; iChan++){ \
/*Note, we can't initialize anything before this loop, since we*/ \
/*are collapsing the first two loops. This allows us to parallelize*/ \
/*both of the first two loops, which is good when the # of channels*/ \
/*does not equal the # of threads.*/ \
for (mwSize iChunk = 0; iChunk < n_chunks; iChunk++){ \
type *current_input_data_point = p_input_data + n_samples_data*iChan + iChunk*samples_per_chunk; \
/*Pointer => start + column wrapping + offset (row into column) - 1*/ \
/* *2 since we store min and max in each chunk*/ \
type *local_output_data = p_output_data + n_outputs_per_chan*iChan + 2*iChunk;
#elif defined(ENABLE_OPENMP)
//OpenMP enabled
//-----------------------------------------------------------------
#define INIT_MAIN_LOOP(type) \
PRAGMA("omp parallel for collapse(2)") \
for (mwSize iChan = 0; iChan < n_chans; iChan++){ \
/*Note, we can't initialize anything before this loop, since we*/ \
/*are collapsing the first two loops. This allows us to parallelize*/ \
/*both of the first two loops, which is good when the # of channels*/ \
/*does not equal the # of threads.*/ \
for (mwSize iChunk = 0; iChunk < n_chunks; iChunk++){ \
type *current_input_data_point = p_input_data + n_samples_data*iChan + iChunk*samples_per_chunk; \
/*Pointer => start + column wrapping + offset (row into column) - 1*/ \
/* *2 since we store min and max in each chunk*/ \
type *local_output_data = p_output_data + n_outputs_per_chan*iChan + 2*iChunk;
#else
//OpenMP disabled version
//-----------------------------------------------------------------
#define INIT_MAIN_LOOP(type) \
for (mwSize iChan = 0; iChan < n_chans; iChan++){ \
/*Note, we can't initialize anything before this loop, since we*/ \
/*are collapsing the first two loops. This allows us to parallelize*/ \
/*both of the first two loops, which is good when the # of channels*/ \
/*does not equal the # of threads.*/ \
for (mwSize iChunk = 0; iChunk < n_chunks; iChunk++){ \
type *current_input_data_point = p_input_data + n_samples_data*iChan + iChunk*samples_per_chunk; \
/*Pointer => start + column wrapping + offset (row into column) - 1*/ \
/* *2 since we store min and max in each chunk*/ \
type *local_output_data = p_output_data + n_outputs_per_chan*iChan + 2*iChunk;
#endif
//Closes the two loops opened by INIT_MAIN_LOOP.
#define END_MAIN_LOOP \
    } \
}
//Scalar min/max scan over one chunk (samples 1..samples_per_chunk-1).
//NOTE(review): relies on 'min', 'max' and 'current_input_data_point' already
//being declared in the enclosing scope; this macro does not appear to be used
//by the dispatch code below — confirm before deleting.
#define RUN_STD_MIN_MAX \
    for (mwSize iSample = 1; iSample < samples_per_chunk; iSample++){ \
        if (*(++current_input_data_point) > max){ \
            max = *current_input_data_point; \
        }else if (*current_input_data_point < min){ \
            min = *current_input_data_point; \
        } \
    }
//Stores the min/max pair into two consecutive output slots.
//NOTE(review): not referenced by the visible dispatch code — confirm usage.
#define LOG_MIN_MAX \
    *local_output_data = min; \
    *(++local_output_data) = max;
//Handles the trailing samples (per channel) that did not fill a complete
//chunk; writes one extra min/max pair at output slot 2*n_chunks.
//NOTE(review): the OpenMP pragma is emitted unconditionally here, unlike
//INIT_MAIN_LOOP which is gated on ENABLE_OPNEMP_SIMD/ENABLE_OPENMP — this is
//harmless only if PRAGMA() expands to nothing in non-OpenMP builds; confirm
//against PRAGMA's definition (not visible in this file section).
#define PROCESS_EXTRA_NON_CHUNK_SAMPLES(type) \
    /*---------------------------------------------------------------------*/ \
    /*     Processing last part that didn't fit into a chunk               */ \
    /*---------------------------------------------------------------------*/ \
    if (n_samples_not_in_chunk){ \
        PRAGMA("omp parallel for simd") \
        for (mwSize iChan = 0; iChan < n_chans; iChan++){ \
            \
            type *current_input_data_point = p_input_data + n_samples_data*iChan + n_chunks*samples_per_chunk; \
            \
            type *local_output_data = p_output_data + n_outputs_per_chan*iChan + 2*n_chunks; \
            \
            type min = *current_input_data_point; \
            type max = *current_input_data_point; \
            \
            for (mwSize iSample = 1; iSample < n_samples_not_in_chunk; iSample++){ \
                if (*(++current_input_data_point) > max){ \
                    max = *current_input_data_point; \
                }else if (*current_input_data_point < min){ \
                    min = *current_input_data_point; \
                } \
            } \
            *local_output_data = min; \
            *(++local_output_data) = max; \
        } \
    }
//Attaches the already-filled output buffer to plhs[0] without copying:
//create an empty numeric matrix, then hand it the data pointer and dims.
//Optionally returns the processor-path code (p_type) as a second output.
//NOTE(review): p_output_data_fixed must have been allocated with mxMalloc
//(presumably by INIT_POINTERS, defined earlier in the file) so that MATLAB
//can take ownership — confirm.
#define POPULATE_OUTPUT \
    plhs[0] = mxCreateNumericMatrix(0, 0, data_class_id, mxREAL); \
    mxSetData(plhs[0],p_output_data_fixed); \
    mxSetM(plhs[0],n_outputs_per_chan); \
    mxSetN(plhs[0],n_chans); \
    if (nlhs == 2){ \
        plhs[1] = mxCreateDoubleScalar(p_type); \
    }
//Shared argument list / parameter list for the getMinMax* helpers, keeping
//every call site and definition in sync (min slot, max slot, count, input).
#define STD_INPUT_CALL local_output_data, local_output_data+1, samples_per_chunk, current_input_data_point
#define STD_INPUT_DEFINE(type) type *min_out, type *max_out, mwSize samples_per_chunk, type *current_input_data_point
//==================================================================
// MIN MAX STANDARD
//==================================================================
//Scalar min/max over samples_per_chunk samples starting at
//current_input_data_point; results written to *min_out / *max_out.
//Expects the parameter names introduced by STD_INPUT_DEFINE.
#define GET_MIN_MAX_STANDARD(TYPE) \
    TYPE min = *current_input_data_point; \
    TYPE max = *current_input_data_point; \
    \
    for (mwSize iSample = 1; iSample < samples_per_chunk; iSample++){ \
        if (*(++current_input_data_point) > max){ \
            max = *current_input_data_point; \
        }else if (*current_input_data_point < min){ \
            min = *current_input_data_point; \
        } \
    } \
    \
    *min_out = min; \
    *max_out = max;
//==================================================================
//Scalar (non-SIMD) min/max reduction for one chunk of doubles; writes the
//extrema of the first samples_per_chunk samples to *min_out / *max_out.
//(Removed stale commented-out mexPrintf debug lines, which also had min/max
//swapped in their labels.)
void getMinMaxDouble_Standard(STD_INPUT_DEFINE(double)){
    GET_MIN_MAX_STANDARD(double)
}
//Scalar fallbacks for the remaining supported element types; each is a thin
//typed instantiation of GET_MIN_MAX_STANDARD (see macro above for contract).
void getMinMaxFloat_Standard(STD_INPUT_DEFINE(float)){
    GET_MIN_MAX_STANDARD(float)
}
void getMinMaxUint64_Standard(STD_INPUT_DEFINE(uint64_t)){
    GET_MIN_MAX_STANDARD(uint64_t)
}
void getMinMaxUint32_Standard(STD_INPUT_DEFINE(uint32_t)){
    GET_MIN_MAX_STANDARD(uint32_t)
}
void getMinMaxUint16_Standard(STD_INPUT_DEFINE(uint16_t)){
    GET_MIN_MAX_STANDARD(uint16_t)
}
void getMinMaxUint8_Standard(STD_INPUT_DEFINE(uint8_t)){
    GET_MIN_MAX_STANDARD(uint8_t)
}
void getMinMaxInt64_Standard(STD_INPUT_DEFINE(int64_t)){
    GET_MIN_MAX_STANDARD(int64_t)
}
void getMinMaxInt32_Standard(STD_INPUT_DEFINE(int32_t)){
    GET_MIN_MAX_STANDARD(int32_t)
}
void getMinMaxInt16_Standard(STD_INPUT_DEFINE(int16_t)){
    GET_MIN_MAX_STANDARD(int16_t)
}
void getMinMaxInt8_Standard(STD_INPUT_DEFINE(int8_t)){
    GET_MIN_MAX_STANDARD(int8_t)
}
//==================================================================
//GET_MIN_MAX_SIMD(double,,4,__m256d,_mm256_loadu_pd,_mm256_max_pd,_mm256_min_pd,_mm256_storeu_pd)
// next = _mm256_loadu_si256((__m256i *)(data+j));
//==================================================================
// MIN MAX SIMD
//==================================================================
#define GET_MIN_MAX_SIMD(TYPE,CAST,N_SIMD,SIMD_TYPE,LOAD,MAX,MIN,STORE) \
SIMD_TYPE next; \
SIMD_TYPE max_result; \
SIMD_TYPE min_result; \
TYPE max_output[N_SIMD]; \
TYPE min_output[N_SIMD]; \
TYPE min; \
TYPE max; \
\
max_result = LOAD(CAST current_input_data_point); \
min_result = max_result; \
\
for (mwSize j = N_SIMD; j < (samples_per_chunk/N_SIMD)*N_SIMD; j+=N_SIMD){ \
next = LOAD(CAST (current_input_data_point+j)); \
max_result = MAX(max_result, next); \
min_result = MIN(min_result, next); \
} \
\
/*Extract max values and reduce ...*/ \
STORE(CAST max_output, max_result); \
STORE(CAST min_output, min_result); \
\
max = max_output[0]; \
for (int i = 1; i < N_SIMD; i++){ \
if (max_output[i] > max){ \
max = max_output[i]; \
} \
} \
min = min_output[0]; \
for (int i = 1; i < N_SIMD; i++){ \
if (min_output[i] < min){ \
min = min_output[i]; \
} \
} \
\
for (mwSize j = (samples_per_chunk/N_SIMD)*N_SIMD; j < samples_per_chunk; j++){ \
if (*(current_input_data_point + j) > max){ \
max = *(current_input_data_point + j); \
}else if (*(current_input_data_point + j) < min){ \
min = *(current_input_data_point + j); \
} \
} \
\
*min_out = min; \
*max_out = max;
//=========================================================================
//256-bit float/double paths require AVX (dispatch checks HW_AVX && OS_AVX).
void getMinMaxDouble_SIMD_256(STD_INPUT_DEFINE(double)){
    GET_MIN_MAX_SIMD(double,,4,__m256d,_mm256_loadu_pd,_mm256_max_pd,_mm256_min_pd,_mm256_storeu_pd)
}
void getMinMaxFloat_SIMD_256(STD_INPUT_DEFINE(float)){
    GET_MIN_MAX_SIMD(float,,8,__m256,_mm256_loadu_ps,_mm256_max_ps,_mm256_min_ps,_mm256_storeu_ps)
}
//--------------------
//256-bit integer min/max intrinsics require AVX2; _mm_max_epu32/_mm_min_epu32
//require SSE4.1 (dispatch below checks HW_AVX2 / HW_SSE41 accordingly).
void getMinMaxUint32_SIMD_256(STD_INPUT_DEFINE(uint32_t)){
    GET_MIN_MAX_SIMD(uint32_t,(__m256i *),8,__m256i,_mm256_loadu_si256,_mm256_max_epu32,_mm256_min_epu32,_mm256_storeu_si256)
}
void getMinMaxUint32_SIMD_128(STD_INPUT_DEFINE(uint32_t)){
    GET_MIN_MAX_SIMD(uint32_t,(__m128i *),4,__m128i,_mm_loadu_si128,_mm_max_epu32,_mm_min_epu32,_mm_storeu_si128)
}
//--------------------
//_mm_max_epu16/_mm_min_epu16 require SSE4.1 (dispatch checks HW_SSE41).
void getMinMaxUint16_SIMD_256(STD_INPUT_DEFINE(uint16_t)){
    GET_MIN_MAX_SIMD(uint16_t,(__m256i *),16,__m256i,_mm256_loadu_si256,_mm256_max_epu16,_mm256_min_epu16,_mm256_storeu_si256)
}
void getMinMaxUint16_SIMD_128(STD_INPUT_DEFINE(uint16_t)){
    GET_MIN_MAX_SIMD(uint16_t,(__m128i *),8,__m128i,_mm_loadu_si128,_mm_max_epu16,_mm_min_epu16,_mm_storeu_si128)
}
//--------------------
//_mm_max_epu8/_mm_min_epu8 are SSE2 (dispatch checks HW_SSE2).
void getMinMaxUint8_SIMD_256(STD_INPUT_DEFINE(uint8_t)){
    GET_MIN_MAX_SIMD(uint8_t,(__m256i *),32,__m256i,_mm256_loadu_si256,_mm256_max_epu8,_mm256_min_epu8,_mm256_storeu_si256)
}
void getMinMaxUint8_SIMD_128(STD_INPUT_DEFINE(uint8_t)){
    GET_MIN_MAX_SIMD(uint8_t,(__m128i *),16,__m128i,_mm_loadu_si128,_mm_max_epu8,_mm_min_epu8,_mm_storeu_si128)
}
//--------------------
//_mm_max_epi32/_mm_min_epi32 require SSE4.1 (dispatch checks HW_SSE41).
void getMinMaxInt32_SIMD_256(STD_INPUT_DEFINE(int32_t)){
    GET_MIN_MAX_SIMD(int32_t,(__m256i *),8,__m256i,_mm256_loadu_si256,_mm256_max_epi32,_mm256_min_epi32,_mm256_storeu_si256)
}
void getMinMaxInt32_SIMD_128(STD_INPUT_DEFINE(int32_t)){
    GET_MIN_MAX_SIMD(int32_t,(__m128i *),4,__m128i,_mm_loadu_si128,_mm_max_epi32,_mm_min_epi32,_mm_storeu_si128)
}
//--------------------
//_mm_max_epi16/_mm_min_epi16 are SSE2 (dispatch checks HW_SSE2).
void getMinMaxInt16_SIMD_256(STD_INPUT_DEFINE(int16_t)){
    GET_MIN_MAX_SIMD(int16_t,(__m256i *),16,__m256i,_mm256_loadu_si256,_mm256_max_epi16,_mm256_min_epi16,_mm256_storeu_si256)
}
void getMinMaxInt16_SIMD_128(STD_INPUT_DEFINE(int16_t)){
    GET_MIN_MAX_SIMD(int16_t,(__m128i *),8,__m128i,_mm_loadu_si128,_mm_max_epi16,_mm_min_epi16,_mm_storeu_si128)
}
//--------------------
//_mm_max_epi8/_mm_min_epi8 require SSE4.1 (dispatch checks HW_SSE41).
void getMinMaxInt8_SIMD_256(STD_INPUT_DEFINE(int8_t)){
    GET_MIN_MAX_SIMD(int8_t,(__m256i *),32,__m256i,_mm256_loadu_si256,_mm256_max_epi8,_mm256_min_epi8,_mm256_storeu_si256)
}
void getMinMaxInt8_SIMD_128(STD_INPUT_DEFINE(int8_t)){
    GET_MIN_MAX_SIMD(int8_t,(__m128i *),16,__m128i,_mm_loadu_si128,_mm_max_epi8,_mm_min_epi8,_mm_storeu_si128)
}
//=========================================================================
//CPU feature-detection results, filled lazily on the first mexFunction call
//and cached across calls (the mex stays loaded between invocations).
//NOTE(review): no locking — presumably MATLAB invokes mexFunction from a
//single thread; confirm if this mex is ever called re-entrantly.
static int hw_struct_initialized = 0;
static struct cpu_x86 s;
//=========================================================================
// MEX ENTRY POINT
//=========================================================================
//Mex entry point: per-channel, per-chunk min/max downsampling ("reduce to
//width") with SIMD/OpenMP acceleration where the host CPU supports it.
//
//Fixes in this revision:
//  - output-count check used the wrong error ID ("jsmn_mex:n_inputs",
//    copy/pasted from another mex)
//  - samples_per_chunk == 0 now raises an error instead of dividing by zero
//  - removed always-false "< 0" comparisons on unsigned mwSize indices
//  - removed unreachable break statements after goto
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray*prhs[])
{
    //
    //  Calling Form
    //  ------------
    //  min_max_data = reduce_to_width_mex(data,samples_per_chunk,*start_sample,*end_sample);
    //
    //  Inputs
    //  ------
    //  data : [samples x channels]
    //  samples_per_chunk : #
    //      The output is a min and max pair per chunk (with possible data
    //      padding)
    //
    //  Optional Inputs
    //  ---------------
    //  start_sample: #, 1 based
    //      If specified, the end sample must also be specified
    //  end_sample: #, 1 based
    //
    //  Outputs
    //  -------
    //  min_max_data : [2*n_chunks(+padding) x channels] min/max pairs
    //  p_type : which processor path was taken
    //      - 0 - nothing
    //      - 1 - SSE2
    //      - 2 - SSE41
    //      - 3 - AVX
    //      - 4 - AVX2

    //Detect CPU SIMD capabilities once; cached across calls.
    if (!hw_struct_initialized){
        cpu_x86__detect_host(&s);
        hw_struct_initialized = 1;
    }

    bool process_subset;
    double p_type = 0;

    //---------------------------------------------------------------------
    //                          Input Checking
    //---------------------------------------------------------------------
    if (!(nrhs == 2 || nrhs == 4)){
        mexErrMsgIdAndTxt("SL:reduce_to_width:n_inputs",
                "Invalid # of inputs, 2 or 4 expected");
    }else if (!mxIsClass(prhs[1],"double")){
        //samples_per_chunk should be double
        mexErrMsgIdAndTxt("SL:reduce_to_width:input_class_type",
                "Second input type needs to be double");
    }

    if (nrhs == 4){
        process_subset = true;
        if (!mxIsClass(prhs[2],"double")){
            mexErrMsgIdAndTxt("SL:reduce_to_width:input_class_type",
                    "Third input type needs to be double");
        }else if (!mxIsClass(prhs[3],"double")){
            mexErrMsgIdAndTxt("SL:reduce_to_width:input_class_type",
                    "Fourth input type needs to be double");
        }
    }else{
        process_subset = false;
    }

    //Fixed: this is the output-count check; the ID previously said
    //"jsmn_mex:n_inputs" (copy/paste from another mex file).
    if (!(nlhs == 1 || nlhs == 2)){
        mexErrMsgIdAndTxt("SL:reduce_to_width:n_outputs",
                "Invalid # of outputs, 1 or 2 expected");
    }

    //---------------------------------------------------------------------
    //                  Initialization of variables
    //---------------------------------------------------------------------
    //This is used to adjust the data pointer to the start of each column
    mwSize n_samples_data = mxGetM(prhs[0]);

    //This is used to indicate how many samples we need to examine
    //for min and max values
    mwSize n_samples_process = n_samples_data;

    mwSize n_chans = mxGetN(prhs[0]);
    mwSize samples_per_chunk = getScalarInput(prhs[1],2);

    //Guard the integer divisions below: samples_per_chunk == 0 would
    //otherwise divide by zero when computing n_chunks.
    if (samples_per_chunk < 1){
        mexErrMsgIdAndTxt("SL:reduce_to_width:samples_per_chunk",
                "samples_per_chunk must be at least 1");
    }

    mwSize start_index;
    mwSize stop_index;

    //If we process a subset, determine how many samples we need to
    //offset the start and how many less samples are going to process.
    //---------------------------------------------------------------------
    if (process_subset){
        start_index = getScalarInput(prhs[2],3) - 1; //make 0 based
        stop_index = getScalarInput(prhs[3],4) - 1;
        mwSize max_valid_index = n_samples_data - 1;
        //mwSize is unsigned, so a start_sample/end_sample of 0 wraps to a
        //huge value and is caught by the range checks below; the old "< 0"
        //tests were always false and have been removed.
        if (start_index > max_valid_index){
            mexErrMsgIdAndTxt("SL:reduce_to_width:start_index","Start index is out of range");
        }else if (stop_index > max_valid_index){
            mexErrMsgIdAndTxt("SL:reduce_to_width:stop_index","Stop index is out of range");
        }else if (stop_index < start_index){
            mexErrMsgIdAndTxt("SL:reduce_to_width:stop_before_start","Start index comes after stop index");
        }
        n_samples_process = stop_index - start_index + 1;
    }

    //In general we pad with the endpoints to prevent axes resizing
    //(in Matlab). We always pad with the endpoints when a subset
    //is requested.
    bool pad_with_endpoints = n_samples_process != n_samples_data;

    //Integer division, should automatically floor (as desired)
    mwSize n_chunks = n_samples_process/samples_per_chunk;
    mwSize n_samples_not_in_chunk = n_samples_process - n_chunks*samples_per_chunk;

    //For each chunk we store a min and max value
    //Even if the same value we duplicate it.
    mwSize n_outputs_per_chan = 2*n_chunks;
    if (n_samples_not_in_chunk){
        //Add on one extra pair when the # of samples per chunk doesn't
        //evenly divide the input data
        n_outputs_per_chan += 2;
    }

    //Note, we might get some replication with the first and last
    //data points if only one of those is cropped. This should be fine
    //for rendering.
    if (pad_with_endpoints){
        n_outputs_per_chan += 2;
    }

    //Dispatch based on data type
    //---------------------------------------------------------------------
    mxClassID data_class_id = mxGetClassID(prhs[0]);
    switch (data_class_id){
        case mxDOUBLE_CLASS:
            goto S_PROCESS_DOUBLE;
        case mxSINGLE_CLASS:
            goto S_PROCESS_SINGLE;
        case mxINT64_CLASS:
            goto S_PROCESS_INT64;
        case mxUINT64_CLASS:
            goto S_PROCESS_UINT64;
        case mxINT32_CLASS:
            goto S_PROCESS_INT32;
        case mxUINT32_CLASS:
            goto S_PROCESS_UINT32;
        case mxINT16_CLASS:
            goto S_PROCESS_INT16;
        case mxUINT16_CLASS:
            goto S_PROCESS_UINT16;
        case mxINT8_CLASS:
            goto S_PROCESS_INT8;
        case mxUINT8_CLASS:
            goto S_PROCESS_UINT8;
        default:
            mexErrMsgIdAndTxt("JAH:reduce_to_width_mex",
                    "Class is not supported");
    }

    //=========================================================================
    //                      Processing based on type
    //=========================================================================
    S_PROCESS_DOUBLE:;
    {
        //Design Notes
        //-----------------------------------------------------------------
        //- Given the high # of variables in play I am using goto
        //  instead of passing the variables into a function. Presumably
        //  a variable struct would work as well, but I found this slightly
        //  easier.
        //- Due to differing definitions of variable types, all states are
        //  enclosed in brackets
        //- The if-statements are outside the loops. Presumably the compiler
        //  could optimize this away if inside the loops but I wasn't sure.
        INIT_POINTERS(double);
        GRAB_OUTSIDE_POINTS;
        //Note I'm skipping the old SSE version since I expect
        //everyone to have AVX
        //- the OS_AVX is to be technically correct but I expect
        //all current OSs to have it enabled
        if (SIMD_ENABLED && s.HW_AVX && s.OS_AVX && samples_per_chunk > 4){
            INIT_MAIN_LOOP(double)
                getMinMaxDouble_SIMD_256(STD_INPUT_CALL);
            END_MAIN_LOOP
            p_type = 3;
        }else{
            INIT_MAIN_LOOP(double)
                getMinMaxDouble_Standard(STD_INPUT_CALL);
            END_MAIN_LOOP
        }
        PROCESS_EXTRA_NON_CHUNK_SAMPLES(double)
        POPULATE_OUTPUT
        return;
    }
    S_PROCESS_SINGLE:;
    {
        INIT_POINTERS(float);
        GRAB_OUTSIDE_POINTS;
        if (SIMD_ENABLED && s.HW_AVX && s.OS_AVX && samples_per_chunk > 8){
            INIT_MAIN_LOOP(float)
                getMinMaxFloat_SIMD_256(STD_INPUT_CALL);
            END_MAIN_LOOP
            p_type = 3;
        }else{
            INIT_MAIN_LOOP(float)
                getMinMaxFloat_Standard(STD_INPUT_CALL);
            END_MAIN_LOOP
        }
        PROCESS_EXTRA_NON_CHUNK_SAMPLES(float)
        POPULATE_OUTPUT
        return;
    }
    S_PROCESS_UINT64:;
    {
        //SIMD not available until AVX512. We could code this up but
        //I can't test it
        INIT_POINTERS(uint64_t);
        GRAB_OUTSIDE_POINTS;
        INIT_MAIN_LOOP(uint64_t)
            getMinMaxUint64_Standard(STD_INPUT_CALL);
        END_MAIN_LOOP
        PROCESS_EXTRA_NON_CHUNK_SAMPLES(uint64_t)
        POPULATE_OUTPUT
        return;
    }
    S_PROCESS_UINT32:;
    {
        INIT_POINTERS(uint32_t);
        GRAB_OUTSIDE_POINTS;
        if (SIMD_ENABLED && s.HW_AVX2 && s.OS_AVX && samples_per_chunk > 8){
            INIT_MAIN_LOOP(uint32_t)
                getMinMaxUint32_SIMD_256(STD_INPUT_CALL);
            END_MAIN_LOOP
            p_type = 4;
        }else if (SIMD_ENABLED && s.HW_SSE41 && samples_per_chunk > 4){
            INIT_MAIN_LOOP(uint32_t)
                getMinMaxUint32_SIMD_128(STD_INPUT_CALL);
            END_MAIN_LOOP
            p_type = 2;
        }else{
            INIT_MAIN_LOOP(uint32_t)
                getMinMaxUint32_Standard(STD_INPUT_CALL);
            END_MAIN_LOOP
        }
        PROCESS_EXTRA_NON_CHUNK_SAMPLES(uint32_t)
        POPULATE_OUTPUT
        return;
    }
    S_PROCESS_UINT16:;
    {
        INIT_POINTERS(uint16_t);
        GRAB_OUTSIDE_POINTS;
        if (SIMD_ENABLED && s.HW_AVX2 && s.OS_AVX && samples_per_chunk > 16){
            INIT_MAIN_LOOP(uint16_t)
                getMinMaxUint16_SIMD_256(STD_INPUT_CALL);
            END_MAIN_LOOP
            p_type = 4;
        }else if (SIMD_ENABLED && s.HW_SSE41 && samples_per_chunk > 8){
            INIT_MAIN_LOOP(uint16_t)
                getMinMaxUint16_SIMD_128(STD_INPUT_CALL);
            END_MAIN_LOOP
            p_type = 2;
        }else{
            INIT_MAIN_LOOP(uint16_t)
                getMinMaxUint16_Standard(STD_INPUT_CALL);
            END_MAIN_LOOP
        }
        PROCESS_EXTRA_NON_CHUNK_SAMPLES(uint16_t)
        POPULATE_OUTPUT
        return;
    }
    S_PROCESS_UINT8:;
    {
        INIT_POINTERS(uint8_t);
        GRAB_OUTSIDE_POINTS;
        if (SIMD_ENABLED && s.HW_AVX2 && s.OS_AVX && samples_per_chunk > 32){
            INIT_MAIN_LOOP(uint8_t)
                getMinMaxUint8_SIMD_256(STD_INPUT_CALL);
            END_MAIN_LOOP
            p_type = 4;
        }else if(SIMD_ENABLED && s.HW_SSE2 && samples_per_chunk > 16){
            INIT_MAIN_LOOP(uint8_t)
                getMinMaxUint8_SIMD_128(STD_INPUT_CALL);
            END_MAIN_LOOP
            p_type = 1;
        }else{
            INIT_MAIN_LOOP(uint8_t)
                getMinMaxUint8_Standard(STD_INPUT_CALL);
            END_MAIN_LOOP
        }
        PROCESS_EXTRA_NON_CHUNK_SAMPLES(uint8_t)
        POPULATE_OUTPUT
        return;
    }
    S_PROCESS_INT64:;
    {
        INIT_POINTERS(int64_t);
        GRAB_OUTSIDE_POINTS;
        INIT_MAIN_LOOP(int64_t)
            getMinMaxInt64_Standard(STD_INPUT_CALL);
        END_MAIN_LOOP
        PROCESS_EXTRA_NON_CHUNK_SAMPLES(int64_t)
        POPULATE_OUTPUT
        return;
    }
    S_PROCESS_INT32:;
    {
        INIT_POINTERS(int32_t);
        GRAB_OUTSIDE_POINTS;
        if (SIMD_ENABLED && s.HW_AVX2 && s.OS_AVX && samples_per_chunk > 8){
            INIT_MAIN_LOOP(int32_t)
                getMinMaxInt32_SIMD_256(STD_INPUT_CALL);
            END_MAIN_LOOP
            p_type = 4;
        }else if (SIMD_ENABLED && s.HW_SSE41 && samples_per_chunk > 4){
            INIT_MAIN_LOOP(int32_t)
                getMinMaxInt32_SIMD_128(STD_INPUT_CALL);
            END_MAIN_LOOP
            p_type = 2;
        }else{
            INIT_MAIN_LOOP(int32_t)
                getMinMaxInt32_Standard(STD_INPUT_CALL);
            END_MAIN_LOOP
        }
        PROCESS_EXTRA_NON_CHUNK_SAMPLES(int32_t)
        POPULATE_OUTPUT
        return;
    }
    S_PROCESS_INT16:;
    {
        INIT_POINTERS(int16_t);
        GRAB_OUTSIDE_POINTS;
        if (SIMD_ENABLED && s.HW_AVX2 && s.OS_AVX && samples_per_chunk > 16){
            INIT_MAIN_LOOP(int16_t)
                getMinMaxInt16_SIMD_256(STD_INPUT_CALL);
            END_MAIN_LOOP
            p_type = 4;
        }else if (SIMD_ENABLED && s.HW_SSE2 && samples_per_chunk > 8){
            INIT_MAIN_LOOP(int16_t)
                getMinMaxInt16_SIMD_128(STD_INPUT_CALL);
            END_MAIN_LOOP
            p_type = 1;
        }else{
            INIT_MAIN_LOOP(int16_t)
                getMinMaxInt16_Standard(STD_INPUT_CALL);
            END_MAIN_LOOP
        }
        PROCESS_EXTRA_NON_CHUNK_SAMPLES(int16_t)
        POPULATE_OUTPUT
        return;
    }
    S_PROCESS_INT8:;
    {
        INIT_POINTERS(int8_t);
        GRAB_OUTSIDE_POINTS;
        if (SIMD_ENABLED && s.HW_AVX2 && s.OS_AVX && samples_per_chunk > 32){
            INIT_MAIN_LOOP(int8_t)
                getMinMaxInt8_SIMD_256(STD_INPUT_CALL);
            END_MAIN_LOOP
            p_type = 4;
        }else if (SIMD_ENABLED && s.HW_SSE41 && samples_per_chunk > 16){
            INIT_MAIN_LOOP(int8_t)
                getMinMaxInt8_SIMD_128(STD_INPUT_CALL);
            END_MAIN_LOOP
            p_type = 2;
        }else{
            INIT_MAIN_LOOP(int8_t)
                getMinMaxInt8_Standard(STD_INPUT_CALL);
            END_MAIN_LOOP
        }
        PROCESS_EXTRA_NON_CHUNK_SAMPLES(int8_t)
        POPULATE_OUTPUT
        return;
    }
}
FileParser.h | //
// Created by Timm Felden on 04.11.15.
//
#ifndef SKILL_CPP_COMMON_FILEPARSER_H_H
#define SKILL_CPP_COMMON_FILEPARSER_H_H
#include "../common.h"
#include "../api/SkillFile.h"
#include "ParseException.h"
#include "../streams/FileInputStream.h"
#include "StringPool.h"
#include "AbstractStoragePool.h"
#include "../restrictions/FieldRestriction.h"
#include "../restrictions/TypeRestriction.h"
#include "../fieldTypes/BuiltinFieldType.h"
#include "../fieldTypes/AnnotationType.h"
#include "LazyField.h"
#include <vector>
#include <unordered_map>
#include <string>
#include <iostream>
#include <cassert>
#if defined(_OPENMP)
#include <omp.h>
#endif
/**
* set to 1, to enable debug output; this should be disabled on all commits
*/
#define debugOnly if(0)
namespace skill {
using namespace streams;
using namespace fieldTypes;
using namespace restrictions;
namespace internal {
/**
 * Turns a field type into a preliminary type information. In case of user types, the declaration
 * of the respective user type may follow after the field declaration.
 *
 * Ownership note: IDs 0-4 and 15-20 return newly allocated objects; IDs 5-14
 * return shared singletons (Annotation, String, builtin types) and existing
 * pools are returned for user type IDs (>= 32) — so the caller must not
 * delete the result unconditionally.
 */
inline const FieldType *parseFieldType(FileInputStream *in,
                                       const std::vector<AbstractStoragePool *> *types,
                                       StringPool *String,
                                       AnnotationType *Annotation,
                                       int blockCounter) {
    // type IDs follow the SKilL binary spec; note that ID 16 is not assigned
    const TypeID i = (TypeID) in->v64();
    switch (i) {
        // constant (compile-time) integer types carry their value inline
        case 0 :
            return new ConstantI8(in->i8());
        case 1 :
            return new ConstantI16(in->i16());
        case 2 :
            return new ConstantI32(in->i32());
        case 3 :
            return new ConstantI64(in->i64());
        case 4 :
            return new ConstantV64(in->v64());
        // shared singleton builtins
        case 5 :
            return Annotation;
        case 6 :
            return &BoolType;
        case 7 :
            return &I8;
        case 8 :
            return &I16;
        case 9 :
            return &I32;
        case 10:
            return &I64;
        case 11:
            return &V64;
        case 12:
            return &F32;
        case 13:
            return &F64;
        case 14:
            return String;
        // compound types recurse for their base type(s)
        case 15: {
            int64_t length = in->v64();
            auto t = parseFieldType(in, types, String, Annotation, blockCounter);
            return new ConstantLengthArray(length, t);
        }
        case 17:
            return new VariableLengthArray(parseFieldType(in, types, String, Annotation, blockCounter));
        case 18:
            return new ListType(parseFieldType(in, types, String, Annotation, blockCounter));
        case 19:
            return new SetType(parseFieldType(in, types, String, Annotation, blockCounter));
        case 20:
            return new MapType(parseFieldType(in, types, String, Annotation, blockCounter),
                               parseFieldType(in, types, String, Annotation, blockCounter));
        default:
            // user types start at ID 32 and index into the pools parsed so far
            if (i >= 32 && i - 32 < (TypeID) types->size())
                return types->at(i - 32);
            else
                throw ParseException(in, blockCounter,
                                     "Invalid type ID");
    }
}
/**
 * create a new empty skill file; parametrized by specification dependent functionality.
 *
 * Ownership of the stream, string pool, type vector, annotation type and
 * name map passes to the state created by makeState.
 *
 * NOTE(review): the FileInputStream is opened with mode "w" here — presumably
 * intentional because a *new* file is being created, but verify against the
 * FileInputStream constructor semantics; parseFile below receives an already
 * opened stream instead.
 */
template<
        //!ensures that names of pools and known fields are known upfront, so that it is safe
        // to compare their names by pointer value
        StringPool *initializeStrings(FileInputStream *),

        //!create a new pool in the target type system
        AbstractStoragePool *newPool(TypeID typeID,
                                     String name,
                                     AbstractStoragePool *superPool,
                                     std::set<TypeRestriction *> *restrictions,
                                     const AbstractStringKeeper *const keeper),

        //! create a new state in the target type system
        SkillFile *makeState(FileInputStream *in,
                             WriteMode mode,
                             StringPool *String,
                             AnnotationType *Annotation,
                             std::vector<AbstractStoragePool *> *types,
                             api::typeByName_t *typesByName,
                             std::vector<std::unique_ptr<MappedInStream>> &dataList)
>
SkillFile *newFile(const std::string &path, WriteMode mode) {
    FileInputStream *in = new FileInputStream(path, "w");
    StringPool *String = initializeStrings(in);
    std::vector<AbstractStoragePool *> *types =
            new std::vector<AbstractStoragePool *>;
    AnnotationType *Annotation = new AnnotationType(types);
    api::typeByName_t *typesByName = new api::typeByName_t;
    std::vector<std::unique_ptr<MappedInStream>> dataList;
    return makeState(in, mode, String,
                     Annotation, types,
                     typesByName,
                     dataList);
}
/**
 * parses a skill file; parametrized by specification dependent functionality.
 *
 * Fixes in this revision:
 *  - unknown type restrictions now actually throw (the ParseException was
 *    constructed but never thrown, silently skipping the error and leaving
 *    the stream position undefined)
 *  - SkillException is caught by reference instead of by value (catching by
 *    value slices derived exceptions such as ParseException)
 *  - the final rethrow uses "throw;" to preserve the dynamic exception type
 */
template<
        //!ensures that names of pools and known fields are known upfront, so that it is safe
        // to compare their names by pointer value
        StringPool *initializeStrings(FileInputStream *),

        //!create a new pool in the target type system
        AbstractStoragePool *newPool(TypeID typeID,
                                     String name,
                                     AbstractStoragePool *superPool,
                                     std::set<TypeRestriction *> *restrictions,
                                     const AbstractStringKeeper *const keeper),

        //! create a new state in the target type system
        SkillFile *makeState(FileInputStream *in,
                             WriteMode mode,
                             StringPool *String,
                             AnnotationType *Annotation,
                             std::vector<AbstractStoragePool *> *types,
                             api::typeByName_t *typesByName,
                             std::vector<std::unique_ptr<MappedInStream>> &dataList)
>
SkillFile *parseFile(std::unique_ptr<FileInputStream> in, WriteMode mode) {
    //pairs a pool with the number of fields to read for it in this block
    struct LFEntry {
        LFEntry(AbstractStoragePool *const pool, SKilLID count)
                : pool(pool), count(count) {}

        AbstractStoragePool *const pool;
        const SKilLID count;
    };

    // PARSE STATE
    std::unique_ptr<StringPool> String(initializeStrings(in.get()));
    std::vector<AbstractStoragePool *> *types =
            new std::vector<AbstractStoragePool *>;
    std::unique_ptr<AnnotationType> Annotation(new AnnotationType(types));
    std::unique_ptr<api::typeByName_t> typesByName(new api::typeByName_t);
    std::vector<std::unique_ptr<MappedInStream>> dataList;

    // process stream
    debugOnly {
        std::cout << std::endl << "file " << in->getPath() << std::endl;
    }
    for (int blockCounter = 0; !in->eof(); blockCounter++) {
        debugOnly {
            std::cout << "block " << blockCounter << " starting at " << in->getPosition() << std::endl;
        }
        // string block
        try {
            const int count = (int) in->v64();
            debugOnly {
                std::cout << count << " strings" << std::endl;
            }

            if (0 != count) {
                int last = 0, offset = 0;
                // string data begins after the count i32 end-offsets
                const long position = in->getPosition() + 4 * count;
                for (int i = count; i != 0; i--) {
                    offset = in->i32();
                    String->addPosition(std::pair<long, int>(position + last, offset - last));
                    last = offset;
                }
                in->jump(in->getPosition() + last);
            }
        } catch (const SkillException &) {
            // catch by const reference (was by value, which slices);
            // translate into a ParseException carrying block context
            throw ParseException(in, blockCounter, "corrupted string block");
        }
        debugOnly {
            std::cout << "string block ended at " << in->getPosition() << std::endl;
        }

        // type block
        try {
            TypeID typeCount = (TypeID) in->v64();

            // this barrier is strictly increasing inside of each block and reset to 0 at the beginning of each block
            TypeID blockIDBarrier = 0;

            std::set<api::String> seenTypes;

            // number of fields to expect for that type in this block
            std::vector<LFEntry> localFields;

            // parse type definitions
            while (typeCount-- > 0) {
                api::String name = String->get((SKilLID) in->v64());

                // check null name
                if (nullptr == name)
                    throw ParseException(in, blockCounter,
                                         "Corrupted file, nullptr in typename");

                debugOnly {
                    std::cout << "processing type " << *name << " at " << in->getPosition()
                              << std::endl;
                }

                // check duplicate types
                if (seenTypes.find(name) != seenTypes.end())
                    throw ParseException(
                            in, blockCounter,
                            std::string("Duplicate definition of type ").append(*name));
                seenTypes.insert(name);

                const int count = (int) in->v64();

                auto defIter = typesByName->find(name);
                if (defIter == typesByName->end()) {
                    // unknown type

                    // type restrictions
                    int restrictionCount = (int) in->v64();
                    auto rest = std::unique_ptr<std::set<TypeRestriction *>>(new std::set<TypeRestriction *>);
                    //! TODO restrictions
                    // rest.sizeHint(restrictionCount)
                    while (restrictionCount-- > 0) {
                        switch ((char) in->v64()) {
                            case 0: //restrictions.Unique
                                break;
                            case 1: // restrictions.Singleton
                                break;
                            case 2: // restrictions.Monotone
                                break;
                            case 3: // restrictions.Abstract
                                break;
                            case 5:
                                in->v64(); // restrictions.DefaultTypeRestriction(in.v64.toInt)
                                break;

                            default:
                                // BUG FIX: the exception was constructed but
                                // never thrown, so unknown restrictions were
                                // silently ignored with the stream left in an
                                // undefined position
                                throw ParseException(
                                        in, blockCounter,
                                        "Found an unknown type restriction. Please regenerate your binding, if possible.");
                        }
                        // TODO rest +=
                    }

                    // super
                    const TypeID superID = (TypeID) in->v64();
                    AbstractStoragePool *superPool;
                    if (0 == superID)
                        superPool = nullptr;
                    else if (superID > (TypeID) types->size()) {
                        throw ParseException(
                                in, blockCounter,
                                std::string("Type ").append(*name).append(
                                        " refers to an ill-formed super type."));
                    } else {
                        superPool = types->at(superID - 1);
                        assert(superPool);
                    }

                    // allocate pool
                    AbstractStoragePool *r = newPool(
                            (TypeID) types->size() + 32, name, superPool, rest.get(), String->keeper);
                    rest.release();
                    types->push_back(r);
                    defIter = typesByName->insert(
                            std::pair<api::String, AbstractStoragePool *>(name, r)).first;
                }
                AbstractStoragePool *const definition = defIter->second;

                if (blockIDBarrier < definition->typeID)
                    blockIDBarrier = definition->typeID;
                else
                    throw ParseException(in, blockCounter, "Found unordered type block.");

                // in contrast to prior implementation, bpo is the position inside of data, even if there are no actual
                // instances. We need this behavior, because that way we can cheaply calculate the number of static instances
                const SKilLID lbpo =
                        definition->basePool->cachedSize + (nullptr == definition->superPool ? 0 : (
                                0 != count ? (SKilLID) in->v64() :
                                definition->superPool->blocks.back().bpo));

                // ensure that bpo is in fact inside of the parents block
                if (definition->superPool) {
                    const auto &b = definition->superPool->blocks.back();
                    if (lbpo < b.bpo || b.bpo + b.dynamicCount < lbpo)
                        throw ParseException(in, blockCounter,
                                             "Found broken bpo.");
                }

                // static count and cached size are updated in the resize phase
                // @note we assume that all dynamic instance are static instances as well, until we know for sure
                definition->blocks.push_back(Block(blockCounter, lbpo, count, count));
                definition->staticDataInstances += count;

                localFields.push_back(LFEntry(definition, (SKilLID) in->v64()));
            }

            // resize pools, i.e. update cachedSize and staticCount
            for (auto &e : localFields) {
                const auto p = e.pool;
                const auto &b = p->blocks.back();
                p->cachedSize += b.dynamicCount;

                if (0 != b.dynamicCount) {
                    // calculate static count of our parent
                    const auto &parent = p->superPool;
                    if (parent) {
                        auto &sb = parent->blocks.back();
                        // assumed static instances, minus what static instances would be, if p were the first sub pool.
                        const auto delta = sb.staticCount - (b.bpo - sb.bpo);
                        // if positive, then we have to subtract it from the assumed static count (local and global)
                        if (delta > 0) {
                            sb.staticCount -= delta;
                            parent->staticDataInstances -= delta;
                        }
                    }
                }
            }

            // track offset information, so that we can create the block maps and jump to the next block directly after
            // parsing field information
            long dataEnd = 0L;

            // parse fields
            for (const auto &e : localFields) {
                const auto &p = e.pool;
                TypeID legalFieldIDBarrier = 1 + (TypeID) p->dataFields.size();
                const auto &block = p->blocks.back();
                auto localFieldCount = e.count;
                while (localFieldCount-- > 0) {
                    const TypeID id = (TypeID) in->v64();
                    if (id <= 0 || legalFieldIDBarrier < id)
                        throw ParseException(in, blockCounter,
                                             "Found an illegal field ID.");

                    long endOffset = 0;
                    if (id == legalFieldIDBarrier) {
                        // new field
                        legalFieldIDBarrier++;
                        const api::String fieldName = String->get((SKilLID) in->v64());
                        if (!fieldName)
                            throw ParseException(in, blockCounter,
                                                 "A field has a nullptr as name.");

                        debugOnly {
                            std::cout << "processing new field " << *p->name << "." << *fieldName
                                      << " at " << in->getPosition() << std::endl;
                        }

                        const auto t = parseFieldType(in.get(), types, String.get(), Annotation.get(),
                                                      blockCounter);

                        // parse field restrictions
                        std::set<const restrictions::FieldRestriction *> rest;
                        int fieldRestrictionCount = (int) in->v64();
                        for (; fieldRestrictionCount != 0; fieldRestrictionCount--) {
                            const int i = (const int) in->v64();
                            switch (i) {
                                case 0: {// nonnull
                                    rest.insert(restrictions::NonNull::get());
                                    break;
                                }
                                case 1: {// default
                                    if (5 == t->typeID || 32 <= t->typeID)
                                        in->v64();
                                    else
                                        t->read(*in);
                                    break;
                                }
                                case 3: {
                                    //range
                                    switch (t->typeID) {
                                        case 7:
                                            rest.insert(new restrictions::Range<int8_t>(in->i8(), in->i8()));
                                            break;
                                        case 8:
                                            rest.insert(new restrictions::Range<int16_t>(in->i16(), in->i16()));
                                            break;
                                        case 9:
                                            rest.insert(new restrictions::Range<int32_t>(in->i32(), in->i32()));
                                            break;
                                        case 10:
                                            rest.insert(new restrictions::Range<int64_t>(in->i64(), in->i64()));
                                            break;
                                        case 11:
                                            rest.insert(new restrictions::Range<int64_t>(in->v64(), in->v64()));
                                            break;
                                        case 12:
                                            rest.insert(new restrictions::Range<float>(in->f32(), in->f32()));
                                            break;
                                        case 13:
                                            rest.insert(new restrictions::Range<double>(in->f64(), in->f64()));
                                            break;
                                        default:
                                            throw ParseException(
                                                    in, blockCounter,
                                                    "Range restricton on a type that can not be restricted.");
                                    }
                                    break;
                                }
                                case 5: { // coding
                                    String->get((SKilLID) in->v64());
                                    break;
                                }
                                case 7: {
                                    // constant length pointer
                                    break;
                                }
                                case 9: {
                                    // oneof
                                    // read array of type IDs
                                    for (int c = in->v64(); c != 0; c--)
                                        in->v64();
                                    break;
                                }
                                default:
                                    throw ParseException(
                                            in, blockCounter,
                                            "Found an unknown field restriction. Please regenerate your binding, if possible.");
                            }
                        }
                        endOffset = in->v64();

                        auto f = p->addField(String->keeper, id, t, fieldName);
                        for (auto r : rest)
                            f->addRestriction(r);
                        f->addChunk(
                                new BulkChunk(dataEnd, endOffset, p->cachedSize, p->blocks.size()));
                    } else {
                        // known field
                        endOffset = in->v64();
                        p->dataFields[id - 1]->addChunk(
                                new SimpleChunk(dataEnd, endOffset, block.dynamicCount, block.bpo));
                    }
                    dataEnd = endOffset;
                }
            }

            debugOnly {
                std::cout << "reached end of type header at " << in->getPosition() << std::endl;
            }
            // jump over data and continue in the next block
            dataList.push_back(std::unique_ptr<MappedInStream>(in->jumpAndMap(dataEnd)));
        } catch (SkillException &) {
            throw; // rethrow preserving the dynamic exception type (was "throw e;" on a by-value copy)
        } catch (...) {
            throw ParseException(in, blockCounter, "unexpected foreign exception");
        }
    }

    // note there still isn't a single instance
    return makeState(in.release(), mode, String.release(), Annotation.release(), types,
                     typesByName.release(),
                     dataList);
}
/**
 * Has to be called by makeState after instances have been allocated to ensure
 * that required fields are read from file.
 *
 * Walks all storage pools and all of their data fields, mapping each field's
 * chunks back onto the block streams in dataList and deserializing them.
 * Work is parallelized with nested OpenMP loops (pools x fields). Errors are
 * not thrown from inside the parallel region; they are collected into
 * `results` under an omp critical section and re-raised as one combined
 * SkillException afterwards.
 *
 * @param types    pools whose fields shall be deserialized
 * @param dataList memory-mapped data blocks, indexed by block index
 * @throws SkillException if any field failed to deserialize
 */
inline void triggerFieldDeserialization(std::vector<AbstractStoragePool *> *types,
std::vector<std::unique_ptr<MappedInStream>> &dataList) {
std::vector<std::string *> results;
#pragma omp parallel for schedule(dynamic) num_threads(omp_get_max_threads()/2)
for (size_t i = 0; i < types->size(); i++) {
auto t = types->at(i);
#pragma omp parallel for schedule(dynamic) num_threads(2)
for (size_t j = 0; j < t->dataFields.size(); j++) {
auto f = t->dataFields[j];
int bsIndex = 0;
for (Chunk *dc : f->dataChunks) {
if (dynamic_cast<BulkChunk *>(dc)) {
// skip blocks that do not contain data for our field
bsIndex += ((BulkChunk *) dc)->blockCount - 1;
}
const int blockIndex = t->blocks[bsIndex++].blockIndex;
if (dc->count) {
MappedInStream *part = dataList[blockIndex].get();
// view restricted to this chunk's [begin, end) byte range
skill::streams::MappedInStream in(part, dc->begin, dc->end);
try {
if (auto c = dynamic_cast<const ::skill::internal::SimpleChunk *>(dc)) {
int i = c->bpo + 1;
f->rsc(i, i + c->count, &in);
} else {
auto bc = dynamic_cast<const ::skill::internal::BulkChunk *>(dc);
f->rbc(&in, bc);
}
// lazy fields may legitimately leave bytes unconsumed
if (!(in.eof() || nullptr != dynamic_cast<::skill::internal::LazyField *>(f))) {
#pragma omp critical
{
std::stringstream message;
message << "ParseException while parsing field: "
<< f->owner->name->c_str()
<< "."
<< f->name->c_str()
<< "\n Position: "
<< in.getPosition()
<< "\n reason: Did not consume all bytes." << std::endl;
results.push_back(new std::string(message.str()));
}
};
// BUG FIX: catch by const reference; catching SkillException by value
// slices derived exception types and copies needlessly.
} catch (const SkillException &e) {
#pragma omp critical
{
std::stringstream message;
message << "ParseException while parsing field.\n Position "
<< in.getPosition()
<< "\n reason: "
<< e.message << std::endl;
results.push_back(new std::string(message.str()));
}
} catch (...) {
#pragma omp critical
{
results.push_back(new std::string("unknown error in concurrent read"));
}
}
}
}
}
}
// check for errors
if (results.size()) {
std::stringstream msg;
for (const auto s : results) {
if (s) {
msg << *s << std::endl;
delete s;
}
}
throw SkillException(msg.str());
}
}
}
}
#undef debugOnly
#endif //SKILL_CPP_COMMON_FILEPARSER_H_H
|
V4_openmp.h | #include <time.h>
#include <omp.h>
/*
 * Merge-based triangle-counting kernel on a sparse matrix in compressed
 * column/row form: for every vertex r and every neighbour row[e] of r, the
 * sorted index lists of r and row[e] are intersected and each common entry
 * contributes 0.5 to c[r]. c must hold M floats and is overwritten.
 *
 * row, col : index arrays (col has M + 1 entries; row entries sorted per column)
 * c        : per-vertex result, length M
 * M        : matrix dimension
 * nz       : number of nonzeros (kept for interface compatibility, unused)
 *
 * Returns the wall-clock duration of the parallel section in microseconds.
 */
long v4_openmp( int * row, int * col,
float * c, int M, int nz)
{
struct timespec t_begin;
struct timespec t_finish;
/* clear the result vector before the clock starts */
for (int r = 0; r < M; r++)
c[r] = 0;
/* loop variables kept private per OpenMP thread */
int r, e, pa, pb;
clock_gettime(CLOCK_MONOTONIC, &t_begin);
#pragma omp parallel shared(row, col) private(r, e, pa, pb)
{
#pragma omp for schedule(dynamic)
for (r = 0; r < M; r++) {
for (e = col[r]; e < col[r + 1]; e++) {
/* two-pointer intersection of column r with column row[e] */
pa = col[r];
pb = col[row[e]];
while (pa < col[r + 1] && pb < col[row[e] + 1]) {
if (row[pb] == row[pa]) {
c[r] += 0.5;
pa++;
pb++;
} else if (row[pb] > row[pa]) {
pa++;
} else {
pb++;
}
}
}
}
}
clock_gettime(CLOCK_MONOTONIC, &t_finish);
/* elapsed time in microseconds */
return (t_finish.tv_sec - t_begin.tv_sec) * 1000000
+ (t_finish.tv_nsec - t_begin.tv_nsec) / 1000;
}
|
Ooura_FFT.h | #ifndef _H_OOURA_FFT_
#define _H_OOURA_FFT_
#include <cmath>
// Thin multi-channel wrapper around Takuya Ooura's radix-4/2 real FFT
// routines (rdft and helpers defined below). One scratch buffer (a), one
// cos/sin table (w) and one bit-reversal work area (ip) is kept per channel
// so the per-channel transforms can run in parallel (see the omp pragmas in
// the member definitions).
class Ooura_FFT{
private:
int frame_size; // transform length n; must be a power of 2 (>= 2)
int channels; // number of independent channels
double **a, **w; // a: per-channel data scratch, w: per-channel cos/sin table
int **ip; // per-channel bit-reversal work area
public:
inline Ooura_FFT(int _frame_size, int _channels);
inline ~Ooura_FFT();
inline void FFT(double **); // forward real FFT of all channels
inline void FFT(double **, int target_channels); // forward FFT of the first target_channels channels
inline void iFFT(double **); // inverse FFT of all channels (with 2/n scaling)
inline void FFT(double *); // forward FFT; channels packed at stride frame_size+2
inline void iFFT(double *); // inverse FFT (implementation handles channel 0 only)
inline void SingleFFT(double *); // forward FFT of one frame using channel-0 work areas
inline void SingleiFFT(double *); // inverse FFT of one frame using channel-0 work areas
};
/*
Copyright:
Copyright(C) 1996-2001 Takuya OOURA
email: ooura@mmm.t.u-tokyo.ac.jp
download: http://momonga.t.u-tokyo.ac.jp/~ooura/fft.html
You may use, copy, modify this code for any purpose and
without fee. You may distribute this ORIGINAL package.
Fast Fourier/Cosine/Sine Transform
dimension :one
data length :power of 2
decimation :frequency
radix :4, 2
data :inplace
table :use
functions
cdft: Complex Discrete Fourier Transform
rdft: Real Discrete Fourier Transform
ddct: Discrete Cosine Transform
ddst: Discrete Sine Transform
dfct: Cosine Transform of RDFT (Real Symmetric DFT)
dfst: Sine Transform of RDFT (Real Anti-symmetric DFT)
function prototypes
void cdft(int, int, double *, int *, double *);
void rdft(int, int, double *, int *, double *);
void ddct(int, int, double *, int *, double *);
void ddst(int, int, double *, int *, double *);
void dfct(int, double *, double *, int *, double *);
void dfst(int, double *, double *, int *, double *);
-------- Complex DFT (Discrete Fourier Transform) --------
[definition]
<case1>
X[k] = sum_j=0^n-1 x[j]*exp(2*pi*i*j*k/n), 0<=k<n
<case2>
X[k] = sum_j=0^n-1 x[j]*exp(-2*pi*i*j*k/n), 0<=k<n
(notes: sum_j=0^n-1 is a summation from j=0 to n-1)
[usage]
<case1>
ip[0] = 0; // first time only
cdft(2*n, 1, a, ip, w);
<case2>
ip[0] = 0; // first time only
cdft(2*n, -1, a, ip, w);
[parameters]
2*n :data length (int)
n >= 1, n = power of 2
a[0...2*n-1] :input/output data (double *)
input data
a[2*j] = Re(x[j]),
a[2*j+1] = Im(x[j]), 0<=j<n
output data
a[2*k] = Re(X[k]),
a[2*k+1] = Im(X[k]), 0<=k<n
ip[0...*] :work area for bit reversal (int *)
length of ip >= 2+sqrt(n)
strictly,
length of ip >=
2+(1<<(int)(log(n+0.5)/log(2))/2).
ip[0],ip[1] are pointers of the cos/sin table.
w[0...n/2-1] :cos/sin table (double *)
w[],ip[] are initialized if ip[0] == 0.
[remark]
Inverse of
cdft(2*n, -1, a, ip, w);
is
cdft(2*n, 1, a, ip, w);
for (j = 0; j <= 2 * n - 1; j++) {
a[j] *= 1.0 / n;
}
.
-------- Real DFT / Inverse of Real DFT --------
[definition]
<case1> RDFT
R[k] = sum_j=0^n-1 a[j]*cos(2*pi*j*k/n), 0<=k<=n/2
I[k] = sum_j=0^n-1 a[j]*sin(2*pi*j*k/n), 0<k<n/2
<case2> IRDFT (excluding scale)
a[k] = (R[0] + R[n/2]*cos(pi*k))/2 +
sum_j=1^n/2-1 R[j]*cos(2*pi*j*k/n) +
sum_j=1^n/2-1 I[j]*sin(2*pi*j*k/n), 0<=k<n
[usage]
<case1>
ip[0] = 0; // first time only
rdft(n, 1, a, ip, w);
<case2>
ip[0] = 0; // first time only
rdft(n, -1, a, ip, w);
[parameters]
n :data length (int)
n >= 2, n = power of 2
a[0...n-1] :input/output data (double *)
<case1>
output data
a[2*k] = R[k], 0<=k<n/2
a[2*k+1] = I[k], 0<k<n/2
a[1] = R[n/2]
<case2>
input data
a[2*j] = R[j], 0<=j<n/2
a[2*j+1] = I[j], 0<j<n/2
a[1] = R[n/2]
ip[0...*] :work area for bit reversal (int *)
length of ip >= 2+sqrt(n/2)
strictly,
length of ip >=
2+(1<<(int)(log(n/2+0.5)/log(2))/2).
ip[0],ip[1] are pointers of the cos/sin table.
w[0...n/2-1] :cos/sin table (double *)
w[],ip[] are initialized if ip[0] == 0.
[remark]
Inverse of
rdft(n, 1, a, ip, w);
is
rdft(n, -1, a, ip, w);
for (j = 0; j <= n - 1; j++) {
a[j] *= 2.0 / n;
}
.
-------- DCT (Discrete Cosine Transform) / Inverse of DCT --------
[definition]
<case1> IDCT (excluding scale)
C[k] = sum_j=0^n-1 a[j]*cos(pi*j*(k+1/2)/n), 0<=k<n
<case2> DCT
C[k] = sum_j=0^n-1 a[j]*cos(pi*(j+1/2)*k/n), 0<=k<n
[usage]
<case1>
ip[0] = 0; // first time only
ddct(n, 1, a, ip, w);
<case2>
ip[0] = 0; // first time only
ddct(n, -1, a, ip, w);
[parameters]
n :data length (int)
n >= 2, n = power of 2
a[0...n-1] :input/output data (double *)
output data
a[k] = C[k], 0<=k<n
ip[0...*] :work area for bit reversal (int *)
length of ip >= 2+sqrt(n/2)
strictly,
length of ip >=
2+(1<<(int)(log(n/2+0.5)/log(2))/2).
ip[0],ip[1] are pointers of the cos/sin table.
w[0...n*5/4-1] :cos/sin table (double *)
w[],ip[] are initialized if ip[0] == 0.
[remark]
Inverse of
ddct(n, -1, a, ip, w);
is
a[0] *= 0.5;
ddct(n, 1, a, ip, w);
for (j = 0; j <= n - 1; j++) {
a[j] *= 2.0 / n;
}
.
-------- DST (Discrete Sine Transform) / Inverse of DST --------
[definition]
<case1> IDST (excluding scale)
S[k] = sum_j=1^n A[j]*sin(pi*j*(k+1/2)/n), 0<=k<n
<case2> DST
S[k] = sum_j=0^n-1 a[j]*sin(pi*(j+1/2)*k/n), 0<k<=n
[usage]
<case1>
ip[0] = 0; // first time only
ddst(n, 1, a, ip, w);
<case2>
ip[0] = 0; // first time only
ddst(n, -1, a, ip, w);
[parameters]
n :data length (int)
n >= 2, n = power of 2
a[0...n-1] :input/output data (double *)
<case1>
input data
a[j] = A[j], 0<j<n
a[0] = A[n]
output data
a[k] = S[k], 0<=k<n
<case2>
output data
a[k] = S[k], 0<k<n
a[0] = S[n]
ip[0...*] :work area for bit reversal (int *)
length of ip >= 2+sqrt(n/2)
strictly,
length of ip >=
2+(1<<(int)(log(n/2+0.5)/log(2))/2).
ip[0],ip[1] are pointers of the cos/sin table.
w[0...n*5/4-1] :cos/sin table (double *)
w[],ip[] are initialized if ip[0] == 0.
[remark]
Inverse of
ddst(n, -1, a, ip, w);
is
a[0] *= 0.5;
ddst(n, 1, a, ip, w);
for (j = 0; j <= n - 1; j++) {
a[j] *= 2.0 / n;
}
.
-------- Cosine Transform of RDFT (Real Symmetric DFT) --------
[definition]
C[k] = sum_j=0^n a[j]*cos(pi*j*k/n), 0<=k<=n
[usage]
ip[0] = 0; // first time only
dfct(n, a, t, ip, w);
[parameters]
n :data length - 1 (int)
n >= 2, n = power of 2
a[0...n] :input/output data (double *)
output data
a[k] = C[k], 0<=k<=n
t[0...n/2] :work area (double *)
ip[0...*] :work area for bit reversal (int *)
length of ip >= 2+sqrt(n/4)
strictly,
length of ip >=
2+(1<<(int)(log(n/4+0.5)/log(2))/2).
ip[0],ip[1] are pointers of the cos/sin table.
w[0...n*5/8-1] :cos/sin table (double *)
w[],ip[] are initialized if ip[0] == 0.
[remark]
Inverse of
a[0] *= 0.5;
a[n] *= 0.5;
dfct(n, a, t, ip, w);
is
a[0] *= 0.5;
a[n] *= 0.5;
dfct(n, a, t, ip, w);
for (j = 0; j <= n; j++) {
a[j] *= 2.0 / n;
}
.
-------- Sine Transform of RDFT (Real Anti-symmetric DFT) --------
[definition]
S[k] = sum_j=1^n-1 a[j]*sin(pi*j*k/n), 0<k<n
[usage]
ip[0] = 0; // first time only
dfst(n, a, t, ip, w);
[parameters]
n :data length + 1 (int)
n >= 2, n = power of 2
a[0...n-1] :input/output data (double *)
output data
a[k] = S[k], 0<k<n
(a[0] is used for work area)
t[0...n/2-1] :work area (double *)
ip[0...*] :work area for bit reversal (int *)
length of ip >= 2+sqrt(n/4)
strictly,
length of ip >=
2+(1<<(int)(log(n/4+0.5)/log(2))/2).
ip[0],ip[1] are pointers of the cos/sin table.
w[0...n*5/8-1] :cos/sin table (double *)
w[],ip[] are initialized if ip[0] == 0.
[remark]
Inverse of
dfst(n, a, t, ip, w);
is
dfst(n, a, t, ip, w);
for (j = 1; j <= n - 1; j++) {
a[j] *= 2.0 / n;
}
.
Appendix :
The cos/sin table is recalculated when the larger table required.
w[] and ip[] are compatible with all routines.
*/
inline void cdft(int, int, double *, int *, double *);
inline void rdft(int, int, double *, int *, double *);
inline void ddct(int, int, double *, int *, double *);
inline void ddst(int, int, double *, int *, double *);
inline void dfct(int, double *, double *, int *, double *);
inline void dfst(int, double *, double *, int *, double *);
inline void makewt(int nw, int* ip, double* w);
inline void makect(int nc, int* ip, double* c);
inline void bitrv2(int n, int* ip, double* a);
inline void cftfsub(int n, double* a, double* w);
inline void cftbsub(int n, double* a, double* w);
inline void rftfsub(int n, double* a, int nc, double* c);
inline void rftbsub(int n, double* a, int nc, double* c);
inline void cft1st(int n, double* a, double* w);
inline void cftmdl(int n, int l, double* a, double* w);
/**
 * Allocates per-channel work areas for Ooura's rdft().
 *  - a[ch]: frame_size doubles of data scratch
 *  - w[ch]: cos/sin table (rdft needs only n/2 entries; frame_size is kept
 *           for simplicity/safety)
 *  - ip[ch]: bit-reversal work area
 *
 * BUG FIX: per the rdft usage notes above, the ip work area must hold at
 * least 2 + sqrt(n/2) ints. The previous allocation of
 * (int)sqrt(frame_size/2) + 1 was two elements short, so makewt()/bitrv2()
 * could write past the end of the buffer.
 */
inline Ooura_FFT::Ooura_FFT(int _frame_size, int _channels){
frame_size = _frame_size;
channels = _channels;
a = new double *[channels];
for (int i = 0; i < channels; i++)
a[i] = new double[frame_size];
w = new double *[channels];
for (int i = 0; i < channels; i++)
w[i] = new double[frame_size];
ip = new int *[channels];
for (int i = 0; i < channels; i++)
ip[i] = new int[(int)(sqrt(frame_size / 2.0)) + 3]; // >= 2 + sqrt(n/2)
}
// Releases every per-channel work buffer, then the pointer tables themselves.
inline Ooura_FFT::~Ooura_FFT() {
for (int ch = 0; ch < channels; ch++) {
delete[] ip[ch];
delete[] w[ch];
delete[] a[ch];
}
delete[] ip;
delete[] w;
delete[] a;
}
// Forward real FFT of all channels, parallel over channels.
// Input : data[ch][0..frame_size-1] time-domain samples.
// Output: data[ch] as interleaved (re, im) bins; imaginary parts are negated
//         relative to rdft's convention, the DC imaginary slot is zeroed and
//         the Nyquist bin (rdft packs it into a[1]) is moved to
//         data[ch][frame_size]. NOTE(review): each row is written up to index
//         frame_size + 1, so callers must provide frame_size + 2 doubles per
//         channel -- confirm at call sites.
inline void Ooura_FFT::FFT(double **data) {
int j;
#pragma omp parallel for
for (j = 0; j < channels; j++) {
double *t;
t = data[j];
ip[j][0] = 0; // force rdft to (re)build the w/ip tables
for (int i = 0; i < frame_size; i++)
a[j][i] = t[i];
rdft(frame_size, 1, a[j], ip[j], w[j]);
// unpack rdft's half-complex layout back into the caller's buffer
for (int i = 0; i < frame_size; i += 2) {
t[i] = a[j][i];
t[i + 1] = -a[j][i + 1];
}
t[1] = 0;
t[frame_size] = a[j][1];
t[frame_size + 1] = 0;
}
}
/**
 * Forward real FFT of the first target_channels channels only; otherwise
 * identical to FFT(double **) (same half-complex unpacking, same
 * frame_size + 2 per-row capacity requirement).
 *
 * BUG FIX: this out-of-class definition was missing the `inline` keyword.
 * Since the member is declared inline in the class and this is a header,
 * including the file from more than one translation unit produced
 * multiple-definition link errors (ODR violation).
 */
inline void Ooura_FFT::FFT(double ** data, int target_channels){
int j;
#pragma omp parallel for
for (j = 0; j < target_channels; j++) {
double *t;
t = data[j];
ip[j][0] = 0; // force rdft to (re)build the w/ip tables
for (int i = 0; i < frame_size; i++)
a[j][i] = t[i];
rdft(frame_size, 1, a[j], ip[j], w[j]);
// unpack rdft's half-complex layout back into the caller's buffer
for (int i = 0; i < frame_size; i += 2) {
t[i] = a[j][i];
t[i + 1] = -a[j][i + 1];
}
t[1] = 0;
t[frame_size] = a[j][1];
t[frame_size + 1] = 0;
}
}
// Forward real FFT of all channels packed into one flat buffer:
// channel j occupies data[j*(frame_size+2) .. j*(frame_size+2)+frame_size+1].
// Same output layout as FFT(double **): negated imaginary parts, DC imag
// zeroed, Nyquist real at offset frame_size.
inline void Ooura_FFT::FFT(double *data) {
int j;
#pragma omp parallel for
for (j = 0; j < channels; j++) {
double *t;
t = &data[j*(frame_size+2)]; // start of channel j's slot
ip[j][0] = 0; // force rdft to (re)build the w/ip tables
for (int i = 0; i < frame_size; i++)
a[j][i] = t[i];
rdft(frame_size, 1, a[j], ip[j], w[j]);
for (int i = 0; i < frame_size; i += 2) {
t[i] = a[j][i];
t[i + 1] = -a[j][i + 1];
}
t[1] = 0;
t[frame_size] = a[j][1];
t[frame_size + 1] = 0;
}
}
// Inverse real FFT of all channels, parallel over channels. Undoes the
// layout produced by FFT(double **): re-negates imaginary parts, packs the
// Nyquist bin from data[ch][frame_size] back into a[1], runs rdft backward
// and applies the 2/frame_size normalization documented in the rdft remarks.
inline void Ooura_FFT::iFFT(double **data) {
int j;
#pragma omp parallel for
for (j = 0; j < channels; j++) {
double *t;
t = data[j];
ip[j][0] = 0; // force rdft to (re)build the w/ip tables
for (int i = 0; i < frame_size; i += 2) {
a[j][i] = t[i];
a[j][i + 1] = -t[i + 1];
}
a[j][1] = t[frame_size]; // restore rdft's packed Nyquist slot
rdft(frame_size, -1, a[j], ip[j], w[j]);
// scale by 2/n to make this the exact inverse (see rdft remarks)
for (int i = 0; i < frame_size; i++) {
a[j][i] *= 2.0;
a[j][i] /= frame_size;
}
for (int i = 0; i < frame_size; i++) {
t[i] = a[j][i];
}
}
}
// Inverse real FFT on the flat-buffer layout. NOTE(review): unlike
// FFT(double *), this only processes channel 0 (the `0*(frame_size+2)`
// offset and absence of a channel loop look deliberate, but verify against
// callers -- it will silently ignore channels 1..channels-1).
inline void Ooura_FFT::iFFT(double *data) {
double *t;
t = &data[0*(frame_size+2)]; // channel 0 only
ip[0][0] = 0; // force rdft to (re)build the w/ip tables
for (int i = 0; i < frame_size; i += 2) {
a[0][i] = t[i];
a[0][i + 1] = -t[i + 1];
}
a[0][1] = t[frame_size]; // restore rdft's packed Nyquist slot
rdft(frame_size, -1, a[0], ip[0], w[0]);
// scale by 2/n to make this the exact inverse (see rdft remarks)
for (int i = 0; i < frame_size; i++) {
a[0][i] *= 2.0;
a[0][i] /= frame_size;
}
for (int i = 0; i < frame_size; i++) {
t[i] = a[0][i];
}
}
// Forward real FFT of a single frame, using channel 0's work areas.
// Output layout matches FFT(double **): negated imaginary parts, DC imag
// zeroed, Nyquist real at data[frame_size] (buffer needs frame_size + 2).
// Not thread-safe with respect to other channel-0 users.
inline void Ooura_FFT::SingleFFT(double *data) {
int i;
ip[0][0] = 0; // force rdft to (re)build the w/ip tables
for (i = 0; i < frame_size; i++)
a[0][i] = data[i];
rdft(frame_size, 1, a[0], ip[0], w[0]);
for (i = 0; i < frame_size; i += 2) {
data[i] = a[0][i];
data[i + 1] = -a[0][i + 1];
}
data[1] = 0;
data[frame_size] = a[0][1];
data[frame_size + 1] = 0;
}
// Inverse real FFT of a single frame (inverse of SingleFFT), using channel
// 0's work areas and applying the 2/frame_size normalization.
inline void Ooura_FFT::SingleiFFT(double *data) {
int i;
ip[0][0] = 0; // force rdft to (re)build the w/ip tables
for (i = 0; i < frame_size; i += 2) {
a[0][i] = data[i];
a[0][i + 1] = -data[i + 1];
}
a[0][1] = data[frame_size]; // restore rdft's packed Nyquist slot
rdft(frame_size, -1, a[0], ip[0], w[0]);
// scale by 2/n to make this the exact inverse (see rdft remarks)
for (i = 0; i < frame_size; i++) {
a[0][i] *= 2.0;
a[0][i] /= frame_size;
}
for (i = 0; i < frame_size; i++) {
data[i] = a[0][i];
}
}
// Complex DFT dispatcher (Ooura). n = 2 * number of complex points,
// isgn >= 0 forward / < 0 backward; a is interleaved (re, im). The cos/sin
// table in w is rebuilt whenever the requested n exceeds the table size
// recorded in ip[0]. For n == 4 the forward butterfly serves both
// directions (as in the original Ooura package).
inline void cdft(int n, int isgn, double *a, int *ip, double *w) {
void makewt(int nw, int *ip, double *w);
void bitrv2(int n, int *ip, double *a);
void bitrv2conj(int n, int *ip, double *a);
void cftfsub(int n, double *a, double *w);
void cftbsub(int n, double *a, double *w);
if (n > (ip[0] << 2)) {
makewt(n >> 2, ip, w); // (re)build twiddle table for larger n
}
if (n > 4) {
if (isgn >= 0) {
bitrv2(n, ip + 2, a);
cftfsub(n, a, w);
} else {
bitrv2conj(n, ip + 2, a);
cftbsub(n, a, w);
}
} else if (n == 4) {
cftfsub(n, a, w);
}
}
// Real DFT / inverse real DFT (Ooura). isgn >= 0: forward transform of n
// real samples into the half-complex layout described in the header comment
// (R[n/2] packed into a[1]); isgn < 0: inverse (caller must scale by 2/n).
// Twiddle (w) and DCT-correction (w+nw) tables are rebuilt lazily based on
// the sizes cached in ip[0] / ip[1].
inline void rdft(int n, int isgn, double *a, int *ip, double *w) {
void makewt(int nw, int *ip, double *w);
void makect(int nc, int *ip, double *c);
void bitrv2(int n, int *ip, double *a);
void cftfsub(int n, double *a, double *w);
void cftbsub(int n, double *a, double *w);
void rftfsub(int n, double *a, int nc, double *c);
void rftbsub(int n, double *a, int nc, double *c);
int nw, nc;
double xi;
nw = ip[0];
if (n > (nw << 2)) {
nw = n >> 2;
makewt(nw, ip, w);
}
nc = ip[1];
if (n > (nc << 2)) {
nc = n >> 2;
makect(nc, ip, w + nw); // rft table lives right after the cdft table
}
if (isgn >= 0) {
if (n > 4) {
bitrv2(n, ip + 2, a);
cftfsub(n, a, w);
rftfsub(n, a, nc, w + nw);
} else if (n == 4) {
cftfsub(n, a, w);
}
// fold DC/Nyquist into a[0], a[1]
xi = a[0] - a[1];
a[0] += a[1];
a[1] = xi;
} else {
// unfold DC/Nyquist before the backward passes
a[1] = 0.5 * (a[0] - a[1]);
a[0] -= a[1];
if (n > 4) {
rftbsub(n, a, nc, w + nw);
bitrv2(n, ip + 2, a);
cftbsub(n, a, w);
} else if (n == 4) {
cftfsub(n, a, w);
}
}
}
// Discrete Cosine Transform / inverse (Ooura ddct). isgn < 0: DCT,
// isgn >= 0: IDCT (excluding scale); see the header comment for the exact
// definitions and required table sizes. The pre/post loops convert between
// the DCT ordering and the internal rdft-style layout.
inline void ddct(int n, int isgn, double *a, int *ip, double *w) {
void makewt(int nw, int *ip, double *w);
void makect(int nc, int *ip, double *c);
void bitrv2(int n, int *ip, double *a);
void cftfsub(int n, double *a, double *w);
void cftbsub(int n, double *a, double *w);
void rftfsub(int n, double *a, int nc, double *c);
void rftbsub(int n, double *a, int nc, double *c);
void dctsub(int n, double *a, int nc, double *c);
int j, nw, nc;
double xr;
nw = ip[0];
if (n > (nw << 2)) {
nw = n >> 2;
makewt(nw, ip, w);
}
nc = ip[1];
if (n > nc) {
nc = n;
makect(nc, ip, w + nw); // DCT needs a full-size cos table
}
if (isgn < 0) {
// forward DCT: reorder input into the internal layout first
xr = a[n - 1];
for (j = n - 2; j >= 2; j -= 2) {
a[j + 1] = a[j] - a[j - 1];
a[j] += a[j - 1];
}
a[1] = a[0] - xr;
a[0] += xr;
if (n > 4) {
rftbsub(n, a, nc, w + nw);
bitrv2(n, ip + 2, a);
cftbsub(n, a, w);
} else if (n == 4) {
cftfsub(n, a, w);
}
}
dctsub(n, a, nc, w + nw); // shared cosine twiddle stage
if (isgn >= 0) {
// inverse DCT: transform, then undo the internal reordering
if (n > 4) {
bitrv2(n, ip + 2, a);
cftfsub(n, a, w);
rftfsub(n, a, nc, w + nw);
} else if (n == 4) {
cftfsub(n, a, w);
}
xr = a[0] - a[1];
a[0] += a[1];
for (j = 2; j < n; j += 2) {
a[j - 1] = a[j] - a[j + 1];
a[j] += a[j + 1];
}
a[n - 1] = xr;
}
}
// Discrete Sine Transform / inverse (Ooura ddst). Mirror of ddct with the
// sign pattern of the sine basis; isgn < 0: DST, isgn >= 0: IDST (excluding
// scale). See the header comment for definitions and table sizes.
inline void ddst(int n, int isgn, double *a, int *ip, double *w) {
void makewt(int nw, int *ip, double *w);
void makect(int nc, int *ip, double *c);
void bitrv2(int n, int *ip, double *a);
void cftfsub(int n, double *a, double *w);
void cftbsub(int n, double *a, double *w);
void rftfsub(int n, double *a, int nc, double *c);
void rftbsub(int n, double *a, int nc, double *c);
void dstsub(int n, double *a, int nc, double *c);
int j, nw, nc;
double xr;
nw = ip[0];
if (n > (nw << 2)) {
nw = n >> 2;
makewt(nw, ip, w);
}
nc = ip[1];
if (n > nc) {
nc = n;
makect(nc, ip, w + nw); // DST needs a full-size cos table
}
if (isgn < 0) {
// forward DST: reorder input into the internal layout first
xr = a[n - 1];
for (j = n - 2; j >= 2; j -= 2) {
a[j + 1] = -a[j] - a[j - 1];
a[j] -= a[j - 1];
}
a[1] = a[0] + xr;
a[0] -= xr;
if (n > 4) {
rftbsub(n, a, nc, w + nw);
bitrv2(n, ip + 2, a);
cftbsub(n, a, w);
} else if (n == 4) {
cftfsub(n, a, w);
}
}
dstsub(n, a, nc, w + nw); // shared sine twiddle stage
if (isgn >= 0) {
// inverse DST: transform, then undo the internal reordering
if (n > 4) {
bitrv2(n, ip + 2, a);
cftfsub(n, a, w);
rftfsub(n, a, nc, w + nw);
} else if (n == 4) {
cftfsub(n, a, w);
}
xr = a[0] - a[1];
a[0] += a[1];
for (j = 2; j < n; j += 2) {
a[j - 1] = -a[j] - a[j + 1];
a[j] -= a[j + 1];
}
a[n - 1] = -xr;
}
}
// Cosine transform of a real symmetric DFT (Ooura dfct): C[k] =
// sum_{j=0}^{n} a[j]*cos(pi*j*k/n) for 0 <= k <= n, computed by recursive
// halving (the while loop processes progressively shorter even/odd parts in
// t). a has n+1 entries, t is an n/2+1 work area; see the header comment
// for table-size requirements.
inline void dfct(int n, double *a, double *t, int *ip, double *w) {
void makewt(int nw, int *ip, double *w);
void makect(int nc, int *ip, double *c);
void bitrv2(int n, int *ip, double *a);
void cftfsub(int n, double *a, double *w);
void rftfsub(int n, double *a, int nc, double *c);
void dctsub(int n, double *a, int nc, double *c);
int j, k, l, m, mh, nw, nc;
double xr, xi, yr, yi;
nw = ip[0];
if (n > (nw << 3)) {
nw = n >> 3;
makewt(nw, ip, w);
}
nc = ip[1];
if (n > (nc << 1)) {
nc = n >> 1;
makect(nc, ip, w + nw);
}
m = n >> 1;
// split input into symmetric (a) and antisymmetric (t) halves
yi = a[m];
xi = a[0] + a[n];
a[0] -= a[n];
t[0] = xi - yi;
t[m] = xi + yi;
if (n > 2) {
mh = m >> 1;
for (j = 1; j < mh; j++) {
k = m - j;
xr = a[j] - a[n - j];
xi = a[j] + a[n - j];
yr = a[k] - a[n - k];
yi = a[k] + a[n - k];
a[j] = xr;
a[k] = yr;
t[j] = xi - yi;
t[k] = xi + yi;
}
t[mh] = a[mh] + a[n - mh];
a[mh] -= a[n - mh];
// transform the symmetric half in place
dctsub(m, a, nc, w + nw);
if (m > 4) {
bitrv2(m, ip + 2, a);
cftfsub(m, a, w);
rftfsub(m, a, nc, w + nw);
} else if (m == 4) {
cftfsub(m, a, w);
}
a[n - 1] = a[0] - a[1];
a[1] = a[0] + a[1];
for (j = m - 2; j >= 2; j -= 2) {
a[2 * j + 1] = a[j] + a[j + 1];
a[2 * j - 1] = a[j] - a[j + 1];
}
// recursively halve the antisymmetric part held in t
l = 2;
m = mh;
while (m >= 2) {
dctsub(m, t, nc, w + nw);
if (m > 4) {
bitrv2(m, ip + 2, t);
cftfsub(m, t, w);
rftfsub(m, t, nc, w + nw);
} else if (m == 4) {
cftfsub(m, t, w);
}
a[n - l] = t[0] - t[1];
a[l] = t[0] + t[1];
k = 0;
for (j = 2; j < m; j += 2) {
k += l << 2;
a[k - l] = t[j] - t[j + 1];
a[k + l] = t[j] + t[j + 1];
}
l <<= 1;
mh = m >> 1;
for (j = 0; j < mh; j++) {
k = m - j;
t[j] = t[m + k] - t[m + j];
t[k] = t[m + k] + t[m + j];
}
t[mh] = t[m + mh];
m = mh;
}
a[l] = t[0];
a[n] = t[2] - t[1];
a[0] = t[2] + t[1];
} else {
// trivial n == 2 case
a[1] = a[0];
a[2] = t[0];
a[0] = t[1];
}
}
// Sine transform of a real antisymmetric DFT (Ooura dfst): S[k] =
// sum_{j=1}^{n-1} a[j]*sin(pi*j*k/n) for 0 < k < n; a[0] is used as work
// space and zeroed on exit. Same recursive-halving structure as dfct; see
// the header comment for table-size requirements.
inline void dfst(int n, double *a, double *t, int *ip, double *w) {
void makewt(int nw, int *ip, double *w);
void makect(int nc, int *ip, double *c);
void bitrv2(int n, int *ip, double *a);
void cftfsub(int n, double *a, double *w);
void rftfsub(int n, double *a, int nc, double *c);
void dstsub(int n, double *a, int nc, double *c);
int j, k, l, m, mh, nw, nc;
double xr, xi, yr, yi;
nw = ip[0];
if (n > (nw << 3)) {
nw = n >> 3;
makewt(nw, ip, w);
}
nc = ip[1];
if (n > (nc << 1)) {
nc = n >> 1;
makect(nc, ip, w + nw);
}
if (n > 2) {
m = n >> 1;
mh = m >> 1;
// split input into symmetric (a) and antisymmetric (t) halves
for (j = 1; j < mh; j++) {
k = m - j;
xr = a[j] + a[n - j];
xi = a[j] - a[n - j];
yr = a[k] + a[n - k];
yi = a[k] - a[n - k];
a[j] = xr;
a[k] = yr;
t[j] = xi + yi;
t[k] = xi - yi;
}
t[0] = a[mh] - a[n - mh];
a[mh] += a[n - mh];
a[0] = a[m];
// transform the symmetric half in place
dstsub(m, a, nc, w + nw);
if (m > 4) {
bitrv2(m, ip + 2, a);
cftfsub(m, a, w);
rftfsub(m, a, nc, w + nw);
} else if (m == 4) {
cftfsub(m, a, w);
}
a[n - 1] = a[1] - a[0];
a[1] = a[0] + a[1];
for (j = m - 2; j >= 2; j -= 2) {
a[2 * j + 1] = a[j] - a[j + 1];
a[2 * j - 1] = -a[j] - a[j + 1];
}
// recursively halve the antisymmetric part held in t
l = 2;
m = mh;
while (m >= 2) {
dstsub(m, t, nc, w + nw);
if (m > 4) {
bitrv2(m, ip + 2, t);
cftfsub(m, t, w);
rftfsub(m, t, nc, w + nw);
} else if (m == 4) {
cftfsub(m, t, w);
}
a[n - l] = t[1] - t[0];
a[l] = t[0] + t[1];
k = 0;
for (j = 2; j < m; j += 2) {
k += l << 2;
a[k - l] = -t[j] - t[j + 1];
a[k + l] = t[j] - t[j + 1];
}
l <<= 1;
mh = m >> 1;
for (j = 1; j < mh; j++) {
k = m - j;
t[j] = t[m + k] + t[m + j];
t[k] = t[m + k] - t[m + j];
}
t[0] = t[m + mh];
m = mh;
}
a[l] = t[0];
}
a[0] = 0; // a[0] is defined as work area only
}
/* -------- initializing routines -------- */
/* -------- initializing routines -------- */
// Builds the complex twiddle-factor table w[0..nw-1] (cos/sin pairs over a
// quarter period) and records its size in ip[0]; the table is then
// bit-reverse permuted so the butterflies can read it sequentially.
inline void makewt(int nw, int *ip, double *w) {
void bitrv2(int n, int *ip, double *a);
int j, nwh;
double delta, x, y;
ip[0] = nw; // remember table size for the lazy-rebuild checks
ip[1] = 1;
if (nw > 2) {
nwh = nw >> 1;
delta = atan(1.0) / nwh; // pi/4 divided over half the table
w[0] = 1;
w[1] = 0;
w[nwh] = cos(delta * nwh);
w[nwh + 1] = w[nwh];
if (nwh > 2) {
for (j = 2; j < nwh; j += 2) {
x = cos(delta * j);
y = sin(delta * j);
w[j] = x;
w[j + 1] = y;
w[nw - j] = y; // second half mirrors the first (cos/sin swapped)
w[nw - j + 1] = x;
}
bitrv2(nw, ip + 2, w);
}
}
}
// Builds the auxiliary cos/sin table c[0..nc-1] used by the rft/dct/dst
// correction stages (half-amplitude quarter-wave values) and records its
// size in ip[1].
inline void makect(int nc, int *ip, double *c) {
int j, nch;
double delta;
ip[1] = nc; // remember table size for the lazy-rebuild checks
if (nc > 1) {
nch = nc >> 1;
delta = atan(1.0) / nch; // pi/4 divided over half the table
c[0] = cos(delta * nch);
c[nch] = 0.5 * c[0];
for (j = 1; j < nch; j++) {
c[j] = 0.5 * cos(delta * j);
c[nc - j] = 0.5 * sin(delta * j);
}
}
}
/* -------- child routines -------- */
/* -------- child routines -------- */
// In-place bit-reversal permutation of the interleaved complex array a
// (n = 2 * number of complex points). ip is a work area that caches partial
// reversal offsets; the two branches handle the odd/even log2(n) cases. The
// unrolled 4-swap groups exchange symmetric index pairs in one pass.
inline void bitrv2(int n, int *ip, double *a) {
int j, j1, k, k1, l, m, m2;
double xr, xi, yr, yi;
ip[0] = 0;
l = n;
m = 1;
while ((m << 3) < l) {
l >>= 1;
for (j = 0; j < m; j++) {
ip[m + j] = ip[j] + l; // build reversal offsets incrementally
}
m <<= 1;
}
m2 = 2 * m;
if ((m << 3) == l) {
for (k = 0; k < m; k++) {
for (j = 0; j < k; j++) {
j1 = 2 * j + ip[k];
k1 = 2 * k + ip[j];
xr = a[j1];
xi = a[j1 + 1];
yr = a[k1];
yi = a[k1 + 1];
a[j1] = yr;
a[j1 + 1] = yi;
a[k1] = xr;
a[k1 + 1] = xi;
j1 += m2;
k1 += 2 * m2;
xr = a[j1];
xi = a[j1 + 1];
yr = a[k1];
yi = a[k1 + 1];
a[j1] = yr;
a[j1 + 1] = yi;
a[k1] = xr;
a[k1 + 1] = xi;
j1 += m2;
k1 -= m2;
xr = a[j1];
xi = a[j1 + 1];
yr = a[k1];
yi = a[k1 + 1];
a[j1] = yr;
a[j1 + 1] = yi;
a[k1] = xr;
a[k1 + 1] = xi;
j1 += m2;
k1 += 2 * m2;
xr = a[j1];
xi = a[j1 + 1];
yr = a[k1];
yi = a[k1 + 1];
a[j1] = yr;
a[j1 + 1] = yi;
a[k1] = xr;
a[k1 + 1] = xi;
}
// diagonal element: one remaining swap per k
j1 = 2 * k + m2 + ip[k];
k1 = j1 + m2;
xr = a[j1];
xi = a[j1 + 1];
yr = a[k1];
yi = a[k1 + 1];
a[j1] = yr;
a[j1 + 1] = yi;
a[k1] = xr;
a[k1 + 1] = xi;
}
} else {
for (k = 1; k < m; k++) {
for (j = 0; j < k; j++) {
j1 = 2 * j + ip[k];
k1 = 2 * k + ip[j];
xr = a[j1];
xi = a[j1 + 1];
yr = a[k1];
yi = a[k1 + 1];
a[j1] = yr;
a[j1 + 1] = yi;
a[k1] = xr;
a[k1 + 1] = xi;
j1 += m2;
k1 += m2;
xr = a[j1];
xi = a[j1 + 1];
yr = a[k1];
yi = a[k1 + 1];
a[j1] = yr;
a[j1 + 1] = yi;
a[k1] = xr;
a[k1 + 1] = xi;
}
}
}
}
// Bit-reversal permutation combined with complex conjugation (imaginary
// parts negated while swapping) -- used by cdft's backward path. Structure
// mirrors bitrv2, with extra sign flips on the fixed points of the
// permutation.
inline void bitrv2conj(int n, int *ip, double *a) {
int j, j1, k, k1, l, m, m2;
double xr, xi, yr, yi;
ip[0] = 0;
l = n;
m = 1;
while ((m << 3) < l) {
l >>= 1;
for (j = 0; j < m; j++) {
ip[m + j] = ip[j] + l; // build reversal offsets incrementally
}
m <<= 1;
}
m2 = 2 * m;
if ((m << 3) == l) {
for (k = 0; k < m; k++) {
for (j = 0; j < k; j++) {
j1 = 2 * j + ip[k];
k1 = 2 * k + ip[j];
xr = a[j1];
xi = -a[j1 + 1];
yr = a[k1];
yi = -a[k1 + 1];
a[j1] = yr;
a[j1 + 1] = yi;
a[k1] = xr;
a[k1 + 1] = xi;
j1 += m2;
k1 += 2 * m2;
xr = a[j1];
xi = -a[j1 + 1];
yr = a[k1];
yi = -a[k1 + 1];
a[j1] = yr;
a[j1 + 1] = yi;
a[k1] = xr;
a[k1 + 1] = xi;
j1 += m2;
k1 -= m2;
xr = a[j1];
xi = -a[j1 + 1];
yr = a[k1];
yi = -a[k1 + 1];
a[j1] = yr;
a[j1 + 1] = yi;
a[k1] = xr;
a[k1 + 1] = xi;
j1 += m2;
k1 += 2 * m2;
xr = a[j1];
xi = -a[j1 + 1];
yr = a[k1];
yi = -a[k1 + 1];
a[j1] = yr;
a[j1 + 1] = yi;
a[k1] = xr;
a[k1 + 1] = xi;
}
// fixed points of the permutation: conjugate in place
k1 = 2 * k + ip[k];
a[k1 + 1] = -a[k1 + 1];
j1 = k1 + m2;
k1 = j1 + m2;
xr = a[j1];
xi = -a[j1 + 1];
yr = a[k1];
yi = -a[k1 + 1];
a[j1] = yr;
a[j1 + 1] = yi;
a[k1] = xr;
a[k1 + 1] = xi;
k1 += m2;
a[k1 + 1] = -a[k1 + 1];
}
} else {
a[1] = -a[1];
a[m2 + 1] = -a[m2 + 1];
for (k = 1; k < m; k++) {
for (j = 0; j < k; j++) {
j1 = 2 * j + ip[k];
k1 = 2 * k + ip[j];
xr = a[j1];
xi = -a[j1 + 1];
yr = a[k1];
yi = -a[k1 + 1];
a[j1] = yr;
a[j1 + 1] = yi;
a[k1] = xr;
a[k1 + 1] = xi;
j1 += m2;
k1 += m2;
xr = a[j1];
xi = -a[j1 + 1];
yr = a[k1];
yi = -a[k1 + 1];
a[j1] = yr;
a[j1 + 1] = yi;
a[k1] = xr;
a[k1 + 1] = xi;
}
// fixed points of the permutation: conjugate in place
k1 = 2 * k + ip[k];
a[k1 + 1] = -a[k1 + 1];
a[k1 + m2 + 1] = -a[k1 + m2 + 1];
}
}
}
// Forward complex FFT core on bit-reversed data: first stage via cft1st,
// middle radix-4 stages via cftmdl, then one final radix-4 or radix-2 stage
// depending on whether log2(n/2) is even or odd.
inline void cftfsub(int n, double *a, double *w) {
void cft1st(int n, double *a, double *w);
void cftmdl(int n, int l, double *a, double *w);
int j, j1, j2, j3, l;
double x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
l = 2;
if (n > 8) {
cft1st(n, a, w);
l = 8;
while ((l << 2) < n) {
cftmdl(n, l, a, w);
l <<= 2;
}
}
if ((l << 2) == n) {
// final radix-4 stage
for (j = 0; j < l; j += 2) {
j1 = j + l;
j2 = j1 + l;
j3 = j2 + l;
x0r = a[j] + a[j1];
x0i = a[j + 1] + a[j1 + 1];
x1r = a[j] - a[j1];
x1i = a[j + 1] - a[j1 + 1];
x2r = a[j2] + a[j3];
x2i = a[j2 + 1] + a[j3 + 1];
x3r = a[j2] - a[j3];
x3i = a[j2 + 1] - a[j3 + 1];
a[j] = x0r + x2r;
a[j + 1] = x0i + x2i;
a[j2] = x0r - x2r;
a[j2 + 1] = x0i - x2i;
a[j1] = x1r - x3i;
a[j1 + 1] = x1i + x3r;
a[j3] = x1r + x3i;
a[j3 + 1] = x1i - x3r;
}
} else {
// final radix-2 stage
for (j = 0; j < l; j += 2) {
j1 = j + l;
x0r = a[j] - a[j1];
x0i = a[j + 1] - a[j1 + 1];
a[j] += a[j1];
a[j + 1] += a[j1 + 1];
a[j1] = x0r;
a[j1 + 1] = x0i;
}
}
}
// Backward complex FFT core: same stage structure as cftfsub, but with the
// imaginary parts negated in the final stage to realize the conjugate
// (inverse-direction) transform.
inline void cftbsub(int n, double *a, double *w) {
void cft1st(int n, double *a, double *w);
void cftmdl(int n, int l, double *a, double *w);
int j, j1, j2, j3, l;
double x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
l = 2;
if (n > 8) {
cft1st(n, a, w);
l = 8;
while ((l << 2) < n) {
cftmdl(n, l, a, w);
l <<= 2;
}
}
if ((l << 2) == n) {
// final radix-4 stage (conjugated)
for (j = 0; j < l; j += 2) {
j1 = j + l;
j2 = j1 + l;
j3 = j2 + l;
x0r = a[j] + a[j1];
x0i = -a[j + 1] - a[j1 + 1];
x1r = a[j] - a[j1];
x1i = -a[j + 1] + a[j1 + 1];
x2r = a[j2] + a[j3];
x2i = a[j2 + 1] + a[j3 + 1];
x3r = a[j2] - a[j3];
x3i = a[j2 + 1] - a[j3 + 1];
a[j] = x0r + x2r;
a[j + 1] = x0i - x2i;
a[j2] = x0r - x2r;
a[j2 + 1] = x0i + x2i;
a[j1] = x1r - x3i;
a[j1 + 1] = x1i - x3r;
a[j3] = x1r + x3i;
a[j3 + 1] = x1i + x3r;
}
} else {
// final radix-2 stage (conjugated)
for (j = 0; j < l; j += 2) {
j1 = j + l;
x0r = a[j] - a[j1];
x0i = -a[j + 1] + a[j1 + 1];
a[j] += a[j1];
a[j + 1] = -a[j + 1] - a[j1 + 1];
a[j1] = x0r;
a[j1 + 1] = x0i;
}
}
}
// First radix-4 butterfly stage of the complex FFT (stride-8 groups). The
// two leading groups use fixed twiddles (1 and sqrt(1/2)); the loop applies
// table twiddles wk1/wk2/wk3, with wk3 derived from wk1/wk2 to save table
// lookups.
inline void cft1st(int n, double *a, double *w) {
int j, k1, k2;
double wk1r, wk1i, wk2r, wk2i, wk3r, wk3i;
double x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
x0r = a[0] + a[2];
x0i = a[1] + a[3];
x1r = a[0] - a[2];
x1i = a[1] - a[3];
x2r = a[4] + a[6];
x2i = a[5] + a[7];
x3r = a[4] - a[6];
x3i = a[5] - a[7];
a[0] = x0r + x2r;
a[1] = x0i + x2i;
a[4] = x0r - x2r;
a[5] = x0i - x2i;
a[2] = x1r - x3i;
a[3] = x1i + x3r;
a[6] = x1r + x3i;
a[7] = x1i - x3r;
wk1r = w[2]; // cos(pi/4)
x0r = a[8] + a[10];
x0i = a[9] + a[11];
x1r = a[8] - a[10];
x1i = a[9] - a[11];
x2r = a[12] + a[14];
x2i = a[13] + a[15];
x3r = a[12] - a[14];
x3i = a[13] - a[15];
a[8] = x0r + x2r;
a[9] = x0i + x2i;
a[12] = x2i - x0i;
a[13] = x0r - x2r;
x0r = x1r - x3i;
x0i = x1i + x3r;
a[10] = wk1r * (x0r - x0i);
a[11] = wk1r * (x0r + x0i);
x0r = x3i + x1r;
x0i = x3r - x1i;
a[14] = wk1r * (x0i - x0r);
a[15] = wk1r * (x0i + x0r);
k1 = 0;
for (j = 16; j < n; j += 16) {
k1 += 2;
k2 = 2 * k1;
wk2r = w[k1];
wk2i = w[k1 + 1];
wk1r = w[k2];
wk1i = w[k2 + 1];
wk3r = wk1r - 2 * wk2i * wk1i; // wk3 = wk1 * conj(wk2)^2 style identity
wk3i = 2 * wk2i * wk1r - wk1i;
x0r = a[j] + a[j + 2];
x0i = a[j + 1] + a[j + 3];
x1r = a[j] - a[j + 2];
x1i = a[j + 1] - a[j + 3];
x2r = a[j + 4] + a[j + 6];
x2i = a[j + 5] + a[j + 7];
x3r = a[j + 4] - a[j + 6];
x3i = a[j + 5] - a[j + 7];
a[j] = x0r + x2r;
a[j + 1] = x0i + x2i;
x0r -= x2r;
x0i -= x2i;
a[j + 4] = wk2r * x0r - wk2i * x0i;
a[j + 5] = wk2r * x0i + wk2i * x0r;
x0r = x1r - x3i;
x0i = x1i + x3r;
a[j + 2] = wk1r * x0r - wk1i * x0i;
a[j + 3] = wk1r * x0i + wk1i * x0r;
x0r = x1r + x3i;
x0i = x1i - x3r;
a[j + 6] = wk3r * x0r - wk3i * x0i;
a[j + 7] = wk3r * x0i + wk3i * x0r;
wk1r = w[k2 + 2];
wk1i = w[k2 + 3];
wk3r = wk1r - 2 * wk2r * wk1i;
wk3i = 2 * wk2r * wk1r - wk1i;
x0r = a[j + 8] + a[j + 10];
x0i = a[j + 9] + a[j + 11];
x1r = a[j + 8] - a[j + 10];
x1i = a[j + 9] - a[j + 11];
x2r = a[j + 12] + a[j + 14];
x2i = a[j + 13] + a[j + 15];
x3r = a[j + 12] - a[j + 14];
x3i = a[j + 13] - a[j + 15];
a[j + 8] = x0r + x2r;
a[j + 9] = x0i + x2i;
x0r -= x2r;
x0i -= x2i;
a[j + 12] = -wk2i * x0r - wk2r * x0i;
a[j + 13] = -wk2i * x0i + wk2r * x0r;
x0r = x1r - x3i;
x0i = x1i + x3r;
a[j + 10] = wk1r * x0r - wk1i * x0i;
a[j + 11] = wk1r * x0i + wk1i * x0r;
x0r = x1r + x3i;
x0i = x1i - x3r;
a[j + 14] = wk3r * x0r - wk3i * x0i;
a[j + 15] = wk3r * x0i + wk3i * x0r;
}
}
// Middle radix-4 butterfly stages of the complex FFT for sub-transform
// length l (called with l = 8, 32, ... by cftfsub/cftbsub). The first two
// blocks use fixed twiddles; the outer k loop applies table twiddles per
// group of 2*m elements, deriving wk3 from wk1/wk2 as in cft1st.
inline void cftmdl(int n, int l, double *a, double *w) {
int j, j1, j2, j3, k, k1, k2, m, m2;
double wk1r, wk1i, wk2r, wk2i, wk3r, wk3i;
double x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
m = l << 2;
for (j = 0; j < l; j += 2) {
j1 = j + l;
j2 = j1 + l;
j3 = j2 + l;
x0r = a[j] + a[j1];
x0i = a[j + 1] + a[j1 + 1];
x1r = a[j] - a[j1];
x1i = a[j + 1] - a[j1 + 1];
x2r = a[j2] + a[j3];
x2i = a[j2 + 1] + a[j3 + 1];
x3r = a[j2] - a[j3];
x3i = a[j2 + 1] - a[j3 + 1];
a[j] = x0r + x2r;
a[j + 1] = x0i + x2i;
a[j2] = x0r - x2r;
a[j2 + 1] = x0i - x2i;
a[j1] = x1r - x3i;
a[j1 + 1] = x1i + x3r;
a[j3] = x1r + x3i;
a[j3 + 1] = x1i - x3r;
}
wk1r = w[2]; // cos(pi/4)
for (j = m; j < l + m; j += 2) {
j1 = j + l;
j2 = j1 + l;
j3 = j2 + l;
x0r = a[j] + a[j1];
x0i = a[j + 1] + a[j1 + 1];
x1r = a[j] - a[j1];
x1i = a[j + 1] - a[j1 + 1];
x2r = a[j2] + a[j3];
x2i = a[j2 + 1] + a[j3 + 1];
x3r = a[j2] - a[j3];
x3i = a[j2 + 1] - a[j3 + 1];
a[j] = x0r + x2r;
a[j + 1] = x0i + x2i;
a[j2] = x2i - x0i;
a[j2 + 1] = x0r - x2r;
x0r = x1r - x3i;
x0i = x1i + x3r;
a[j1] = wk1r * (x0r - x0i);
a[j1 + 1] = wk1r * (x0r + x0i);
x0r = x3i + x1r;
x0i = x3r - x1i;
a[j3] = wk1r * (x0i - x0r);
a[j3 + 1] = wk1r * (x0i + x0r);
}
k1 = 0;
m2 = 2 * m;
for (k = m2; k < n; k += m2) {
k1 += 2;
k2 = 2 * k1;
wk2r = w[k1];
wk2i = w[k1 + 1];
wk1r = w[k2];
wk1i = w[k2 + 1];
wk3r = wk1r - 2 * wk2i * wk1i;
wk3i = 2 * wk2i * wk1r - wk1i;
for (j = k; j < l + k; j += 2) {
j1 = j + l;
j2 = j1 + l;
j3 = j2 + l;
x0r = a[j] + a[j1];
x0i = a[j + 1] + a[j1 + 1];
x1r = a[j] - a[j1];
x1i = a[j + 1] - a[j1 + 1];
x2r = a[j2] + a[j3];
x2i = a[j2 + 1] + a[j3 + 1];
x3r = a[j2] - a[j3];
x3i = a[j2 + 1] - a[j3 + 1];
a[j] = x0r + x2r;
a[j + 1] = x0i + x2i;
x0r -= x2r;
x0i -= x2i;
a[j2] = wk2r * x0r - wk2i * x0i;
a[j2 + 1] = wk2r * x0i + wk2i * x0r;
x0r = x1r - x3i;
x0i = x1i + x3r;
a[j1] = wk1r * x0r - wk1i * x0i;
a[j1 + 1] = wk1r * x0i + wk1i * x0r;
x0r = x1r + x3i;
x0i = x1i - x3r;
a[j3] = wk3r * x0r - wk3i * x0i;
a[j3 + 1] = wk3r * x0i + wk3i * x0r;
}
wk1r = w[k2 + 2];
wk1i = w[k2 + 3];
wk3r = wk1r - 2 * wk2r * wk1i;
wk3i = 2 * wk2r * wk1r - wk1i;
for (j = k + m; j < l + (k + m); j += 2) {
j1 = j + l;
j2 = j1 + l;
j3 = j2 + l;
x0r = a[j] + a[j1];
x0i = a[j + 1] + a[j1 + 1];
x1r = a[j] - a[j1];
x1i = a[j + 1] - a[j1 + 1];
x2r = a[j2] + a[j3];
x2i = a[j2 + 1] + a[j3 + 1];
x3r = a[j2] - a[j3];
x3i = a[j2 + 1] - a[j3 + 1];
a[j] = x0r + x2r;
a[j + 1] = x0i + x2i;
x0r -= x2r;
x0i -= x2i;
a[j2] = -wk2i * x0r - wk2r * x0i;
a[j2 + 1] = -wk2i * x0i + wk2r * x0r;
x0r = x1r - x3i;
x0i = x1i + x3r;
a[j1] = wk1r * x0r - wk1i * x0i;
a[j1 + 1] = wk1r * x0i + wk1i * x0r;
x0r = x1r + x3i;
x0i = x1i - x3r;
a[j3] = wk3r * x0r - wk3i * x0i;
a[j3 + 1] = wk3r * x0i + wk3i * x0r;
}
}
}
// Forward real-FFT correction stage: combines the symmetric bin pairs
// (j, n-j) of the half-size complex FFT into the spectrum of the length-n
// real input, using the makect table c.
inline void rftfsub(int n, double *a, int nc, double *c) {
int j, k, kk, ks, m;
double wkr, wki, xr, xi, yr, yi;
m = n >> 1;
ks = 2 * nc / m; // table stride for this transform length
kk = 0;
for (j = 2; j < m; j += 2) {
k = n - j; // mirrored bin index
kk += ks;
wkr = 0.5 - c[nc - kk];
wki = c[kk];
xr = a[j] - a[k];
xi = a[j + 1] + a[k + 1];
yr = wkr * xr - wki * xi;
yi = wkr * xi + wki * xr;
a[j] -= yr;
a[j + 1] -= yi;
a[k] += yr;
a[k + 1] -= yi;
}
}
// Real-DFT helper (inverse direction): companion of rftfsub(); unfolds the
// real spectrum back into the half-length complex array before the inverse
// complex FFT, negating a[1] and a[m + 1] as part of the conjugation.
// Appears to be the rftbsub routine of Ooura's fft4g FFT package.
// NOTE(review): same twiddle-table bounds assumption as rftfsub() --
// nc must cover the c[kk]/c[nc - kk] accesses.
inline void rftbsub(int n, double *a, int nc, double *c) {
int j, k, kk, ks, m;
double wkr, wki, xr, xi, yr, yi;
a[1] = -a[1];
m = n >> 1;
ks = 2 * nc / m;  // stride through the twiddle table per bin pair
kk = 0;
for (j = 2; j < m; j += 2) {
k = n - j;        // mirror bin paired with bin j
kk += ks;
wkr = 0.5 - c[nc - kk];
wki = c[kk];
xr = a[j] - a[k];
xi = a[j + 1] + a[k + 1];
yr = wkr * xr + wki * xi;  // signs of the wki terms flipped vs rftfsub()
yi = wkr * xi - wki * xr;
a[j] -= yr;
a[j + 1] = yi - a[j + 1];
a[k] += yr;
a[k + 1] = yi - a[k + 1];
}
a[m + 1] = -a[m + 1];
}
// DCT twiddle step: rotates each element pair (a[j], a[n - j]) by an angle
// taken from the cosine table c[] (c[kk] and c[nc - kk] serve as the
// cos/sin pair) and scales the middle element a[m] by c[0].  Appears to be
// the dctsub routine of Ooura's fft4g package, applied around the real FFT
// to realise the discrete cosine transform.
inline void dctsub(int n, double *a, int nc, double *c) {
int j, k, kk, ks, m;
double wkr, wki, xr;
m = n >> 1;
ks = nc / n;      // stride through the cosine table per element pair
kk = 0;
for (j = 1; j < m; j++) {
k = n - j;        // mirror index paired with j
kk += ks;
wkr = c[kk] - c[nc - kk];
wki = c[kk] + c[nc - kk];
xr = wki * a[j] - wkr * a[k];   // rotated counterpart, stored below
a[j] = wkr * a[j] + wki * a[k];
a[k] = xr;
}
a[m] *= c[0];     // middle element only needs the constant scale
}
// DST twiddle step: mirror image of dctsub() with the roles of a[j] and
// a[n - j] exchanged in the rotation, used around the real FFT to realise
// the discrete sine transform.  Appears to be the dstsub routine of
// Ooura's fft4g package.
inline void dstsub(int n, double *a, int nc, double *c) {
int j, k, kk, ks, m;
double wkr, wki, xr;
m = n >> 1;
ks = nc / n;      // stride through the cosine table per element pair
kk = 0;
for (j = 1; j < m; j++) {
k = n - j;        // mirror index paired with j
kk += ks;
wkr = c[kk] - c[nc - kk];
wki = c[kk] + c[nc - kk];
xr = wki * a[k] - wkr * a[j];   // rotated counterpart, stored below
a[k] = wkr * a[k] + wki * a[j];
a[j] = xr;
}
a[m] *= c[0];     // middle element only needs the constant scale
}
#endif
|
collapse-nonrectangle.c | #include <stdio.h>
#include "assert.h"
#include <unistd.h>
/* Fill the non-rectangular upper-left triangle of c with the outer products
 * a[row]*b[col] (col < N-row), offloaded via OpenMP target with a
 * non-rectangular collapse(2) loop nest.  Entries with col >= N-row are
 * left unwritten. */
void vmul(int*a, int*b, int*c, int N){
#pragma omp target map(to: a[0:N],b[0:N]) map(from:c[0:N*N])
#pragma omp teams distribute parallel for collapse(2)
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N - row; col++) {
            c[row * N + col] = a[row] * b[col];
        }
    }
}
/* Driver: compute the triangular outer product on the device via vmul() and
 * verify it against a host-side reference.  Returns 0 on success, 1 on any
 * mismatch. */
int main(){
    enum { N = 1000 };
    /* c and validate are ~4 MB each: keep them static so they do not
       overflow the default thread stack (the original used stack VLAs). */
    static int c[N * N], validate[N * N];
    int a[N], b[N];
    int flag = -1; /* -1 marks success; otherwise the first failing row */
    for (int i = 0; i < N; i++) {
        a[i] = i + 1;
        for (int j = 0; j < N - i; j++) {
            b[j] = j + 2;
            validate[i * N + j] = a[i] * b[j];
        }
    }
    vmul(a, b, c, N);
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N - i; j++) {
            /* FIX: the original compared validate against a[i]*b[j]
               (trivially equal, so the device result was never checked),
               printed with wrong indices, and the missing braces made
               flag update unconditionally. */
            if (c[i * N + j] != validate[i * N + j]) {
                if (flag == -1)
                    printf("First fail: c[%d](%d) != validate[%d](%d)\n",
                           i * N + j, c[i * N + j], i * N + j,
                           validate[i * N + j]);
                flag = i;
            }
        }
    }
    return flag == -1 ? 0 : 1;
}
|
cpl_geom_img-test.c | /*
* This file is part of the ESO Common Pipeline Library
* Copyright (C) 2001-2017 European Southern Observatory
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*-----------------------------------------------------------------------------
Includes
-----------------------------------------------------------------------------*/
#include <math.h>
#include <cpl_image_gen.h>
#include <cpl_imagelist_io.h>
#include <cpl_image_io.h>
#include <cpl_image_basic.h>
#include <cpl_bivector.h>
#include <cpl_vector.h>
#include <cpl_memory.h>
#include <cpl_msg.h>
#include <cpl_test.h>
#include <cpl_plot.h>
/* cpl_drand() */
#include <cpl_tools.h>
#include "cpl_apertures.h"
#include "cpl_geom_img.h"
/*-----------------------------------------------------------------------------
Define
-----------------------------------------------------------------------------*/
#ifndef IMAGESZ
#define IMAGESZ 256
#endif
#define NFRAMES 10
#define NSIGMAS 4
#define MAX_SHIFT_ERROR1 15
#define MAX_SHIFT_ERROR2 0.1
/*-----------------------------------------------------------------------------
Private functions
-----------------------------------------------------------------------------*/
static void cpl_geom_img_offset_saa_one(cpl_kernel);
static
void cpl_geom_img_offset_saa_bench(cpl_geom_combine, int, int, int, int, int);
static void cpl_imagelist_fill_shifted(cpl_imagelist *, cpl_size,
const double *, const double *);
/**@{*/
/*-----------------------------------------------------------------------------
Main
-----------------------------------------------------------------------------*/
/* Test driver for the cpl_geom_img module: exercises shift-and-add
 * (cpl_geom_img_offset_saa), offset refinement (cpl_geom_img_offset_fine)
 * and the offset_combine wrapper over all combination modes and the
 * value-preserving resampling kernels. */
int main(void)
{
    /* These kernels preserve the actual pixel-values */
    cpl_kernel kernels[] = {CPL_KERNEL_DEFAULT,
                            CPL_KERNEL_NEAREST};
    const cpl_geom_combine geoms[] = {CPL_GEOM_INTERSECT, CPL_GEOM_UNION,
                                      CPL_GEOM_FIRST};
    /* Shift by non-integer amount to evaluate resampling */
    const double off_x_init[] = { 0.0, -6.5, -18.5, 54.5, 33.5,
                                  46.5, -3.5, 36.5, 42.5, -13.5};
    const double off_y_init[] = { 0.0, 13.5, 3.5, 8.5, 32.5,
                                  22.5, 18.5, -56.5, 3.5, 10.5};
    cpl_imagelist * iset;
    cpl_image * img;
    cpl_bivector * offs_est;
    cpl_vector * off_vec_x;
    cpl_vector * off_vec_y;
    cpl_bivector * offs_ref;
    cpl_apertures * aperts;
    int naperts;
    cpl_bivector * aperts_pos;
    cpl_vector * aperts_pos_x;
    cpl_vector * aperts_pos_y;
    cpl_vector * correl;
    const double psigmas[] = {5, 2, 1, 0.5};
    cpl_vector * sigmas;
    cpl_image ** combined;
    int i;
    cpl_size pos;

    cpl_test_init(PACKAGE_BUGREPORT, CPL_MSG_WARNING);

    /* Verify the test data */
    cpl_test_eq(sizeof(off_x_init), NFRAMES * sizeof(off_x_init[0]));
    cpl_test_eq(sizeof(off_y_init), sizeof(off_x_init));
    cpl_test_eq(sizeof(psigmas), NSIGMAS * sizeof(psigmas[0]));
    for (i = 0; i < NFRAMES; i++) {
        cpl_test_leq(fabs(off_x_init[i]), IMAGESZ);
        cpl_test_leq(fabs(off_y_init[i]), IMAGESZ);
    }

    cpl_geom_img_offset_saa_one(CPL_KERNEL_DEFAULT);
    cpl_geom_img_offset_saa_one(CPL_KERNEL_NEAREST);

    if (cpl_msg_get_level() <= CPL_MSG_INFO) {
        const double tprev = cpl_test_get_cputime();
        const cpl_flops fprev = cpl_tools_get_flops();
        double tpost, cputime;
        cpl_flops fpost, nflops;
#ifndef _OPENMP
        cpl_geom_img_offset_saa_bench(CPL_GEOM_FIRST, 10, 16, 4*IMAGESZ,
                                      4*IMAGESZ, 0);
        cpl_geom_img_offset_saa_bench(CPL_GEOM_FIRST, 6, 18, 4*IMAGESZ,
                                      4*IMAGESZ, 1);
#endif
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
        for (i=0; i < 8; i++) {
            cpl_geom_img_offset_saa_bench(CPL_GEOM_FIRST, 6, 18, 4*IMAGESZ,
                                          4*IMAGESZ, 1);
        }
        tpost = cpl_test_get_cputime();
        fpost = cpl_tools_get_flops();
        cputime = tpost - tprev;
        nflops = fpost - fprev;
        cpl_msg_info(cpl_func, "Time to benchmark [s]: %g (%g MFLOP/s)",
                     cputime,
                     cputime > 0.0 ? (double)nflops/cputime/1e6 : 0.0);
    } else {
        cpl_geom_img_offset_saa_bench(CPL_GEOM_FIRST, 1, 4, IMAGESZ/4,
                                      IMAGESZ/4, 1);
    }

    /* Bivector with 1 zero-valued element */
    off_vec_x = cpl_vector_new(1);
    cpl_vector_set(off_vec_x, 0, 0.0);
    offs_ref = cpl_bivector_wrap_vectors(off_vec_x, off_vec_x);

    /* Test with empty imagelist */
    iset = cpl_imagelist_new();
    combined = cpl_geom_img_offset_saa(iset, offs_ref,
                                       CPL_KERNEL_DEFAULT,
                                       0, 0, CPL_GEOM_FIRST,
                                       NULL, NULL);
    cpl_test_error(CPL_ERROR_ILLEGAL_INPUT);
    cpl_test_null(combined);

    /* Insert one image into imagelist */
    img = cpl_image_fill_test_create(IMAGESZ, IMAGESZ);
    cpl_imagelist_set(iset, img, 0);

    for (i = 0; i < (int)(sizeof(geoms)/sizeof(geoms[0])); i++) {
        const cpl_geom_combine geom = geoms[i];
        /* Shift and add */
        cpl_msg_info("", "Shift and add single image with geom number %d",
                     (int)geom);
        combined = cpl_geom_img_offset_saa(iset, offs_ref,
                                           CPL_KERNEL_DEFAULT,
                                           0, 0, geom,
                                           NULL, NULL);
        cpl_test_error(CPL_ERROR_NONE);
        cpl_test_nonnull(combined);
        cpl_test_nonnull(combined[0]);
        cpl_test_nonnull(combined[1]);
        cpl_test_eq(cpl_image_get_type(combined[1]), CPL_TYPE_INT);
        cpl_test_eq(cpl_image_get_min(combined[1]), 1);
        cpl_test_eq(cpl_image_get_max(combined[1]), 1);
        cpl_test_zero(cpl_image_count_rejected(combined[0]));
        cpl_test_eq(cpl_image_get_size_x(combined[0]),
                    cpl_image_get_size_x(combined[1]));
        cpl_test_eq(cpl_image_get_size_y(combined[0]),
                    cpl_image_get_size_y(combined[1]));
        cpl_test_image_abs(combined[0], cpl_imagelist_get_const(iset, 0), 0.0);
        cpl_image_delete(combined[0]);
        cpl_image_delete(combined[1]);
        cpl_free(combined);
    }
    cpl_bivector_unwrap_vectors(offs_ref);
    cpl_vector_delete(off_vec_x);

    cpl_imagelist_fill_shifted(iset, NFRAMES-1, off_x_init, off_y_init);
    cpl_test_eq(cpl_imagelist_get_size(iset), NFRAMES);

    /* Not modified */
    off_vec_x = cpl_vector_wrap(NFRAMES, (double*)off_x_init);
    off_vec_y = cpl_vector_wrap(NFRAMES, (double*)off_y_init);

    offs_est = cpl_bivector_new(NFRAMES);
    cpl_vector_copy(cpl_bivector_get_x(offs_est), off_vec_x);
    cpl_vector_copy(cpl_bivector_get_y(offs_est), off_vec_y);

    /* Distort the estimate */
    cpl_vector_add_scalar(cpl_bivector_get_x(offs_est), 2.0);
    cpl_vector_add_scalar(cpl_bivector_get_y(offs_est), -3.0);

    sigmas = cpl_vector_wrap(NSIGMAS, (double*)psigmas); /* Not modified */
    cpl_test_error(CPL_ERROR_NONE);

    /* Get some cross-correlation apertures */
    aperts = cpl_apertures_extract(cpl_imagelist_get_const(iset, 0), sigmas,
                                   &pos);
    cpl_vector_unwrap(sigmas);
    cpl_test_nonnull(aperts);
    naperts = cpl_apertures_get_size(aperts);
    cpl_test_leq(1, naperts);
    cpl_msg_info("","Detected %d apertures at sigma %g (%" CPL_SIZE_FORMAT "/%"
                 CPL_SIZE_FORMAT ")", naperts, psigmas[pos], 1+pos,
                 (cpl_size)NSIGMAS);
    if (cpl_msg_get_level() <= CPL_MSG_DEBUG)
        cpl_apertures_dump(aperts, stdout);
    aperts_pos = cpl_bivector_new(naperts);
    aperts_pos_x = cpl_bivector_get_x(aperts_pos);
    aperts_pos_y = cpl_bivector_get_y(aperts_pos);
    for (i=0; i<naperts; i++) {
        cpl_vector_set(aperts_pos_x, i, cpl_apertures_get_pos_x(aperts, i+1));
        cpl_vector_set(aperts_pos_y, i, cpl_apertures_get_pos_y(aperts, i+1));
    }
    cpl_apertures_delete(aperts);
    cpl_test_error(CPL_ERROR_NONE);

    /* Refine the offsets with cpl_geom_img_offset_fine */
    cpl_msg_info("","Refine the offsets for %d images using %" CPL_SIZE_FORMAT
                 " anchors", NFRAMES, cpl_bivector_get_size(aperts_pos));
    correl = cpl_vector_new(NFRAMES);
    offs_ref = cpl_geom_img_offset_fine(iset, offs_est, aperts_pos,
                                        15, 15, 15, 15, correl);
    cpl_test_nonnull(offs_ref);
    cpl_test_eq(cpl_bivector_get_size(offs_ref), NFRAMES);
    cpl_vector_delete(correl);
    cpl_bivector_delete(offs_est);
    cpl_bivector_delete(aperts_pos);

    cpl_test_vector_abs(cpl_bivector_get_x(offs_ref), off_vec_x,
                        MAX_SHIFT_ERROR2);
    cpl_test_vector_abs(cpl_bivector_get_y(offs_ref), off_vec_y,
                        MAX_SHIFT_ERROR2);

    cpl_test_nonnull(cpl_vector_unwrap(off_vec_x));
    cpl_test_nonnull(cpl_vector_unwrap(off_vec_y));

    for (i = 0; i < (int)(sizeof(geoms)/sizeof(geoms[0])); i++) {
        const cpl_geom_combine geom = geoms[i];
        const int rejmin = 1;
        const int rejmax = 1;
        const int maximg = NFRAMES - rejmin - rejmax;
        /* Called like this, cpl_geom_img_offset_combine() is just
           a wrapper around cpl_geom_img_offset_saa() */
        cpl_image ** combined2
            = cpl_geom_img_offset_combine(iset, offs_ref, 0, NULL, NULL, NULL,
                                          0, 0, 0, 0, rejmin, rejmax, geom);
        cpl_test_error(CPL_ERROR_NONE);
        cpl_test_nonnull(combined2);
        /* Shift and add */
        cpl_msg_info("", "Shift and add with geom number %d", (int)geom);
        combined = cpl_geom_img_offset_saa(iset, offs_ref, CPL_KERNEL_DEFAULT,
                                           rejmin, rejmax, geom, NULL, NULL);
        cpl_test_error(CPL_ERROR_NONE);
        cpl_test_nonnull(combined);
        if (combined == NULL) continue;
        cpl_test_image_abs(combined[0], combined2[0], 0.0);
        cpl_test_image_abs(combined[1], combined2[1], 0.0);
        cpl_image_delete(combined2[0]);
        cpl_image_delete(combined2[1]);
        cpl_free(combined2);
        cpl_test_eq(cpl_image_get_type(combined[1]), CPL_TYPE_INT);
        if (cpl_image_get_min(combined[1]) == 0) {
            cpl_test(cpl_image_count_rejected(combined[0]));
        } else {
            cpl_test_leq(1, cpl_image_get_min(combined[1]));
            cpl_test_zero(cpl_image_count_rejected(combined[0]));
        }
        cpl_test_eq(cpl_image_get_size_x(combined[0]),
                    cpl_image_get_size_x(combined[1]));
        cpl_test_eq(cpl_image_get_size_y(combined[0]),
                    cpl_image_get_size_y(combined[1]));
        cpl_test_leq(cpl_image_get_max(combined[1]), maximg);
        if (geom == CPL_GEOM_INTERSECT) {
            cpl_test_eq(cpl_image_get_max(combined[1]), maximg);
            cpl_test_leq(1, cpl_image_get_min(combined[1]));
        } else if (geom == CPL_GEOM_FIRST) {
            /* FIXME: Should at least be 1 */
            cpl_test_leq(0, cpl_image_get_min(combined[1]));
        } else if (geom == CPL_GEOM_UNION) {
            cpl_test_leq(0, cpl_image_get_min(combined[1]));
        }
        cpl_msg_info("", "Minimum value in contribution map: %g",
                     cpl_image_get_min(combined[1]));
        cpl_image_delete(combined[0]);
        cpl_image_delete(combined[1]);
        cpl_free(combined);
    }

    /* Shift and add without bad pixels */
    for (i=0; i < NFRAMES; i++) {
        cpl_image_accept_all(cpl_imagelist_get(iset, i));
    }

    for (i = 0; i < (int)(sizeof(geoms)/sizeof(geoms[0])); i++) {
        int ityp;
        for (ityp = 0; ityp < (int)(sizeof(kernels)/sizeof(kernels[0]));
             ityp++) {
            const cpl_geom_combine geom = geoms[i];
            const int rejmin = 1;
            const int rejmax = 1;
            const int maximg = NFRAMES - rejmin - rejmax;
            /* Shift and add */
            cpl_msg_info("", "Shift and add with geom number %d and kernel "
                         "type %d", (int)geom, (int)kernels[ityp]);
            combined = cpl_geom_img_offset_saa(iset, offs_ref, kernels[ityp],
                                               rejmin, rejmax, geom,
                                               NULL, NULL);
            cpl_test_error(CPL_ERROR_NONE);
            cpl_test_nonnull(combined);
            if (combined == NULL) continue;
            cpl_test_eq(cpl_image_get_type(combined[1]), CPL_TYPE_INT);
            if (cpl_image_get_min(combined[1]) == 0) {
                cpl_test(cpl_image_count_rejected(combined[0]));
            } else {
                cpl_test_leq(1, cpl_image_get_min(combined[1]));
                cpl_test_zero(cpl_image_count_rejected(combined[0]));
            }
            cpl_test_eq(cpl_image_get_size_x(combined[0]),
                        cpl_image_get_size_x(combined[1]));
            cpl_test_eq(cpl_image_get_size_y(combined[0]),
                        cpl_image_get_size_y(combined[1]));
            cpl_test_leq(cpl_image_get_max(combined[1]), maximg);
            if (geom == CPL_GEOM_INTERSECT) {
                cpl_test_eq(cpl_image_get_max(combined[1]), maximg);
                cpl_test_leq(1, cpl_image_get_min(combined[1]));
            } else if (geom == CPL_GEOM_FIRST) {
                /* FIXME: Should at least be 1 */
                cpl_test_leq(0, cpl_image_get_min(combined[1]));
            } else if (geom == CPL_GEOM_UNION) {
                cpl_test_leq(0, cpl_image_get_min(combined[1]));
            }
            cpl_msg_info("", "Minimum value in contribution map: %g",
                         cpl_image_get_min(combined[1]));
            cpl_image_delete(combined[0]);
            cpl_image_delete(combined[1]);
            cpl_free(combined);
        }
    }

    cpl_bivector_delete(offs_ref);
    img = cpl_imagelist_unset(iset, 0);
    cpl_imagelist_delete(iset);

    /* Shift and add of two uniform images - with no offsets */
    iset = cpl_imagelist_new();
    cpl_imagelist_set(iset, img, 0);
    cpl_image_threshold(img, 1.0, 1.0, 1.0, 1.0);
    cpl_image_accept_all(img);
    img = cpl_image_duplicate(img);
    cpl_imagelist_set(iset, img, 1);
    off_vec_x = cpl_vector_new(2);
    cpl_vector_fill(off_vec_x, 0.0);
    offs_ref = cpl_bivector_wrap_vectors(off_vec_x,
                                         cpl_vector_duplicate(off_vec_x));
    if (cpl_msg_get_level() <= CPL_MSG_DEBUG)
        cpl_plot_image("","","", img);

    for (i = 0; i < (int)(sizeof(geoms)/sizeof(geoms[0])); i++) {
        int ityp;
        for (ityp = 0; ityp < (int)(sizeof(kernels)/sizeof(kernels[0]));
             ityp++) {
            const cpl_geom_combine geom = geoms[i];
            double pos_x, pos_y;
            cpl_msg_info("", "Shift and add with geom number %d and kernel "
                         "type %d", (int)geom, (int)kernels[ityp]);
            combined = cpl_geom_img_offset_saa(iset, offs_ref, kernels[ityp],
                                               0, 0, geom, &pos_x, &pos_y);
            cpl_test_nonnull(combined);
            if (combined == NULL) continue;
            cpl_test_eq(cpl_image_get_size_x(combined[0]), IMAGESZ);
            /* FIX: the second check previously duplicated the X-size test;
               it is meant to verify the Y-size as well. */
            cpl_test_eq(cpl_image_get_size_y(combined[0]), IMAGESZ);
            cpl_test_eq(cpl_image_get_type(combined[1]), CPL_TYPE_INT);
            cpl_test_eq(cpl_image_get_size_x(combined[0]),
                        cpl_image_get_size_x(combined[1]));
            cpl_test_eq(cpl_image_get_size_y(combined[0]),
                        cpl_image_get_size_y(combined[1]));
            if (cpl_image_get_min(combined[1]) == 0) {
                cpl_test(cpl_image_count_rejected(combined[0]));
            } else {
                cpl_test_leq(1, cpl_image_get_min(combined[1]));
                cpl_test_zero(cpl_image_count_rejected(combined[0]));
            }
            cpl_test_eq(cpl_image_get_max(combined[1]), 2);
            if (geom == CPL_GEOM_INTERSECT) {
                cpl_test_eq(cpl_image_get_max(combined[1]), 2);
                /* FIXME: Should at least be 1 */
                cpl_test_leq(0, cpl_image_get_min(combined[1]));
            } else if (geom == CPL_GEOM_FIRST) {
                /* FIXME: Minimum value is zero */
                cpl_test_leq(0, cpl_image_get_min(combined[1]));
            } else if (geom == CPL_GEOM_UNION) {
                cpl_test_leq(0, cpl_image_get_min(combined[1]));
            }
#ifdef TEST_RESAMPLING
            /* Resampling introduces noise at the edge */
            /* NB: Comparison works for all modes, due to zero offset ... */
            cpl_test_image_abs(combined[0], img, MAX_SHIFT_ERROR2);
#endif
            if (cpl_msg_get_level() <= CPL_MSG_DEBUG) {
                cpl_image_subtract(combined[0], img);
                cpl_plot_image("","","", combined[0]);
            }
            cpl_image_delete(combined[0]);
            cpl_image_delete(combined[1]);
            cpl_free(combined);

            /* Now try to combine two images, the second shifted along the X-axis */
            cpl_image_shift(img, -MAX_SHIFT_ERROR1, 0);
            cpl_image_accept_all(img);
            cpl_vector_set(off_vec_x, 1, MAX_SHIFT_ERROR1);
            combined = cpl_geom_img_offset_saa(iset, offs_ref, kernels[ityp],
                                               0, 0, geom, &pos_x, &pos_y);
            cpl_test_error(CPL_ERROR_NONE);
            cpl_test_nonnull(combined);
            if (combined == NULL) continue;
            cpl_test_eq(cpl_image_get_max(combined[1]), 2);
            if (cpl_image_get_min(combined[1]) == 0) {
                cpl_test(cpl_image_count_rejected(combined[0]));
            } else {
                cpl_test_zero(cpl_image_count_rejected(combined[0]));
            }
            cpl_test_eq(cpl_image_get_size_y(combined[0]), IMAGESZ);
            if (geom == CPL_GEOM_INTERSECT) {
#ifdef SAVE_COMBINED
                cpl_image_save(combined[0], "PI.fits", CPL_TYPE_DOUBLE,
                               NULL, CPL_IO_CREATE);
                cpl_image_save(combined[1], "CI.fits", CPL_TYPE_UCHAR,
                               NULL, CPL_IO_CREATE);
#endif
                cpl_test_eq(cpl_image_get_size_x(combined[0]), IMAGESZ
                            - MAX_SHIFT_ERROR1);
            } else if (geom == CPL_GEOM_FIRST) {
#ifdef SAVE_COMBINED
                cpl_image_save(combined[0], "PF.fits", CPL_TYPE_DOUBLE,
                               NULL, CPL_IO_CREATE);
                cpl_image_save(combined[1], "CF.fits", CPL_TYPE_UCHAR,
                               NULL, CPL_IO_CREATE);
#endif
                cpl_test_eq(cpl_image_get_size_x(combined[0]), IMAGESZ);
            } else if (geom == CPL_GEOM_UNION) {
                cpl_test_eq(cpl_image_get_size_x(combined[0]), IMAGESZ
                            + MAX_SHIFT_ERROR1);
#ifdef SAVE_COMBINED
                cpl_image_save(combined[0], "PU.fits", CPL_TYPE_DOUBLE,
                               NULL, CPL_IO_CREATE);
                cpl_image_save(combined[1], "CU.fits", CPL_TYPE_UCHAR,
                               NULL, CPL_IO_CREATE);
#endif
                cpl_test_eq(cpl_image_get_min(combined[1]), 1);
            }
            img = cpl_imagelist_get(iset, 0);
            if (cpl_msg_get_level() <= CPL_MSG_DEBUG) {
                cpl_plot_image("","","", combined[0]);
                if (geom == CPL_GEOM_FIRST) {
                    cpl_image_subtract(combined[0], img);
                    cpl_plot_image("","","", combined[0]);
                }
            }
            cpl_image_delete(combined[0]);
            cpl_image_delete(combined[1]);
            cpl_free(combined);

            /* Reset offset and 2nd image */
            cpl_vector_fill(off_vec_x, 0.0);
            img = cpl_image_duplicate(img);
            cpl_imagelist_set(iset, img, 1);
        }
    }
    cpl_imagelist_delete(iset);
    cpl_bivector_delete(offs_ref);

    return cpl_test_end(0);
}
/**@}*/
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Benchmark the CPL function
@param mode CPL_GEOM_INTERSECT, CPL_GEOM_UNION, CPL_GEOM_FIRST
@param nr The number of repeats
@param nz The number of planes
@param nx The image X-size
@param ny The image Y-size
@param no The number of outlier pixels to ignore (both min and max)
@return void
*/
/*----------------------------------------------------------------------------*/
// Benchmark cpl_geom_img_offset_saa(): build nz randomly-shifted copies of a
// generated test image, run the shift-and-add nr times, and report the CPU
// time, MFLOP/s and effective processing rate.  (Parameter meanings are in
// the Doxygen block above.)
static
void cpl_geom_img_offset_saa_bench(cpl_geom_combine mode, int nr, int nz,
int nx, int ny, int no)
{
cpl_bivector * offset = cpl_bivector_new(nz);
cpl_vector * off_x = cpl_bivector_get_x(offset);
cpl_vector * off_y = cpl_bivector_get_y(offset);
cpl_image * imgd = cpl_image_fill_test_create(nx, ny);
cpl_image * img = cpl_image_cast(imgd, CPL_TYPE_FLOAT);
cpl_imagelist * imglist = cpl_imagelist_new();
cpl_image ** combined;
double cputime = 0.0;
size_t bytes = 0;
cpl_flops nflops = 0;
int ir, iz;
cpl_test_leq(1, nz);
/* Create bivector of shifts, from 0.4 to 0.6 of the pixel range */
/* Create list of shifted images */
cpl_vector_set(off_x, 0, 0.0);
cpl_vector_set(off_y, 0, 0.0);
cpl_image_delete(imgd);
cpl_imagelist_set(imglist, img, 0);
for (iz = 1; iz < nz; iz++) {
cpl_image * copy = cpl_image_duplicate(img);
/* Integer shifts in roughly [-0.1*n, 0.1*n] -- integer so that
   cpl_image_shift() can undo them exactly */
const int dx = (int)(0.1 * nx - 0.2 * nx * cpl_drand());
const int dy = (int)(0.1 * ny - 0.2 * ny * cpl_drand());
cpl_vector_set(off_x, iz, (double)dx);
cpl_vector_set(off_y, iz, (double)dy);
cpl_image_shift(copy, -dx, -dy);
cpl_image_accept_all(copy);
cpl_imagelist_set(imglist, copy, iz);
}
if (cpl_msg_get_level() <= CPL_MSG_DEBUG)
cpl_bivector_dump(offset, stdout);
bytes = (size_t)nr * cpl_test_get_bytes_imagelist(imglist);
for (ir = 0; ir < nr; ir++) {
/* Sample time/FLOP counters immediately around the call under test */
const cpl_flops flops = cpl_tools_get_flops();
const double secs = cpl_test_get_cputime();
combined = cpl_geom_img_offset_saa(imglist, offset, CPL_KERNEL_DEFAULT,
no, no, mode, NULL, NULL);
cpl_test_error(CPL_ERROR_NONE);
cpl_test_nonnull(combined);
cputime += cpl_test_get_cputime() - secs;
nflops += cpl_tools_get_flops() - flops;
if (combined == NULL) continue;
cpl_test_nonnull(combined[0]);
cpl_test_nonnull(combined[1]);
cpl_image_delete(combined[0]);
cpl_image_delete(combined[1]);
cpl_free(combined);
}
cpl_msg_info(cpl_func, "Time to benchmark with mode=%d, nr=%d, nz=%d, "
"nx=%d, ny=%d, no=%d [s]: %g (%g MFLOP/s)",
mode, nr, nz, nx, ny, no, cputime,
cputime > 0.0 ? (double)nflops/cputime/1e6 : 0.0);
if (cputime > 0.0) {
cpl_msg_info(cpl_func,"Processing rate [MB/s]: %g",
1e-6 * (double)bytes / cputime);
}
cpl_bivector_delete(offset);
cpl_imagelist_delete(imglist);
return;
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Create imagelist of images shifted from the 1st image
@param self Imagelist with one image to append to
@param napp The number of shifted images to append
@param dx The array of n+1 X-shifts (0.0 as 1st element)
@param dy The array of n+1 Y-shifts (0.0 as 1st element)
@return void
@note On return the number of images in self will be n+1
*/
/*----------------------------------------------------------------------------*/
/* Append napp copies of the first image in self, each resampled with the
 * polynomial warp (dx[i], dy[i]); on return self holds napp+1 images.
 * dx/dy must have napp+1 entries with 0.0 first (see Doxygen block above). */
static
void cpl_imagelist_fill_shifted(cpl_imagelist * self, cpl_size napp,
                                const double * dx, const double * dy)
{
    const cpl_image * img = cpl_imagelist_get_const(self, 0);
    const cpl_size type = cpl_image_get_type (img);
    const cpl_size nx = cpl_image_get_size_x(img);
    /* FIX: ny was taken from cpl_image_get_size_x() (copy-paste bug),
       which was only harmless because the test images are square. */
    const cpl_size ny = cpl_image_get_size_y(img);
    const cpl_size ishift_0[2] = {0, 0};
    const cpl_size ishift_x[2] = {1, 0};
    const cpl_size ishift_y[2] = {0, 1};
    const double xyradius = CPL_KERNEL_DEF_WIDTH;
    cpl_vector * xyprofile = cpl_vector_new(CPL_KERNEL_DEF_SAMPLES);
    cpl_polynomial * shift_x = cpl_polynomial_new(2);
    cpl_polynomial * shift_y = cpl_polynomial_new(2);
    cpl_error_code error;
    cpl_size i;

    cpl_test_eq(cpl_imagelist_get_size(self), 1);
    cpl_test_leq(1, napp);
    cpl_test_nonnull(dx);
    cpl_test_nonnull(dy);

    /* Identity transforms */
    error = cpl_polynomial_set_coeff(shift_x, ishift_x, 1.0);
    cpl_test_eq_error(error, CPL_ERROR_NONE);
    error = cpl_polynomial_set_coeff(shift_y, ishift_y, 1.0);
    cpl_test_eq_error(error, CPL_ERROR_NONE);

    /* Resampling profile */
    error = cpl_vector_fill_kernel_profile(xyprofile, CPL_KERNEL_DEFAULT,
                                           xyradius);
    cpl_test_eq_error(error, CPL_ERROR_NONE);

    /* Append images to image set */
    for (i=1; i < napp+1; i++) {
        cpl_image * copy = cpl_image_new(nx, ny, type);
        /* Shift in X and Y: only the constant term of each 2D polynomial
           changes per image */
        error = cpl_polynomial_set_coeff(shift_x, ishift_0, dx[i]);
        cpl_test_eq_error(error, CPL_ERROR_NONE);
        error = cpl_polynomial_set_coeff(shift_y, ishift_0, dy[i]);
        cpl_test_eq_error(error, CPL_ERROR_NONE);
        error = cpl_image_warp_polynomial(copy, img, shift_x, shift_y,
                                          xyprofile, xyradius,
                                          xyprofile, xyradius);
        cpl_test_eq_error(error, CPL_ERROR_NONE);
        error = cpl_imagelist_set(self, copy, i);
        cpl_test_eq_error(error, CPL_ERROR_NONE);
    }
    cpl_polynomial_delete(shift_x);
    cpl_polynomial_delete(shift_y);
    cpl_vector_delete(xyprofile);
    cpl_test_eq(cpl_imagelist_get_size(self), napp+1);
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Test the CPL function
@param kernel Kernel type
@return void
@note On return the number of images in self will be n+1
*/
/*----------------------------------------------------------------------------*/
/* Stack nz flat frames of known plane values with the given kernel and
 * verify the mean image and contribution map after min/max rejection.
 * The sub-pixel random offsets only affect a 2-pixel border, hence the
 * interior extraction before the min checks. */
static
void cpl_geom_img_offset_saa_one(cpl_kernel kernel)
{
    const int nz     = 2 + NFRAMES;
    const int nrejlo = nz / 5;              /* planes dropped at the low end  */
    const int nrejhi = nz / 4;              /* planes dropped at the high end */
    const int nkeep  = nz - nrejlo - nrejhi;
    cpl_imagelist  * planes = cpl_imagelist_new();
    cpl_bivector   * shifts = cpl_bivector_new(nz);
    cpl_vector     * sx     = cpl_bivector_get_x(shifts);
    cpl_vector     * sy     = cpl_bivector_get_y(shifts);
    cpl_image     ** stack;
    cpl_image      * inner_data;
    cpl_image      * inner_map;
    cpl_error_code   code;
    cpl_size         plane;

    for (plane = 0; plane < nz; plane++) {
        cpl_image * frame = cpl_image_new(IMAGESZ, IMAGESZ, CPL_TYPE_FLOAT);
        cpl_test_nonnull(frame);
        /* Flat frame whose value makes the post-rejection mean predictable */
        code = cpl_image_add_scalar(frame, (double)(nz - plane - nrejlo));
        cpl_test_eq_error(code, CPL_ERROR_NONE);
        code = cpl_imagelist_set(planes, frame, plane);
        cpl_test_eq_error(code, CPL_ERROR_NONE);
        /* First plane is the anchor: zero offset; others get random ones */
        cpl_vector_set(sx, plane, plane ? cpl_drand() : 0.0);
        cpl_vector_set(sy, plane, plane ? cpl_drand() : 0.0);
    }

    stack = cpl_geom_img_offset_saa(planes, shifts, kernel, nrejlo, nrejhi,
                                    CPL_GEOM_INTERSECT, NULL, NULL);
    cpl_test_error(CPL_ERROR_NONE);
    cpl_test_nonnull(stack);

    code = cpl_image_dump_structure(stack[1], stdout);
    cpl_test_eq_error(code, CPL_ERROR_NONE);

    /* Mean of the surviving plane values, and full contribution count */
    cpl_test_eq(cpl_image_get_max(stack[0]), (nkeep + 1) / 2.0);
    cpl_test_eq(cpl_image_get_max(stack[1]), nkeep);

    inner_data = cpl_image_extract(stack[0], 3, 3, IMAGESZ - 2, IMAGESZ - 2);
    inner_map  = cpl_image_extract(stack[1], 3, 3, IMAGESZ - 2, IMAGESZ - 2);
    cpl_test_eq(cpl_image_get_min(inner_data), (nkeep + 1) / 2.0);
    cpl_test_eq(cpl_image_get_min(inner_map), nkeep);

    cpl_image_delete(stack[0]);
    cpl_image_delete(stack[1]);
    cpl_free(stack);
    cpl_image_delete(inner_data);
    cpl_image_delete(inner_map);
    cpl_imagelist_delete(planes);
    cpl_bivector_delete(shifts);
}
|
GB_unaryop__identity_int32_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int32_fp32
// op(A') function: GB_tran__identity_int32_fp32
// C type: int32_t
// A type: float
// cast: int32_t cij ; GB_CAST_SIGNED(cij,aij,32)
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int32_t z ; GB_CAST_SIGNED(z,x,32) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Element-wise apply: Cx [p] = (int32_t) Ax [p] for all p in [0, anz),
// parallelised with a static OpenMP schedule over nthreads threads.
// The float -> int32 conversion goes through GB_CAST_SIGNED (defined in
// GB.h), presumably to give defined results for NaN / out-of-range values
// -- confirm against GB.h.  Returns GrB_NO_VALUE when the operator has been
// compiled out via GB_control.h (GB_DISABLE), else GrB_SUCCESS.
GrB_Info GB_unop__identity_int32_fp32
(
int32_t *restrict Cx,       // output array, anz entries
const float *restrict Ax,   // input array, anz entries
int64_t anz,                // number of entries to process
int nthreads                // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;         // Cx [p] = cast (Ax [p])
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast float -> int32, and apply the
// (identity) unary operator.  The actual loops come from including the
// GB_unaryop_transpose.c template with the macros defined above; Rowcounts,
// Iter and A_slice carry the pre-computed slicing of A across naslice tasks.
// Returns GrB_NO_VALUE when compiled out (GB_DISABLE), else GrB_SUCCESS.
GrB_Info GB_tran__identity_int32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
geo_particle_iter_mass.kernel_runtime.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "local_header.h"
#include "openmp_pscmc_inc.h"
#include "geo_particle_iter_mass.kernel_inc.h"
// One-time initialisation hook for this kernel; the OpenMP backend needs no
// per-kernel setup, so this always reports success (0).  Both parameters
// are unused here.
int openmp_relng_1st_sg2_small_grids_init (openmp_pscmc_env * pe ,openmp_relng_1st_sg2_small_grids_struct * kerstr ){
return 0 ;}
/* Report the size in bytes of the kernel parameter struct to the caller
 * (written through *len so the caller can allocate it opaquely). */
void openmp_relng_1st_sg2_small_grids_get_struct_len (size_t * len ){
    *len = sizeof(openmp_relng_1st_sg2_small_grids_struct);
}
// Number of compute units for this backend == the OpenMP thread cap of the
// process.  kerstr is unused.
int openmp_relng_1st_sg2_small_grids_get_num_compute_units (openmp_relng_1st_sg2_small_grids_struct * kerstr ){
return omp_get_max_threads ( ) ;}
/* This kernel variant always exposes a single logical x-dimension. */
int openmp_relng_1st_sg2_small_grids_get_xlen (){
    const int xlen = 1;
    return xlen;
}
/* Run the kernel over the (xlen x ylen) grid: rows are distributed
 * round-robin over the OpenMP threads (row yid is handled by thread
 * yid % numt), and each thread walks all xid columns of its rows.
 * Returns 0.
 * FIX: removed the dead locals ysingle/ymin/ymax, which computed a blocked
 * partition that was never used (the loop below uses round-robin). */
int openmp_relng_1st_sg2_small_grids_exec (openmp_relng_1st_sg2_small_grids_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        int xid;
        int yid;
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (xid = 0; xid < scmc_internal_g_xlen; xid += 1) {
                openmp_relng_1st_sg2_small_grids_scmc_kernel(
                    kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache,
                    kerstr->cu_xyzw, kerstr->xoffset, kerstr->yoffset,
                    kerstr->zoffset, kerstr->fieldE, kerstr->fieldB,
                    kerstr->fieldB1, kerstr->LFoutJ,
                    kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0],
                    kerstr->ovlp[0], kerstr->numvec[0], kerstr->num_ele[0],
                    kerstr->grid_cache_len[0], kerstr->cu_cache_length[0],
                    kerstr->DELTA_X[0], kerstr->DELTA_Y[0], kerstr->DELTA_Z[0],
                    kerstr->Mass0[0], kerstr->Charge0[0], kerstr->Deltat[0],
                    kerstr->Tori_X0[0], kerstr->Solve_Err[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_inoutput (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->inoutput = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_xyzw (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->xyzw = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_cu_cache (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cu_cache = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_cu_xyzw (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cu_xyzw = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_xoffset (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->xoffset = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_yoffset (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->yoffset = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_zoffset (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->zoffset = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_fieldE (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->fieldE = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_fieldB (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->fieldB = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_fieldB1 (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->fieldB1 = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_LFoutJ (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->LFoutJ = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_XLEN (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->XLEN = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_YLEN (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->YLEN = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_ZLEN (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->ZLEN = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_ovlp (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->ovlp = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_numvec (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->numvec = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_num_ele (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->num_ele = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_grid_cache_len (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->grid_cache_len = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_cu_cache_length (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cu_cache_length = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_DELTA_X (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DELTA_X = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_DELTA_Y (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DELTA_Y = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_DELTA_Z (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DELTA_Z = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_Mass0 (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Mass0 = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_Charge0 (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Charge0 = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_Deltat (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Deltat = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_Tori_X0 (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Tori_X0 = pm->d_data);
}
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_Solve_Err (openmp_relng_1st_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Solve_Err = pm->d_data);
}
/* No per-kernel initialization is needed for this generated kernel. */
int openmp_relng_1st_small_grids_init (openmp_pscmc_env * pe ,openmp_relng_1st_small_grids_struct * kerstr ){
    (void) pe ;
    (void) kerstr ;
    return 0 ;
}
/* Report the size of the kernel parameter struct through *len. */
void openmp_relng_1st_small_grids_get_struct_len (size_t * len ){
    *len = sizeof (openmp_relng_1st_small_grids_struct) ;
}
/* One compute unit per available OpenMP thread. */
int openmp_relng_1st_small_grids_get_num_compute_units (openmp_relng_1st_small_grids_struct * kerstr ){
    (void) kerstr ;
    return omp_get_max_threads ( ) ;
}
/* The x extent handled per kernel invocation is fixed at 1. */
int openmp_relng_1st_small_grids_get_xlen (){
    return 1 ;
}
/*
 * Run the relng_1st_small_grids kernel over a scmc_internal_g_xlen x
 * scmc_internal_g_ylen grid.  The y dimension is distributed round-robin
 * (stride = team size) across the OpenMP threads; for each (xid, yid) cell
 * the generated compute kernel is called with the pointers and scalars
 * previously bound into kerstr by the set_parameter_* functions.
 * Returns 0.
 *
 * Fixed: removed the unused ysingle/ymin/ymax locals — they computed a
 * block partition that the round-robin loop below never uses.
 */
int openmp_relng_1st_small_grids_exec (openmp_relng_1st_small_grids_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int xid ;
int yid ;
int numt = omp_get_num_threads ( ) ;
int tid = omp_get_thread_num ( ) ;
for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt )))
{
for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 )))
{
openmp_relng_1st_small_grids_scmc_kernel ( ( kerstr )->inoutput , ( kerstr )->xyzw , ( kerstr )->cu_cache , ( kerstr )->cu_xyzw , ( kerstr )->xoffset , ( kerstr )->yoffset , ( kerstr )->zoffset , ( kerstr )->fieldE , ( kerstr )->fieldB , ( kerstr )->fieldB1 , ( kerstr )->LFoutJ , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->grid_cache_len)[0] , ( ( kerstr )->cu_cache_length)[0] , ( ( kerstr )->DELTA_X)[0] , ( ( kerstr )->DELTA_Y)[0] , ( ( kerstr )->DELTA_Z)[0] , ( ( kerstr )->Mass0)[0] , ( ( kerstr )->Charge0)[0] , ( ( kerstr )->Deltat)[0] , ( ( kerstr )->Tori_X0)[0] , ( ( kerstr )->Solve_Err)[0] , yid , scmc_internal_g_ylen );
}}} return 0 ;}
/*
 * Generated parameter setters for the relng_1st_small_grids kernel.
 * Each setter binds the device data pointer pm->d_data to the named field
 * of the kernel parameter struct kerstr.
 * Fixed: every setter is declared int but had no return statement, which is
 * undefined behavior if the caller uses the result (C11 6.9.1p12); each now
 * returns 0 (success), matching the init/exec convention in this file.
 */
int openmp_relng_1st_small_grids_scmc_set_parameter_inoutput (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inoutput = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_xyzw (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xyzw = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_cu_cache (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_cu_xyzw (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_xyzw = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_xoffset (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xoffset = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_yoffset (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yoffset = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_zoffset (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zoffset = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_fieldE (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldE = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_fieldB (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_fieldB1 (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB1 = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_LFoutJ (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->LFoutJ = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_XLEN (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_YLEN (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_ZLEN (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_ovlp (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_numvec (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_num_ele (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_grid_cache_len (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->grid_cache_len = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_cu_cache_length (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache_length = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_DELTA_X (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_X = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_DELTA_Y (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Y = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_DELTA_Z (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Z = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_Mass0 (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Mass0 = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_Charge0 (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Charge0 = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_Deltat (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Deltat = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_Tori_X0 (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Tori_X0 = pm->d_data;
    return 0;
}
int openmp_relng_1st_small_grids_scmc_set_parameter_Solve_Err (openmp_relng_1st_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Solve_Err = pm->d_data;
    return 0;
}
/* No per-kernel initialization is needed for this generated kernel. */
int openmp_geo_rel_1st_bwd_sg2_small_grids_init (openmp_pscmc_env * pe ,openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ){
    (void) pe ;
    (void) kerstr ;
    return 0 ;
}
/* Report the size of the kernel parameter struct through *len. */
void openmp_geo_rel_1st_bwd_sg2_small_grids_get_struct_len (size_t * len ){
    *len = sizeof (openmp_geo_rel_1st_bwd_sg2_small_grids_struct) ;
}
/* One compute unit per available OpenMP thread. */
int openmp_geo_rel_1st_bwd_sg2_small_grids_get_num_compute_units (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ){
    (void) kerstr ;
    return omp_get_max_threads ( ) ;
}
/* The x extent handled per kernel invocation is fixed at 1. */
int openmp_geo_rel_1st_bwd_sg2_small_grids_get_xlen (){
    return 1 ;
}
/*
 * Run the geo_rel_1st_bwd_sg2_small_grids kernel over a scmc_internal_g_xlen
 * x scmc_internal_g_ylen grid.  The y dimension is distributed round-robin
 * (stride = team size) across the OpenMP threads; for each (xid, yid) cell
 * the generated compute kernel is called with the pointers and scalars
 * previously bound into kerstr by the set_parameter_* functions.
 * Returns 0.
 *
 * Fixed: removed the unused ysingle/ymin/ymax locals — they computed a
 * block partition that the round-robin loop below never uses.
 */
int openmp_geo_rel_1st_bwd_sg2_small_grids_exec (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int xid ;
int yid ;
int numt = omp_get_num_threads ( ) ;
int tid = omp_get_thread_num ( ) ;
for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt )))
{
for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 )))
{
openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_kernel ( ( kerstr )->inoutput , ( kerstr )->xyzw , ( kerstr )->cu_cache , ( kerstr )->cu_xyzw , ( kerstr )->xoffset , ( kerstr )->yoffset , ( kerstr )->zoffset , ( kerstr )->fieldE , ( kerstr )->fieldB , ( kerstr )->fieldB1 , ( kerstr )->LFoutJ , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->grid_cache_len)[0] , ( ( kerstr )->cu_cache_length)[0] , ( ( kerstr )->DELTA_X)[0] , ( ( kerstr )->DELTA_Y)[0] , ( ( kerstr )->DELTA_Z)[0] , ( ( kerstr )->Mass0)[0] , ( ( kerstr )->Charge0)[0] , ( ( kerstr )->Deltat)[0] , ( ( kerstr )->Tori_X0)[0] , ( ( kerstr )->Solve_Err)[0] , yid , scmc_internal_g_ylen );
}}} return 0 ;}
/*
 * Generated parameter setters for the geo_rel_1st_bwd_sg2_small_grids kernel.
 * Each setter binds the device data pointer pm->d_data to the named field
 * of the kernel parameter struct kerstr.
 * Fixed: every setter is declared int but had no return statement, which is
 * undefined behavior if the caller uses the result (C11 6.9.1p12); each now
 * returns 0 (success), matching the init/exec convention in this file.
 */
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_inoutput (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inoutput = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_xyzw (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xyzw = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_cu_cache (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_cu_xyzw (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_xyzw = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_xoffset (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xoffset = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_yoffset (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yoffset = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_zoffset (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zoffset = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_fieldE (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldE = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_fieldB (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_fieldB1 (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB1 = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_LFoutJ (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->LFoutJ = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_XLEN (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_YLEN (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_ZLEN (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_ovlp (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_numvec (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_num_ele (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_grid_cache_len (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->grid_cache_len = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_cu_cache_length (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache_length = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_DELTA_X (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_X = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_DELTA_Y (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Y = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_DELTA_Z (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Z = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_Mass0 (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Mass0 = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_Charge0 (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Charge0 = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_Deltat (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Deltat = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_Tori_X0 (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Tori_X0 = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_Solve_Err (openmp_geo_rel_1st_bwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Solve_Err = pm->d_data;
    return 0;
}
/* No per-kernel initialization is needed for this generated kernel. */
int openmp_geo_rel_1st_bwd_small_grids_init (openmp_pscmc_env * pe ,openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ){
    (void) pe ;
    (void) kerstr ;
    return 0 ;
}
/* Report the size of the kernel parameter struct through *len. */
void openmp_geo_rel_1st_bwd_small_grids_get_struct_len (size_t * len ){
    *len = sizeof (openmp_geo_rel_1st_bwd_small_grids_struct) ;
}
/* One compute unit per available OpenMP thread. */
int openmp_geo_rel_1st_bwd_small_grids_get_num_compute_units (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ){
    (void) kerstr ;
    return omp_get_max_threads ( ) ;
}
/* The x extent handled per kernel invocation is fixed at 1. */
int openmp_geo_rel_1st_bwd_small_grids_get_xlen (){
    return 1 ;
}
/*
 * Run the geo_rel_1st_bwd_small_grids kernel over a scmc_internal_g_xlen x
 * scmc_internal_g_ylen grid.  The y dimension is distributed round-robin
 * (stride = team size) across the OpenMP threads; for each (xid, yid) cell
 * the generated compute kernel is called with the pointers and scalars
 * previously bound into kerstr by the set_parameter_* functions.
 * Returns 0.
 *
 * Fixed: removed the unused ysingle/ymin/ymax locals — they computed a
 * block partition that the round-robin loop below never uses.
 */
int openmp_geo_rel_1st_bwd_small_grids_exec (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int xid ;
int yid ;
int numt = omp_get_num_threads ( ) ;
int tid = omp_get_thread_num ( ) ;
for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt )))
{
for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 )))
{
openmp_geo_rel_1st_bwd_small_grids_scmc_kernel ( ( kerstr )->inoutput , ( kerstr )->xyzw , ( kerstr )->cu_cache , ( kerstr )->cu_xyzw , ( kerstr )->xoffset , ( kerstr )->yoffset , ( kerstr )->zoffset , ( kerstr )->fieldE , ( kerstr )->fieldB , ( kerstr )->fieldB1 , ( kerstr )->LFoutJ , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->grid_cache_len)[0] , ( ( kerstr )->cu_cache_length)[0] , ( ( kerstr )->DELTA_X)[0] , ( ( kerstr )->DELTA_Y)[0] , ( ( kerstr )->DELTA_Z)[0] , ( ( kerstr )->Mass0)[0] , ( ( kerstr )->Charge0)[0] , ( ( kerstr )->Deltat)[0] , ( ( kerstr )->Tori_X0)[0] , ( ( kerstr )->Solve_Err)[0] , yid , scmc_internal_g_ylen );
}}} return 0 ;}
/*
 * Generated parameter setters for the geo_rel_1st_bwd_small_grids kernel.
 * Each setter binds the device data pointer pm->d_data to the named field
 * of the kernel parameter struct kerstr.
 * Fixed: every setter is declared int but had no return statement, which is
 * undefined behavior if the caller uses the result (C11 6.9.1p12); each now
 * returns 0 (success), matching the init/exec convention in this file.
 */
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_inoutput (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inoutput = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_xyzw (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xyzw = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_cu_cache (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_cu_xyzw (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_xyzw = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_xoffset (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xoffset = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_yoffset (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yoffset = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_zoffset (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zoffset = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_fieldE (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldE = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_fieldB (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_fieldB1 (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB1 = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_LFoutJ (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->LFoutJ = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_XLEN (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_YLEN (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_ZLEN (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_ovlp (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_numvec (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_num_ele (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_grid_cache_len (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->grid_cache_len = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_cu_cache_length (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache_length = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_DELTA_X (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_X = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_DELTA_Y (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Y = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_DELTA_Z (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Z = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_Mass0 (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Mass0 = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_Charge0 (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Charge0 = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_Deltat (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Deltat = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_Tori_X0 (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Tori_X0 = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_Solve_Err (openmp_geo_rel_1st_bwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Solve_Err = pm->d_data;
    return 0;
}
/* No per-kernel initialization is needed for this generated kernel. */
int openmp_geo_rel_1st_fwd_sg2_small_grids_init (openmp_pscmc_env * pe ,openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ){
    (void) pe ;
    (void) kerstr ;
    return 0 ;
}
/* Report the size of the kernel parameter struct through *len. */
void openmp_geo_rel_1st_fwd_sg2_small_grids_get_struct_len (size_t * len ){
    *len = sizeof (openmp_geo_rel_1st_fwd_sg2_small_grids_struct) ;
}
/* One compute unit per available OpenMP thread. */
int openmp_geo_rel_1st_fwd_sg2_small_grids_get_num_compute_units (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ){
    (void) kerstr ;
    return omp_get_max_threads ( ) ;
}
/* The x extent handled per kernel invocation is fixed at 1. */
int openmp_geo_rel_1st_fwd_sg2_small_grids_get_xlen (){
    return 1 ;
}
/*
 * Run the geo_rel_1st_fwd_sg2_small_grids kernel over a scmc_internal_g_xlen
 * x scmc_internal_g_ylen grid.  The y dimension is distributed round-robin
 * (stride = team size) across the OpenMP threads; for each (xid, yid) cell
 * the generated compute kernel is called with the pointers and scalars
 * previously bound into kerstr by the set_parameter_* functions.
 * Returns 0.
 *
 * Fixed: removed the unused ysingle/ymin/ymax locals — they computed a
 * block partition that the round-robin loop below never uses.
 */
int openmp_geo_rel_1st_fwd_sg2_small_grids_exec (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int xid ;
int yid ;
int numt = omp_get_num_threads ( ) ;
int tid = omp_get_thread_num ( ) ;
for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt )))
{
for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 )))
{
openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_kernel ( ( kerstr )->inoutput , ( kerstr )->xyzw , ( kerstr )->cu_cache , ( kerstr )->cu_xyzw , ( kerstr )->xoffset , ( kerstr )->yoffset , ( kerstr )->zoffset , ( kerstr )->fieldE , ( kerstr )->fieldB , ( kerstr )->fieldB1 , ( kerstr )->LFoutJ , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->grid_cache_len)[0] , ( ( kerstr )->cu_cache_length)[0] , ( ( kerstr )->DELTA_X)[0] , ( ( kerstr )->DELTA_Y)[0] , ( ( kerstr )->DELTA_Z)[0] , ( ( kerstr )->Mass0)[0] , ( ( kerstr )->Charge0)[0] , ( ( kerstr )->Deltat)[0] , ( ( kerstr )->Tori_X0)[0] , ( ( kerstr )->Solve_Err)[0] , yid , scmc_internal_g_ylen );
}}} return 0 ;}
/*
 * Generated parameter setters for the geo_rel_1st_fwd_sg2_small_grids kernel.
 * Each setter binds the device data pointer pm->d_data to the named field
 * of the kernel parameter struct kerstr.
 * Fixed: every setter is declared int but had no return statement, which is
 * undefined behavior if the caller uses the result (C11 6.9.1p12); each now
 * returns 0 (success), matching the init/exec convention in this file.
 */
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_inoutput (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inoutput = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_xyzw (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xyzw = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_cu_cache (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_cu_xyzw (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_xyzw = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_xoffset (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xoffset = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_yoffset (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yoffset = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_zoffset (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zoffset = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_fieldE (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldE = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_fieldB (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_fieldB1 (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB1 = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_LFoutJ (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->LFoutJ = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_XLEN (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_YLEN (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_ZLEN (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_ovlp (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_numvec (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_num_ele (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_grid_cache_len (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->grid_cache_len = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_cu_cache_length (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache_length = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_DELTA_X (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_X = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_DELTA_Y (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Y = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_DELTA_Z (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Z = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_Mass0 (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Mass0 = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_Charge0 (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Charge0 = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_Deltat (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Deltat = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_Tori_X0 (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Tori_X0 = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_Solve_Err (openmp_geo_rel_1st_fwd_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Solve_Err = pm->d_data;
    return 0;
}
/* No per-kernel initialization is needed for this generated kernel. */
int openmp_geo_rel_1st_fwd_small_grids_init (openmp_pscmc_env * pe ,openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ){
    (void) pe ;
    (void) kerstr ;
    return 0 ;
}
/* Report the size of the kernel parameter struct through *len. */
void openmp_geo_rel_1st_fwd_small_grids_get_struct_len (size_t * len ){
    *len = sizeof (openmp_geo_rel_1st_fwd_small_grids_struct) ;
}
/* One compute unit per available OpenMP thread. */
int openmp_geo_rel_1st_fwd_small_grids_get_num_compute_units (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ){
    (void) kerstr ;
    return omp_get_max_threads ( ) ;
}
/* The x extent handled per kernel invocation is fixed at 1. */
int openmp_geo_rel_1st_fwd_small_grids_get_xlen (){
    return 1 ;
}
/*
 * Run the geo_rel_1st_fwd_small_grids kernel over a scmc_internal_g_xlen x
 * scmc_internal_g_ylen grid.  The y dimension is distributed round-robin
 * (stride = team size) across the OpenMP threads; for each (xid, yid) cell
 * the generated compute kernel is called with the pointers and scalars
 * previously bound into kerstr by the set_parameter_* functions.
 * Returns 0.
 *
 * Fixed: removed the unused ysingle/ymin/ymax locals — they computed a
 * block partition that the round-robin loop below never uses.
 */
int openmp_geo_rel_1st_fwd_small_grids_exec (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int xid ;
int yid ;
int numt = omp_get_num_threads ( ) ;
int tid = omp_get_thread_num ( ) ;
for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt )))
{
for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 )))
{
openmp_geo_rel_1st_fwd_small_grids_scmc_kernel ( ( kerstr )->inoutput , ( kerstr )->xyzw , ( kerstr )->cu_cache , ( kerstr )->cu_xyzw , ( kerstr )->xoffset , ( kerstr )->yoffset , ( kerstr )->zoffset , ( kerstr )->fieldE , ( kerstr )->fieldB , ( kerstr )->fieldB1 , ( kerstr )->LFoutJ , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->grid_cache_len)[0] , ( ( kerstr )->cu_cache_length)[0] , ( ( kerstr )->DELTA_X)[0] , ( ( kerstr )->DELTA_Y)[0] , ( ( kerstr )->DELTA_Z)[0] , ( ( kerstr )->Mass0)[0] , ( ( kerstr )->Charge0)[0] , ( ( kerstr )->Deltat)[0] , ( ( kerstr )->Tori_X0)[0] , ( ( kerstr )->Solve_Err)[0] , yid , scmc_internal_g_ylen );
}}} return 0 ;}
/* Parameter-binding setters for the small_grids kernel: each copies the
 * device-data pointer from the memory descriptor pm into the kernel-argument
 * struct field of the same name.
 * Fix: every setter is declared to return int but previously fell off the end
 * without a return statement (undefined behavior if the caller reads the
 * result -- C11 6.9.1p12); all now return 0 on success. */
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_inoutput (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->inoutput = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_xyzw (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->xyzw = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_cu_cache (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->cu_cache = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_cu_xyzw (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->cu_xyzw = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_xoffset (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->xoffset = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_yoffset (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->yoffset = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_zoffset (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->zoffset = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_fieldE (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->fieldE = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_fieldB (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->fieldB = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_fieldB1 (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->fieldB1 = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_LFoutJ (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->LFoutJ = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_XLEN (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->XLEN = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_YLEN (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->YLEN = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_ZLEN (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->ZLEN = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_ovlp (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->ovlp = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_numvec (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->numvec = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_num_ele (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->num_ele = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_grid_cache_len (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->grid_cache_len = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_cu_cache_length (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->cu_cache_length = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_DELTA_X (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->DELTA_X = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_DELTA_Y (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->DELTA_Y = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_DELTA_Z (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->DELTA_Z = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_Mass0 (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->Mass0 = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_Charge0 (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->Charge0 = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_Deltat (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->Deltat = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_Tori_X0 (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->Tori_X0 = pm->d_data;
  return 0;
}
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_Solve_Err (openmp_geo_rel_1st_fwd_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->Solve_Err = pm->d_data;
  return 0;
}
|
generator_gemm_common.c | /******************************************************************************
** Copyright (c) 2015-2019, Intel Corporation **
** All rights reserved. **
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. Neither the name of the copyright holder nor the names of its **
** contributors may be used to endorse or promote products derived **
** from this software without specific prior written permission. **
** **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include "generator_gemm_common.h"
#include "generator_common.h"
#include "generator_x86_instructions.h"
#include "libxsmm_main.h"
#if defined(LIBXSMM_OFFLOAD_TARGET)
# pragma offload_attribute(push,target(LIBXSMM_OFFLOAD_TARGET))
#endif
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <stdio.h>
#if defined(LIBXSMM_OFFLOAD_TARGET)
# pragma offload_attribute(pop)
#endif
LIBXSMM_API_INTERN
/* Populate the micro-kernel configuration (ISA, register budget, vector
 * length, load/store/FMA instruction selection) for a full-vector GEMM kernel.
 *
 * io_micro_kernel_config: [out] configuration being initialized
 * i_xgemm_desc:           [in]  GEMM descriptor (datatype, A/C alignment flags)
 * i_arch:                 [in]  target architecture string: "wsm", "snb",
 *                               "hsw", "knl", "knm", "skx", "clx", "cpx";
 *                               anything else falls back to a generic config
 * i_use_masking_a_c:      [in]  non-zero when A/C accesses are masked; masked
 *                               stores cannot use non-temporal (streaming)
 *                               moves, so a regular aligned move is used
 *
 * Fix (AVX-512 F32/I16/BF16 masked paths): the else-branch previously
 * re-assigned c_vmove_instruction instead of c_vmove_nts_instruction, leaving
 * the NTS slot at the zero value from the memset -- now consistent with the
 * F64 path. */
void libxsmm_generator_gemm_init_micro_kernel_config_fullvector( libxsmm_micro_kernel_config* io_micro_kernel_config,
    const libxsmm_gemm_descriptor* i_xgemm_desc,
    const char* i_arch,
    const unsigned int i_use_masking_a_c ) {
  memset(io_micro_kernel_config, 0, sizeof(*io_micro_kernel_config)); /* avoid warning "maybe used uninitialized" */
  if ( strcmp( i_arch, "wsm" ) == 0 ) {
    /* Westmere: SSE3, 16 xmm registers */
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_SSE3;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 2;
      io_micro_kernel_config->datatype_size = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVDDUP;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVAPD;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULPD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDPD;
    } else {
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_SHUFPS;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVAPS;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPS;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULPS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDPS;
    }
  } else if ( strcmp( i_arch, "snb" ) == 0 ) {
    /* Sandy Bridge: AVX (no FMA), 16 ymm registers */
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'y';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
    } else {
      io_micro_kernel_config->vector_length = 8;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
    }
  } else if ( strcmp( i_arch, "hsw" ) == 0 ) {
    /* Haswell: AVX2 with FMA (vmul slot holds the FMA, vadd is unused) */
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX2;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'y';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
    } else {
      io_micro_kernel_config->vector_length = 8;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
    }
  } else if ( (strcmp( i_arch, "knl" ) == 0) ||
              (strcmp( i_arch, "knm" ) == 0) ||
              (strcmp( i_arch, "skx" ) == 0) ||
              (strcmp( i_arch, "clx" ) == 0) ||
              (strcmp( i_arch, "cpx" ) == 0) ) {
    /* AVX-512 family: pick the exact instruction set, then configure by datatype */
    if ((strcmp( i_arch, "knl" ) == 0)) {
      io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_MIC;
    } else if ((strcmp( i_arch, "knm" ) == 0)) {
      io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_KNM;
    } else if ((strcmp( i_arch, "skx" ) == 0)) {
      io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_CORE;
    } else if ((strcmp( i_arch, "clx" ) == 0)) {
      io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_CLX;
    } else if ((strcmp( i_arch, "cpx" ) == 0)) {
      io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_CPX;
    } else {
      /* shouldn't happen */
    }
    io_micro_kernel_config->vector_reg_count = 32;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'z';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 8;
      io_micro_kernel_config->datatype_size = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        if ( i_use_masking_a_c == 0 ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
        } else {
          /* masked stores cannot be non-temporal */
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
    } else if ( LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 16;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        if ( i_use_masking_a_c == 0 ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
        } else {
          /* fix: set the NTS slot (previously re-assigned c_vmove_instruction,
           * leaving c_vmove_nts_instruction zeroed by the memset) */
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
    } else if ( LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
      io_micro_kernel_config->vector_length = 16;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        if ( i_use_masking_a_c == 0 ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
        } else {
          /* fix: set the NTS slot (previously re-assigned c_vmove_instruction) */
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VPADDD;
    } else if ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
      io_micro_kernel_config->vector_length = 16;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        if ( i_use_masking_a_c == 0 ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
        } else {
          /* fix: set the NTS slot (previously re-assigned c_vmove_instruction) */
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
    } else {
      /* shouldn't happen as we caught this case earlier */
      io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
      io_micro_kernel_config->vector_reg_count = 0;
      io_micro_kernel_config->use_masking_a_c = 0;
      io_micro_kernel_config->vector_name = 'a';
      io_micro_kernel_config->vector_length = 0;
      io_micro_kernel_config->datatype_size = 0;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
    }
  } else {
    /* unknown architecture: generic fallback, everything undefined */
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
    io_micro_kernel_config->vector_reg_count = 0;
    io_micro_kernel_config->use_masking_a_c = 0;
    io_micro_kernel_config->vector_name = 'a';
    io_micro_kernel_config->vector_length = 0;
    io_micro_kernel_config->datatype_size = 0;
    io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
  }
  /* scalar ALU instructions are the same on every supported architecture */
  io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
  io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
  io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
  io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
  io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
  io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}
LIBXSMM_API_INTERN
/* Populate the micro-kernel configuration for a half-vector GEMM kernel
 * (128-bit 'x' registers on AVX/AVX2 targets).
 * - "wsm" has no half-vector path and is redirected to the scalar config;
 * - AVX-512 targets are redirected to the full-vector config;
 * - unknown architectures fall back to a generic, all-UNDEF config.
 * Parameters mirror libxsmm_generator_gemm_init_micro_kernel_config_fullvector:
 * out-config, GEMM descriptor (datatype/alignment flags), arch string, and the
 * A/C masking flag. */
void libxsmm_generator_gemm_init_micro_kernel_config_halfvector( libxsmm_micro_kernel_config* io_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const char* i_arch,
const unsigned int i_use_masking_a_c ) {
/* Westmere (SSE3) cannot do a half vector: fall back to the scalar config */
if ( strcmp( i_arch, "wsm" ) == 0 ) {
#if !defined(NDEBUG)
fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_halfvector, redirecting to scalar, please fix the generation code!!!\n");
#endif
libxsmm_generator_gemm_init_micro_kernel_config_scalar( io_micro_kernel_config, i_xgemm_desc, i_arch, i_use_masking_a_c );
} else if ( strcmp( i_arch, "snb" ) == 0 ) {
/* Sandy Bridge: AVX with 128-bit xmm operation (no FMA) */
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
/* F64: two doubles per xmm register */
io_micro_kernel_config->vector_length = 2;
io_micro_kernel_config->datatype_size = 8;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVDDUP;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
} else {
/* F32: four floats per xmm register */
io_micro_kernel_config->vector_length = 4;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
}
} else if ( strcmp( i_arch, "hsw" ) == 0 ) {
/* Haswell: AVX2 with FMA (vmul slot holds the FMA, vadd unused) */
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX2;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 2;
io_micro_kernel_config->datatype_size = 8;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVDDUP;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
} else {
io_micro_kernel_config->vector_length = 4;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
} else if ( (strcmp( i_arch, "knl" ) == 0) ||
(strcmp( i_arch, "knm" ) == 0) ||
(strcmp( i_arch, "skx" ) == 0) ||
(strcmp( i_arch, "clx" ) == 0) ||
(strcmp( i_arch, "cpx" ) == 0) ) {
/* AVX-512: no half-vector kernels; use the full-vector configuration */
#if !defined(NDEBUG)
fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_halfvector, AVX512 redirecting to fullvector!\n");
#endif
libxsmm_generator_gemm_init_micro_kernel_config_fullvector( io_micro_kernel_config, i_xgemm_desc, i_arch, i_use_masking_a_c );
} else {
/* unknown architecture: generic fallback, everything undefined */
io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
io_micro_kernel_config->vector_reg_count = 0;
io_micro_kernel_config->use_masking_a_c = 0;
io_micro_kernel_config->vector_name = 'a';
io_micro_kernel_config->vector_length = 0;
io_micro_kernel_config->datatype_size = 0;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
/* scalar ALU instructions are the same on every supported architecture */
io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}
/**
 * Initializes io_micro_kernel_config for a SCALAR GEMM micro-kernel:
 * exactly one element per "vector" slot (vector_length == 1), using scalar
 * SSE/AVX/AVX-512 moves and arithmetic on xmm registers (vector_name 'x').
 * Instruction selection depends on the architecture string i_arch and on
 * the input precision from i_xgemm_desc->datatype (F64 branch vs. the
 * implicit F32 else-branch). Unknown architectures fall back to a generic,
 * all-UNDEF configuration. The trailing ALU/prefetch choices are the same
 * for every architecture.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_scalar( libxsmm_micro_kernel_config* io_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const char* i_arch,
const unsigned int i_use_masking_a_c ) {
/* Westmere: SSE3, non-VEX scalar moves and separate multiply/add */
if ( strcmp( i_arch, "wsm" ) == 0 ) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_SSE3;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
/* double precision: scalar SD forms, 8-byte elements */
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 8;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVSD;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULSD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDSD;
} else {
/* single precision: scalar SS forms, 4-byte elements */
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 4;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPS;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULSS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDSS;
}
/* Sandy Bridge: AVX (VEX-encoded) scalar moves, still separate mul/add (no FMA) */
} else if ( strcmp( i_arch, "snb" ) == 0 ) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 8;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULSD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSD;
} else {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 4;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULSS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSS;
}
/* Haswell: AVX2 with FMA; vmul becomes VFMADD231 and vadd is unused (UNDEF) */
} else if ( strcmp( i_arch, "hsw" ) == 0 ) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX2;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 8;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
} else {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 4;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VFMADD231SS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
/* AVX-512 family: pick the precise ISA level per architecture string */
} else if ( (strcmp( i_arch, "knl" ) == 0) ||
(strcmp( i_arch, "knm" ) == 0) ||
(strcmp( i_arch, "skx" ) == 0) ||
(strcmp( i_arch, "clx" ) == 0) ||
(strcmp( i_arch, "cpx" ) == 0) ) {
if ((strcmp( i_arch, "knl" ) == 0)) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_MIC;
} else if ((strcmp( i_arch, "knm" ) == 0)) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_KNM;
} else if ((strcmp( i_arch, "skx" ) == 0)) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_CORE;
} else if ((strcmp( i_arch, "clx" ) == 0)) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_CLX;
} else if ((strcmp( i_arch, "cpx" ) == 0)) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_CPX;
} else {
/* shouldn't happen */
}
/* NOTE: scalar kernels stay on xmm, so only 16 registers are used even
 * though AVX-512 would offer 32 */
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 8;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSD;
} else {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 4;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSS;
}
} else {
/* unknown architecture: generic fallback, everything undefined/disabled */
io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
io_micro_kernel_config->vector_reg_count = 0;
io_micro_kernel_config->use_masking_a_c = 0;
io_micro_kernel_config->vector_name = 'a';
io_micro_kernel_config->vector_length = 0;
io_micro_kernel_config->datatype_size = 0;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
/* architecture-independent scalar ALU / prefetch selections */
io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}
/**
 * Appends C source to the generated (source-mode) code that increments the
 * global flop counter by 2*m*n*k for this GEMM kernel. Only emitted when
 * io_generated_code->code_type == 0 (C-source generation); the emitted
 * increment is guarded by NDEBUG and made atomic under OpenMP.
 *
 * Fix: the flop count is now computed in 64-bit. The previous
 * "2u * m * n * k" wrapped in 32-bit unsigned arithmetic for moderately
 * sized kernels (e.g. m = n = k = 1626 already exceeds UINT_MAX), emitting
 * a wrong increment into the generated code.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_add_flop_counter( libxsmm_generated_code* io_generated_code,
const libxsmm_gemm_descriptor* i_xgemm_desc ) {
if ( io_generated_code->code_type == 0 ) {
char l_new_code[512];
const unsigned int l_max_code_length = sizeof(l_new_code) - 1;
int l_code_length = 0;
/* 2ULL forces the whole product into unsigned long long before any
 * multiplication can overflow; NOTE(review): the generated symbol
 * libxsmm_num_total_flops is assumed to be 64-bit wide - confirm. */
const unsigned long long l_flops = 2ULL * i_xgemm_desc->m * i_xgemm_desc->n * i_xgemm_desc->k;
l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#ifndef NDEBUG\n" );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#ifdef _OPENMP\n" );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#pragma omp atomic\n" );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#endif\n" );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "libxsmm_num_total_flops += %llu;\n", l_flops );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#endif\n" );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
}
/**
 * Emits the head of the k-loop: zero the loop-counter register, place the
 * label that the matching footer jumps back to, then bump the counter by
 * the k-blocking factor once per iteration.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_kloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const unsigned int i_m_blocking,
const unsigned int i_k_blocking ) {
const unsigned int l_kloop_reg = i_gp_reg_mapping->gp_reg_kloop;
LIBXSMM_UNUSED(i_m_blocking);
/* kloop := 0 */
libxsmm_x86_instruction_alu_imm(io_generated_code, i_micro_kernel_config->alu_mov_instruction, l_kloop_reg, 0);
/* loop-entry label (target of the footer's backward jump) */
libxsmm_x86_instruction_register_jump_back_label(io_generated_code, io_loop_label_tracker);
/* kloop += k-blocking */
libxsmm_x86_instruction_alu_imm(io_generated_code, i_micro_kernel_config->alu_add_instruction, l_kloop_reg, i_k_blocking);
}
/**
 * Emits the tail of the k-loop: compare the counter against the blocked-k
 * bound and jump back while below. When i_kloop_complete is set, the B
 * pointer is additionally rewound over the k-range just consumed (the
 * stride depends on whether B is transposed).
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_kloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_m_blocking,
const unsigned int i_max_blocked_k,
const unsigned int i_kloop_complete ) {
LIBXSMM_UNUSED(i_m_blocking);
/* while (kloop < max_blocked_k) goto loop head */
libxsmm_x86_instruction_alu_imm(io_generated_code, i_micro_kernel_config->alu_cmp_instruction,
                                i_gp_reg_mapping->gp_reg_kloop, i_max_blocked_k);
libxsmm_x86_instruction_jump_back_to_label(io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker);
if (0 != i_kloop_complete) {
  /* rewind B; transposed B walks whole ldb-columns per k step */
  const int l_b_offset = (0 != (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B))
    ? (int)(i_xgemm_desc->ldb * i_xgemm_desc->k * i_micro_kernel_config->datatype_size)
    : (int)(i_xgemm_desc->k * i_micro_kernel_config->datatype_size);
  libxsmm_x86_instruction_alu_imm(io_generated_code, i_micro_kernel_config->alu_sub_instruction,
                                  i_gp_reg_mapping->gp_reg_b, l_b_offset);
}
}
/**
 * Emits the head of the batch-reduce loop: the reduce counter register is
 * cleared and the back-jump label for the loop body is placed.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_reduceloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config ) {
const unsigned int l_reduce_reg = i_gp_reg_mapping->gp_reg_reduce_loop;
/* reduce counter := 0 */
libxsmm_x86_instruction_alu_imm(io_generated_code, i_micro_kernel_config->alu_mov_instruction, l_reduce_reg, 0);
/* label the footer's conditional jump returns to */
libxsmm_x86_instruction_register_jump_back_label(io_generated_code, io_loop_label_tracker);
}
/**
 * Emits the tail of the batch-reduce loop: increments the reduce counter,
 * compares it against the reduce count register, and jumps back to the loop
 * head while more batch entries remain.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_reduceloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc) {
const unsigned int l_reduce_reg = i_gp_reg_mapping->gp_reg_reduce_loop;
LIBXSMM_UNUSED(i_xgemm_desc);
/* ++reduce counter */
libxsmm_x86_instruction_alu_imm(io_generated_code, i_micro_kernel_config->alu_add_instruction, l_reduce_reg, 1);
/* compare against total number of batch entries (register operand) */
libxsmm_x86_instruction_alu_reg(io_generated_code, i_micro_kernel_config->alu_cmp_instruction,
                                i_gp_reg_mapping->gp_reg_reduce_count, l_reduce_reg);
libxsmm_x86_instruction_jump_back_to_label(io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker);
}
/**
 * Emits the head of the n-loop: places the back-jump label, advances the
 * n counter by the n-blocking factor, and resets the m counter for the
 * nested m-loop.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_nloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const unsigned int i_n_blocking) {
/* loop-entry label */
libxsmm_x86_instruction_register_jump_back_label(io_generated_code, io_loop_label_tracker);
/* nloop += n-blocking */
libxsmm_x86_instruction_alu_imm(io_generated_code, i_micro_kernel_config->alu_add_instruction,
                                i_gp_reg_mapping->gp_reg_nloop, i_n_blocking);
/* mloop := 0 for the inner loop */
libxsmm_x86_instruction_alu_imm(io_generated_code, i_micro_kernel_config->alu_mov_instruction,
                                i_gp_reg_mapping->gp_reg_mloop, 0);
}
/**
 * Emits the tail of the n-loop: repositions the C pointer to the start of
 * the next n-panel, advances/rewinds the B pointer(s) for the next panel
 * (either directly, or - in batch-reduce mode - each entry of the B pointer
 * array in a generated reduce loop), then compares the n counter and jumps
 * back while panels remain. The emission order of these instructions defines
 * the generated machine code and must not be changed.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_nloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_n_blocking,
const unsigned int i_n_done ) {
/* advance C: forward by n_blocking columns, back by the m extent already
 * covered; BF16 output stores 2-byte elements (datatype_size/2) */
if ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
(i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size/2)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size/2)) );
} else {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
(i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
}
#if 0
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c_prefetch,
(i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
}
#endif
if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE) {
/* handle trans B */
int l_b_offset = 0;
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 ) {
l_b_offset = i_n_blocking * i_micro_kernel_config->datatype_size;
} else {
l_b_offset = i_n_blocking * i_xgemm_desc->ldb * i_micro_kernel_config->datatype_size;
}
/* batch-reduce: rewind each A-array entry by the covered m extent and
 * advance each B-array entry by l_b_offset, via a generated reduce loop;
 * help_0 is used as scratch and saved/restored around the loop */
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
/* load A[i] pointer from the pointer array (scale 8 = pointer size) */
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
i_gp_reg_mapping->gp_reg_help_0, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
/* store the adjusted A[i] pointer back (last arg 1 = store) */
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
1 );
/* same load/adjust/store round-trip for B[i] */
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_b,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_help_0, l_b_offset );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_b,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
1 );
libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
} else {
/* handle trans B */
int l_b_offset = 0;
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 ) {
l_b_offset = i_n_blocking * i_micro_kernel_config->datatype_size;
} else {
l_b_offset = i_n_blocking * i_xgemm_desc->ldb * i_micro_kernel_config->datatype_size;
}
/* plain GEMM: adjust the B and A base pointers directly */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_b, l_b_offset );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
i_gp_reg_mapping->gp_reg_a, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
}
/* while (nloop < n_done) goto n-loop head */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_nloop, i_n_done );
libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}
/**
 * Emits the head of the m-loop: places the back-jump label and advances the
 * m counter by the m-blocking factor.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_mloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const unsigned int i_m_blocking ) {
/* loop-entry label */
libxsmm_x86_instruction_register_jump_back_label(io_generated_code, io_loop_label_tracker);
/* mloop += m-blocking */
libxsmm_x86_instruction_alu_imm(io_generated_code, i_micro_kernel_config->alu_add_instruction,
                                i_gp_reg_mapping->gp_reg_mloop, i_m_blocking);
}
/**
 * Emits the tail of the m-loop: advances the C pointer past the finished
 * m-block, updates the B-prefetch pointer where applicable, and advances
 * (or rewinds, when the k-loop was not unrolled and A must be reset over
 * the consumed k-range) the A and A-prefetch pointers. In batch-reduce
 * mode every entry of the A pointer array is updated in-place via a
 * generated reduce loop using help_0 as scratch. Finally the m counter is
 * compared against i_m_done and the backward jump is emitted. Instruction
 * emission order defines the generated machine code - do not reorder.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_mloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_m_blocking,
const unsigned int i_m_done,
const unsigned int i_k_unrolled ) {
/* advance C pointer */
if ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
/* BF16 output stores 2-byte elements (datatype_size/2) */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_c, i_m_blocking*(i_micro_kernel_config->datatype_size/2) );
} else {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_c, i_m_blocking*(i_micro_kernel_config->datatype_size) );
}
/* C prefetch */
#if 0
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_c_prefetch, i_m_blocking*(i_micro_kernel_config->datatype_size) );
}
#endif
/* B prefetch */
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_JPST) {
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_b_prefetch, i_m_blocking*(i_micro_kernel_config->datatype_size) );
}
}
/* k-loop not unrolled: A pointers advanced during the k-loop, so they must
 * be rewound over the whole k-range minus the next m-block's offset */
if (i_k_unrolled == 0) {
/* A prefetch */
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C) {
if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE) {
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ) {
/* batch-reduce: rewind each A-prefetch array entry in a reduce loop */
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
/* load pointer (scale 8 = pointer size), adjust, store back */
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a_prefetch,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0,
((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) -
(i_m_blocking * (i_micro_kernel_config->datatype_size)) );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a_prefetch,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
1 );
libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
}
} else {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_a_prefetch,
((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) -
(i_m_blocking * (i_micro_kernel_config->datatype_size)) );
}
}
/* advance A pointer */
if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE) {
/* batch-reduce: rewind each A array entry in a reduce loop */
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0,
((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
1 );
libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
} else {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_a,
((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
}
} else {
/* k-loop fully unrolled: A pointers were not advanced, only step forward
 * by the finished m-block */
/* A prefetch */
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C) {
if ( i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE ) {
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a_prefetch,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0,
(i_m_blocking * (i_micro_kernel_config->datatype_size)) );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a_prefetch,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
1 );
libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
} else {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_a_prefetch,
(i_m_blocking * (i_micro_kernel_config->datatype_size)) );
}
}
if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE) {
/* batch-reduce: advance each A array entry in a reduce loop */
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0,
(i_m_blocking * (i_micro_kernel_config->datatype_size)) );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
1 );
libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
} else {
/* advance A pointer */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_a,
(i_m_blocking * (i_micro_kernel_config->datatype_size)) );
}
}
/* loop handling */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_mloop, i_m_done );
libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}
/**
 * Emits the code that initializes the accumulator vector registers for one
 * register block of C. With beta=1 the accumulators are loaded from memory
 * (with a special path for BF16 output, which loads 16-bit values and
 * widens/shifts them into FP32 bit patterns, and an xor-zero path for the
 * I16->F32 case where C is added during the final scaling); with beta=0
 * the accumulators are simply xor-zeroed. The accumulator block occupies
 * the highest vector registers, starting at
 * vector_reg_count - n_blocking*m_blocking.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_load_C( libxsmm_generated_code* io_generated_code,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_m_blocking,
const unsigned int i_n_blocking ) {
unsigned int l_m_blocking, l_vec_reg_acc_start;
/* register blocking counter in n */
unsigned int l_n = 0;
/* register blocking counter in m */
unsigned int l_m = 0;
assert(0 < i_micro_kernel_config->vector_length);
/* deriving register blocking from kernel config */
l_m_blocking = i_m_blocking / i_micro_kernel_config->vector_length;
/* start register of accumulator */
l_vec_reg_acc_start = i_micro_kernel_config->vector_reg_count - (i_n_blocking * l_m_blocking);
#if !defined(NDEBUG)
/* Do some test if it is possible to generate the requested code.
This is not done in release mode and therefore bad
things might happen.... HUAAH */
if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE3 ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX2 ) {
if ( (i_n_blocking > 3) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else if ( i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_MIC ||
( (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE) && (i_m_blocking == i_micro_kernel_config->vector_length) ) ) {
if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (i_m_blocking != i_micro_kernel_config->vector_length) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else if ( i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE ) {
if ( (i_n_blocking > 6) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else {}
if ( i_m_blocking % i_micro_kernel_config->vector_length != 0 ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_M_BLOCK );
return;
}
#endif /*!defined(NDEBUG)*/
/* load C accumulator */
if (0 == (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=1 */
if ( ( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_KNM) ||
(i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CLX) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CPX) ) &&
( (LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
/* we add when scaling during conversion to FP32 */
/* I16->F32: C is folded in later, so start from zeroed accumulators */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->vxor_instruction,
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
}
}
} else if ( ( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_KNM) ||
(i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CLX) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CPX) ) &&
( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
/* we add when scaling during conversion to FP32 */
/* BF16 C: load 16-bit elements, widen to 32-bit lanes, shift left by 16
 * so each lane holds the FP32 bit pattern of the BF16 value */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
/* load 16 bit values into ymm */
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->c_vmove_instruction,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
'y',
0, i_micro_kernel_config->use_masking_a_c, 1, 0 );
/* convert 16 bit values into 32 bit (integer convert) */
libxsmm_x86_instruction_vec_compute_convert( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VPMOVSXWD,
i_micro_kernel_config->vector_name,
0, LIBXSMM_X86_VEC_REG_UNDEF,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
LIBXSMM_X86_VEC_REG_UNDEF);
/* shift 16 bits to the left to generate valid FP32 numbers */
libxsmm_x86_instruction_vec_shuffle_reg(io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VPSLLD,
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
LIBXSMM_X86_VEC_REG_UNDEF,
16);
}
}
} else {
/* adding to C, so let's load C */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->c_vmove_instruction,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size),
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), i_micro_kernel_config->use_masking_a_c, 1, 0 );
}
#if 0
/* NOTE(review): dead code; "l_m += l_m++" is an unsequenced modification
 * (undefined behavior) and must be fixed before ever re-enabling */
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
for (l_m = 0; l_m < l_m_blocking; l_m += l_m++ ) {
libxsmm_x86_instruction_prefetch( io_generated_code,
i_micro_kernel_config->prefetch_instruction,
i_gp_reg_mapping->gp_reg_c_prefetch,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
}
}
#endif
}
}
} else {
/* overwriting C, so let's xout accumulator */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->vxor_instruction,
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
}
#if 0
/* NOTE(review): dead code; same unsequenced "l_m += l_m++" issue as above */
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
for (l_m = 0; l_m < l_m_blocking; l_m += l_m++ ) {
libxsmm_x86_instruction_prefetch( io_generated_code,
i_micro_kernel_config->prefetch_instruction,
i_gp_reg_mapping->gp_reg_c_prefetch,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
}
}
#endif
}
}
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_store_C( libxsmm_generated_code*             io_generated_code,
                                     const libxsmm_gp_reg_mapping*       i_gp_reg_mapping,
                                     const libxsmm_micro_kernel_config*  i_micro_kernel_config,
                                     const libxsmm_gemm_descriptor*      i_xgemm_desc,
                                     const unsigned int                  i_m_blocking,
                                     const unsigned int                  i_n_blocking )
{
  /* Emits the instructions that write the accumulator register block back to C.
   * Three paths exist:
   *   1) I16 input / F32 output on AVX-512 (CORE/KNM/CLX/CPX): convert the int32
   *      accumulators to FP32, scale by a run-time factor read from the stack,
   *      then store.
   *   2) BF16 input / BF16 output on AVX-512: truncate-round the FP32 accumulators
   *      to BF16 (arithmetic shift + VPMOVDW) and store 16-bit values.
   *   3) default: store the accumulators as-is (FP32/FP64/int32).
   * Optionally emits B-prefetches through the C pointer distance. */

  /* deriving register blocking from kernel config */
  unsigned int l_m_blocking = i_m_blocking/i_micro_kernel_config->vector_length;
  /* register blocking counter in n */
  unsigned int l_n = 0;
  /* register blocking counter in m */
  unsigned int l_m = 0;
  /* start register of accumulator: the accumulator block occupies the highest
   * (i_n_blocking * l_m_blocking) vector registers */
  unsigned int l_vec_reg_acc_start = i_micro_kernel_config->vector_reg_count - (i_n_blocking * l_m_blocking);
  /* select store instruction: non-temporal stores when the NTS hint is set */
  unsigned int l_vstore = (LIBXSMM_GEMM_FLAG_ALIGN_C_NTS_HINT == (LIBXSMM_GEMM_FLAG_ALIGN_C_NTS_HINT & i_xgemm_desc->flags)) ? i_micro_kernel_config->c_vmove_nts_instruction : i_micro_kernel_config->c_vmove_instruction;

  /* @TODO fix this test */
#if !defined(NDEBUG)
  /* sanity checks on the requested register blocking vs. the ISA limits */
  if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE3 ||
      i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX  ||
      i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX2 ) {
    if ( (i_n_blocking > 3) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else if ( (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512 && i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512_CORE) ||
              ( (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE) && (i_m_blocking == i_micro_kernel_config->vector_length) ) ) {
    /* pre-CORE AVX-512 (and CORE with a single-vector M block) requires exactly one vector in M */
    if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (i_m_blocking != i_micro_kernel_config->vector_length) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else if ( i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE ) {
    if ( (i_n_blocking > 6) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else {}
  /* M blocking must be a whole number of vectors */
  if ( i_m_blocking % i_micro_kernel_config->vector_length != 0 ) {
    LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_M_BLOCK );
    return;
  }
#endif

  /* in case of IGEMM just do some potential conversion to FP */
  /* let convert the int32 accumulator into a FP32 values */
  if ( ( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_KNM) ||
         (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CLX) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CPX) ) &&
       ( (LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
    /* load address of scaling factor from stack (offset 48 by kernel ABI convention
     * used here -- TODO(review): confirm against the prologue that pushes it) */
    libxsmm_x86_instruction_alu_mem( io_generated_code,
        i_micro_kernel_config->alu_mov_instruction,
        LIBXSMM_X86_GP_REG_RSP,
        LIBXSMM_X86_GP_REG_UNDEF, 0,
        48,
        i_gp_reg_mapping->gp_reg_help_1,
        0 );
    /* broadcast scaling factor into a vector register (vreg 0 is used as scratch) */
    libxsmm_x86_instruction_vec_move( io_generated_code,
        i_micro_kernel_config->instruction_set,
        LIBXSMM_X86_INSTR_VBROADCASTSS,
        i_gp_reg_mapping->gp_reg_help_1,
        LIBXSMM_X86_GP_REG_UNDEF, 0,
        0,
        i_micro_kernel_config->vector_name, 0,
        0, 1, 0 );

    /* loop over the accumulator, convert and scale */
    for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
      for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
        /* convert current accumulator register into FP32 */
        libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
            i_micro_kernel_config->instruction_set,
            LIBXSMM_X86_INSTR_VCVTDQ2PS,
            i_micro_kernel_config->vector_name,
            l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
            l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
            LIBXSMM_X86_VEC_REG_UNDEF );
        /* scale it */
        if (0 == (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=1 */
          /* acc = acc * scale + C (fused add of the existing C values) */
          libxsmm_x86_instruction_vec_compute_mem( io_generated_code,
              i_micro_kernel_config->instruction_set,
              LIBXSMM_X86_INSTR_VFMADD213PS,
              0,
              i_gp_reg_mapping->gp_reg_c,
              LIBXSMM_X86_GP_REG_UNDEF,
              0,
              ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size),
              i_micro_kernel_config->vector_name,
              0,
              l_vec_reg_acc_start + l_m + (l_m_blocking * l_n));
        } else {
          /* Beta=0: acc = acc * scale */
          libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
              i_micro_kernel_config->instruction_set,
              LIBXSMM_X86_INSTR_VMULPS,
              i_micro_kernel_config->vector_name,
              0,
              l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
              l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
        }
      }
    }

    /* storing C accumulator */
    for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
      for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
        libxsmm_x86_instruction_vec_move( io_generated_code,
            i_micro_kernel_config->instruction_set,
            l_vstore,
            i_gp_reg_mapping->gp_reg_c,
            LIBXSMM_X86_GP_REG_UNDEF, 0,
            ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size),
            i_micro_kernel_config->vector_name,
            l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), i_micro_kernel_config->use_masking_a_c, 0, 1 );
      }
      /* B-matrix prefetch through the C prefetch pointer, only for non-transposed B */
      if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
           i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
           i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ||
           i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_JPST) {
        if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
          /* determining how many prefetches we need in M direction as we just need one prefetch per cache line */
          unsigned int l_m_advance = 64 / ((i_micro_kernel_config->vector_length) * (i_micro_kernel_config->datatype_size)); /* 64: hardcoded cache line length */
          for (l_m = 0; l_m < l_m_blocking; l_m += l_m_advance ) {
            libxsmm_x86_instruction_prefetch( io_generated_code,
                i_micro_kernel_config->prefetch_instruction,
                i_gp_reg_mapping->gp_reg_b_prefetch,
                LIBXSMM_X86_GP_REG_UNDEF, 0,
                ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
          }
        }
      }
    }
  } else if ( ( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_KNM) ||
                (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CLX) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CPX) ) &&
              ( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
#if 0
    /* disabled round-to-nearest-even constants; current code truncates instead */
    /* push 0x7f800000 on the stack, naninf masking */
    libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_5, 0x7f800000);
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_5 );
    /* push 0x00010000 on the stack, fixup masking */
    libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_5, 0x00010000);
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_5 );
    /* push 0x00007fff on the stack, rneadd */
    libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_5, 0x00007fff);
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_5 );
    /* push 0x00000001 on the stack, fixup */
    libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_5, 0x00000001);
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_5 );
#endif
    /* storing downconverted and rounded C accumulator */
    for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
      for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
        unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n);
        /* FP32 -> BF16 by arithmetic right shift of 16 (truncation, not RNE) */
        libxsmm_x86_instruction_vec_shuffle_reg(io_generated_code,
            i_micro_kernel_config->instruction_set,
            LIBXSMM_X86_INSTR_VPSRAD,
            i_micro_kernel_config->vector_name,
            reg_X,
            reg_X,
            LIBXSMM_X86_VEC_REG_UNDEF,
            16);
        /* narrow dwords to words into scratch vreg 0 */
        libxsmm_x86_instruction_vec_compute_convert( io_generated_code,
            i_micro_kernel_config->instruction_set,
            LIBXSMM_X86_INSTR_VPMOVDW,
            i_micro_kernel_config->vector_name,
            reg_X, LIBXSMM_X86_VEC_REG_UNDEF,
            0,
            LIBXSMM_X86_VEC_REG_UNDEF);
        /* 16-bit elements -> half the byte offset and a ymm store */
        libxsmm_x86_instruction_vec_move( io_generated_code,
            i_micro_kernel_config->instruction_set,
            l_vstore,
            i_gp_reg_mapping->gp_reg_c,
            LIBXSMM_X86_GP_REG_UNDEF, 0,
            ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
            'y',
            0, i_micro_kernel_config->use_masking_a_c, 0, 1 );
      }
      /* B-matrix prefetch through the C prefetch pointer, only for non-transposed B */
      if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
           i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
           i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ||
           i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_JPST) {
        if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
          /* determining how many prefetches we need in M direction as we just need one prefetch per cache line */
          unsigned int l_m_advance = 64 / ((i_micro_kernel_config->vector_length) * (i_micro_kernel_config->datatype_size/2)); /* 64: hardcoded cache line length */
          for (l_m = 0; l_m < l_m_blocking; l_m += l_m_advance ) {
            libxsmm_x86_instruction_prefetch( io_generated_code,
                i_micro_kernel_config->prefetch_instruction,
                i_gp_reg_mapping->gp_reg_b_prefetch,
                LIBXSMM_X86_GP_REG_UNDEF, 0,
                ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2));
          }
        }
      }
    }
  } else {
    /* default path: storing C accumulator unchanged */
    for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
      for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
        libxsmm_x86_instruction_vec_move( io_generated_code,
            i_micro_kernel_config->instruction_set,
            l_vstore,
            i_gp_reg_mapping->gp_reg_c,
            LIBXSMM_X86_GP_REG_UNDEF, 0,
            ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size),
            i_micro_kernel_config->vector_name,
            l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), i_micro_kernel_config->use_masking_a_c, 0, 1 );
      }
      /* B-matrix prefetch through the C prefetch pointer, only for non-transposed B */
      if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
           i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
           i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ||
           i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_JPST) {
        if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
          /* determining how many prefetches we need in M direction as we just need one prefetch per cache line */
          unsigned int l_m_advance = 64 / ((i_micro_kernel_config->vector_length) * (i_micro_kernel_config->datatype_size)); /* 64: hardcoded cache line length */
          for (l_m = 0; l_m < l_m_blocking; l_m += l_m_advance ) {
            libxsmm_x86_instruction_prefetch( io_generated_code,
                i_micro_kernel_config->prefetch_instruction,
                i_gp_reg_mapping->gp_reg_b_prefetch,
                LIBXSMM_X86_GP_REG_UNDEF, 0,
                ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
          }
        }
      }
    }
  }
}
|
J2OrbitalSoA.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
// Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
// Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H
#define QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H
#include <map>
#include <numeric>
#include "Configuration.h"
#if !defined(QMC_BUILD_SANDBOX_ONLY)
#include "QMCWaveFunctions/WaveFunctionComponent.h"
#include "QMCWaveFunctions/Jastrow/DiffTwoBodyJastrowOrbital.h"
#endif
#include "Particle/DistanceTableData.h"
#include "LongRange/StructFact.h"
#include "CPU/SIMD/aligned_allocator.hpp"
#include "CPU/SIMD/algorithm.hpp"
namespace qmcplusplus
{
// helper class to activate KEcorr during optimizing Jastrow
/** Computes the Chiesa finite-size kinetic-energy correction for a two-body
 *  Jastrow described by the functor table F (one functor per group pair).
 *  Requires the target particle set to have structure factors (SK) enabled;
 *  otherwise the correction is zero.
 */
template<typename RT, class FT>
class J2KECorrection
{
  size_t num_groups_;
  std::vector<size_t> num_elec_in_groups_;
  RT num_elecs_;
  RT vol;
  RT G0mag = RT(0); // magnitude of the smallest k-vector; meaningful only when SK_enabled
  const std::vector<FT*>& F_;
  bool SK_enabled;

public:
  J2KECorrection(const ParticleSet& targetPtcl, const std::vector<FT*>& F)
      : num_groups_(targetPtcl.groups()),
        num_elecs_(targetPtcl.getTotalNum()),
        vol(targetPtcl.Lattice.Volume),
        F_(F),
        SK_enabled(targetPtcl.SK != nullptr)
  {
    // compute num_elec_in_groups_ (reserve the actual group count, not a magic 3)
    num_elec_in_groups_.reserve(num_groups_);
    for (int i = 0; i < num_groups_; i++)
      num_elec_in_groups_.push_back(targetPtcl.last(i) - targetPtcl.first(i));

    if (SK_enabled)
      G0mag = std::sqrt(targetPtcl.SK->KLists.ksq[0]);
  }

  /** Evaluate the KE correction.
   *  Integrates u(r)*sin(kr) numerically over each pair functor's cutoff range,
   *  then solves a fixed-point equation for the long-range parameter a.
   *  \return the correction energy, or 0 when structure factors are unavailable.
   */
  RT computeKEcorr()
  {
    if (!SK_enabled)
      return 0;

    const int numPoints = 1000; // radial quadrature points per functor
    RT uk = 0.0;
    RT a  = 1.0;

    for (int i = 0; i < num_groups_; i++)
    {
      int Ni = num_elec_in_groups_[i];
      for (int j = 0; j < num_groups_; j++)
      {
        int Nj = num_elec_in_groups_[j];
        if (F_[i * num_groups_ + j])
        {
          FT& ufunc = *(F_[i * num_groups_ + j]);
          RT radius = ufunc.cutoff_radius;
          RT k      = G0mag;
          RT dr     = radius / (RT)(numPoints - 1);
          // trapezoid-like sum of 2*pi*r*sin(kr)/k * u(r), weighted by pair fractions
          for (int ir = 0; ir < numPoints; ir++)
          {
            RT r = dr * (RT)ir;
            RT u = ufunc.evaluate(r);
            uk += 0.5 * 4.0 * M_PI * r * std::sin(k * r) / k * u * dr * (RT)Nj / (RT)(Ni + Nj);
          }
        }
      }
    }
    // fixed-point iteration for the RPA-like parameter a (20 iterations suffice in practice)
    for (int iter = 0; iter < 20; iter++)
      a = uk / (4.0 * M_PI * (1.0 / (G0mag * G0mag) - 1.0 / (G0mag * G0mag + 1.0 / a)));
    return 4.0 * M_PI * a / (4.0 * vol) * num_elecs_;
  }
};
/** @ingroup WaveFunctionComponent
 *  @brief Specialization for two-body Jastrow function using multiple functors
 *
 * Each pair-type can have distinct function \f$u(r_{ij})\f$.
 * For electrons, distinct pair correlation functions are used
 * for spins up-up/down-down and up-down/down-up.
 *
 * Based on J2OrbitalSoA.h with these considerations
 * - DistanceTableData using SoA containers
 * - support mixed precision: FT::real_type != OHMMS_PRECISION
 * - loops over the groups: eliminated PairID
 * - support simd function
 * - double the loop counts
 * - Memory use is O(N).
 */
template<class FT>
class J2OrbitalSoA : public WaveFunctionComponent
{
public:
  ///alias FuncType
  using FuncType = FT;
  ///type of each component U, dU, d2U;
  using valT = typename FT::real_type;
  ///element position type
  using posT = TinyVector<valT, OHMMS_DIM>;
  ///use the same container
  using DistRow  = DistanceTableData::DistRow;
  using DisplRow = DistanceTableData::DisplRow;
  using gContainer_type = VectorSoaContainer<valT, OHMMS_DIM>;

  // Ye: leaving this public is bad but currently used by unit tests.
  ///Container for \f$F[ig*NumGroups+jg]\f$.
  std::vector<FT*> F;

protected:
  ///number of particles
  size_t N;
  ///number of particles + padded
  size_t N_padded;
  ///number of groups of the target particleset
  size_t NumGroups;
  ///diff value
  RealType DiffVal;
  ///Correction
  RealType KEcorr;
  ///\f$Uat[i] = sum_(j) u_{i,j}\f$
  Vector<valT> Uat;
  ///\f$dUat[i] = sum_(j) du_{i,j}\f$
  gContainer_type dUat;
  ///\f$d2Uat[i] = sum_(j) d2u_{i,j}\f$
  Vector<valT> d2Uat;
  /// value of the proposed move's U sum (set by ratio/ratioGrad, consumed by acceptMove)
  valT cur_Uat;
  /// scratch arrays for the current (proposed) row of u, du, d2u
  aligned_vector<valT> cur_u, cur_du, cur_d2u;
  /// scratch arrays for the old (pre-move) row of u, du, d2u
  aligned_vector<valT> old_u, old_du, old_d2u;
  /// compressed distances within cutoff, filled by the functors' evaluateV/VGL
  aligned_vector<valT> DistCompressed;
  /// indices matching DistCompressed entries back to particle indices
  aligned_vector<int> DistIndice;
  ///Unique J2 set for cleanup
  std::map<std::string, FT*> J2Unique;
  /// e-e table ID
  const int my_table_ID_;
  // helper for compute J2 Chiesa KE correction
  J2KECorrection<RealType, FT> j2_ke_corr_helper;

public:
  J2OrbitalSoA(const std::string& obj_name, ParticleSet& p, int tid);
  J2OrbitalSoA(const J2OrbitalSoA& rhs) = delete;
  ~J2OrbitalSoA();

  /* initialize storage */
  void init(ParticleSet& p);

  /** add functor for (ia,ib) pair */
  void addFunc(int ia, int ib, FT* j);

  /** check in an optimizable parameter
   * @param o a super set of optimizable variables
   */
  void checkInVariables(opt_variables_type& active)
  {
    // rebuild myVars from the unique functors and register them with `active`
    myVars.clear();
    typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
    while (it != it_end)
    {
      (*it).second->checkInVariables(active);
      (*it).second->checkInVariables(myVars);
      ++it;
    }
  }

  /** check out optimizable variables
   */
  void checkOutVariables(const opt_variables_type& active)
  {
    myVars.getIndex(active);
    Optimizable = myVars.is_optimizable();
    typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
    while (it != it_end)
    {
      (*it).second->checkOutVariables(active);
      ++it;
    }
    if (dPsi)
      dPsi->checkOutVariables(active);
  }

  ///reset the value of all the unique Two-Body Jastrow functions
  void resetParameters(const opt_variables_type& active)
  {
    if (!Optimizable)
      return;
    typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
    while (it != it_end)
    {
      (*it).second->resetParameters(active);
      ++it;
    }
    if (dPsi)
      dPsi->resetParameters(active);
    // copy the active optimizer values back into this component's variable set
    for (int i = 0; i < myVars.size(); ++i)
    {
      int ii = myVars.Index[i];
      if (ii >= 0)
        myVars[i] = active[ii];
    }
  }

  /// recompute the Chiesa KE correction once optimization is done
  void finalizeOptimization() { KEcorr = j2_ke_corr_helper.computeKEcorr(); }

  /** print the state, e.g., optimizables */
  void reportStatus(std::ostream& os)
  {
    typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
    while (it != it_end)
    {
      (*it).second->myVars.print(os);
      ++it;
    }
  }

  WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const;

  LogValueType evaluateLog(const ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L);
  void evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi);

  /** recompute internal data assuming distance table is fully ready */
  void recompute(const ParticleSet& P);

  PsiValueType ratio(ParticleSet& P, int iat);
  void evaluateRatios(const VirtualParticleSet& VP, std::vector<ValueType>& ratios)
  {
    // ratio for each virtual move k of reference particle refPtcl:
    // exp(U_old - U_new) using the VP's precomputed distance row
    for (int k = 0; k < ratios.size(); ++k)
      ratios[k] =
          std::exp(Uat[VP.refPtcl] - computeU(VP.refPS, VP.refPtcl, VP.getDistTable(my_table_ID_).getDistRow(k)));
  }
  void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios);
  GradType evalGrad(ParticleSet& P, int iat);
  PsiValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat);
  void acceptMove(ParticleSet& P, int iat, bool safe_to_delay = false);
  inline void restore(int iat) {}

  /** compute G and L after the sweep
   */
  LogValueType evaluateGL(const ParticleSet& P,
                          ParticleSet::ParticleGradient_t& G,
                          ParticleSet::ParticleLaplacian_t& L,
                          bool fromscratch = false);

  inline void registerData(ParticleSet& P, WFBufferType& buf)
  {
    if (Bytes_in_WFBuffer == 0)
    {
      // first registration: move Uat/dUat/d2Uat storage into the walker buffer
      Bytes_in_WFBuffer = buf.current();
      buf.add(Uat.begin(), Uat.end());
      buf.add(dUat.data(), dUat.end());
      buf.add(d2Uat.begin(), d2Uat.end());
      Bytes_in_WFBuffer = buf.current() - Bytes_in_WFBuffer;
      // free local space (the data now lives in the buffer; see copyFromBuffer)
      Uat.free();
      dUat.free();
      d2Uat.free();
    }
    else
    {
      buf.forward(Bytes_in_WFBuffer);
    }
  }

  inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf)
  {
    // re-attach Uat/dUat/d2Uat as views into the walker buffer (no copies)
    Uat.attachReference(buf.lendReference<valT>(N), N);
    dUat.attachReference(N, N_padded, buf.lendReference<valT>(N_padded * OHMMS_DIM));
    d2Uat.attachReference(buf.lendReference<valT>(N), N);
  }

  LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false)
  {
    evaluateGL(P, P.G, P.L, false);
    buf.forward(Bytes_in_WFBuffer);
    return LogValue;
  }

  /*@{ internal compute engines*/
  /** sum u(r_ij) over all groups for particle iat using the given distance row */
  inline valT computeU(const ParticleSet& P, int iat, const DistRow& dist)
  {
    valT curUat(0);
    const int igt = P.GroupID[iat] * NumGroups;
    for (int jg = 0; jg < NumGroups; ++jg)
    {
      const FuncType& f2(*F[igt + jg]);
      int iStart = P.first(jg);
      int iEnd   = P.last(jg);
      curUat += f2.evaluateV(iat, iStart, iEnd, dist.data(), DistCompressed.data());
    }
    return curUat;
  }

  inline void computeU3(const ParticleSet& P,
                        int iat,
                        const DistRow& dist,
                        RealType* restrict u,
                        RealType* restrict du,
                        RealType* restrict d2u,
                        bool triangle = false);

  /** compute gradient
   */
  inline posT accumulateG(const valT* restrict du, const DisplRow& displ) const
  {
    // grad[idim] = sum_j du[j] * displ[idim][j], vectorized per dimension
    posT grad;
    for (int idim = 0; idim < OHMMS_DIM; ++idim)
    {
      const valT* restrict dX = displ.data(idim);
      valT s                  = valT();

#pragma omp simd reduction(+ : s) aligned(du, dX: QMC_SIMD_ALIGNMENT)
      for (int jat = 0; jat < N; ++jat)
        s += du[jat] * dX[jat];
      grad[idim] = s;
    }
    return grad;
  }
  /**@} */

  /// compute and cache the Chiesa KE correction
  RealType ChiesaKEcorrection() { return KEcorr = j2_ke_corr_helper.computeKEcorr(); }
  RealType KECorrection() { return KEcorr; }
};
template<typename FT>
J2OrbitalSoA<FT>::J2OrbitalSoA(const std::string& obj_name, ParticleSet& p, int tid)
    // the e-e distance table is registered here (p.addTable(p)); the KE-correction
    // helper captures references to p and the (still empty) functor table F
    : WaveFunctionComponent("J2OrbitalSoA", obj_name), my_table_ID_(p.addTable(p)), j2_ke_corr_helper(p, F)
{
  if (myName.empty())
    throw std::runtime_error("J2OrbitalSoA object name cannot be empty!");
  init(p);
  KEcorr = 0.0;
}
template<typename FT>
J2OrbitalSoA<FT>::~J2OrbitalSoA()
{
  // J2Unique owns the functors (F holds aliased raw pointers); delete each once.
  for (auto& entry : J2Unique)
    delete entry.second;
}
template<typename FT>
void J2OrbitalSoA<FT>::init(ParticleSet& p)
{
  // Size all per-particle accumulators and scratch arrays; the functor
  // table F gets one (possibly shared) slot per ordered group pair.
  N        = p.getTotalNum();
  N_padded = getAlignedSize<valT>(N); // padded length for aligned SIMD access
  NumGroups = p.groups();

  Uat.resize(N);
  dUat.resize(N);
  d2Uat.resize(N);
  cur_u.resize(N);
  cur_du.resize(N);
  cur_d2u.resize(N);
  old_u.resize(N);
  old_du.resize(N);
  old_d2u.resize(N);
  F.resize(NumGroups * NumGroups, nullptr);
  DistCompressed.resize(N);
  DistIndice.resize(N);
}
template<typename FT>
void J2OrbitalSoA<FT>::addFunc(int ia, int ib, FT* j)
{
  // Register functor j for the (ia,ib) group pair. Ownership passes to
  // J2Unique (keyed by the concatenated pair digits); F stores aliases.
  if (ia == ib)
  {
    if (ia == 0) //first time, assign everything
    {
      // use j as the default for every pair slot not yet assigned
      int ij = 0;
      for (int ig = 0; ig < NumGroups; ++ig)
        for (int jg = 0; jg < NumGroups; ++jg, ++ij)
          if (F[ij] == nullptr)
            F[ij] = j;
    }
    else
      F[ia * NumGroups + ib] = j;
  }
  else
  {
    if (N == 2)
    {
      // a very special case, 1 up + 1 down
      // uu/dd was prevented by the builder
      for (int ig = 0; ig < NumGroups; ++ig)
        for (int jg = 0; jg < NumGroups; ++jg)
          F[ig * NumGroups + jg] = j;
    }
    else
    {
      // generic case: assign both (ia,ib) and the symmetric (ib,ia) slot
      F[ia * NumGroups + ib] = j;
      F[ib * NumGroups + ia] = j;
    }
  }
  std::stringstream aname;
  aname << ia << ib;
  J2Unique[aname.str()] = j;
}
/** Deep-copy this component for particle set tqp.
 *  Each unique functor is cloned exactly once (tracked via fcmap) and
 *  registered on the clone through addFunc, which restores the shared-slot
 *  aliasing of F. The -1 tid marks the clone as not owning a task id.
 */
template<typename FT>
WaveFunctionComponentPtr J2OrbitalSoA<FT>::makeClone(ParticleSet& tqp) const
{
  J2OrbitalSoA<FT>* j2copy = new J2OrbitalSoA<FT>(myName, tqp, -1);
  if (dPsi)
    j2copy->dPsi = dPsi->makeClone(tqp);
  std::map<const FT*, FT*> fcmap; // original functor -> its clone
  for (int ig = 0; ig < NumGroups; ++ig)
    for (int jg = ig; jg < NumGroups; ++jg)
    {
      int ij = ig * NumGroups + jg;
      if (F[ij] == nullptr) // was `== 0`; use nullptr per file convention
        continue;
      auto fit = fcmap.find(F[ij]);
      if (fit == fcmap.end())
      {
        FT* fc = new FT(*F[ij]);
        j2copy->addFunc(ig, jg, fc);
        //if (dPsi) (j2copy->dPsi)->addFunc(aname.str(),ig,jg,fc);
        fcmap[F[ij]] = fc;
      }
    }
  j2copy->KEcorr      = KEcorr;
  j2copy->Optimizable = Optimizable;
  return j2copy;
}
/** internal function to compute \f$\sum_j u(r_j), du/dr, d2u/dr2\f$
 * @param P particleset
 * @param iat particle index
 * @param dist starting distance
 * @param u starting value
 * @param du starting first deriv
 * @param d2u starting second deriv
 * @param triangle when true, only the lower triangle (j < iat) is computed
 */
template<typename FT>
inline void J2OrbitalSoA<FT>::computeU3(const ParticleSet& P,
                                        int iat,
                                        const DistRow& dist,
                                        RealType* restrict u,
                                        RealType* restrict du,
                                        RealType* restrict d2u,
                                        bool triangle)
{
  const int jelmax = triangle ? iat : N;
  constexpr valT czero(0);
  // clear outputs before the per-group accumulation below
  std::fill_n(u, jelmax, czero);
  std::fill_n(du, jelmax, czero);
  std::fill_n(d2u, jelmax, czero);

  const int igt = P.GroupID[iat] * NumGroups;
  for (int jg = 0; jg < NumGroups; ++jg)
  {
    const FuncType& f2(*F[igt + jg]);
    int iStart = P.first(jg);
    // clip the group's range at jelmax when only the triangle is requested
    int iEnd   = std::min(jelmax, P.last(jg));
    f2.evaluateVGL(iat, iStart, iEnd, dist.data(), u, du, d2u, DistCompressed.data(), DistIndice.data());
  }
  //u[iat]=czero;
  //du[iat]=czero;
  //d2u[iat]=czero;
}
template<typename FT>
typename J2OrbitalSoA<FT>::PsiValueType J2OrbitalSoA<FT>::ratio(ParticleSet& P, int iat)
{
  //only ratio, ready to compute it again
  UpdateMode = ORB_PBYP_RATIO;
  // new U sum for iat's proposed position; cached for a possible acceptMove
  cur_Uat    = computeU(P, iat, P.getDistTable(my_table_ID_).getTempDists());
  // psi_new/psi_old = exp(U_old - U_new)
  return std::exp(static_cast<PsiValueType>(Uat[iat] - cur_Uat));
}
template<typename FT>
inline void J2OrbitalSoA<FT>::evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios)
{
  // Ratios for moving each particle i to the single temporary position.
  // The per-group sum sumU is computed once per source group; the term the
  // moved particle contributes to itself (Uself) is removed per particle.
  const auto& d_table = P.getDistTable(my_table_ID_);
  const auto& dist    = d_table.getTempDists();

  for (int ig = 0; ig < NumGroups; ++ig)
  {
    const int igt = ig * NumGroups;
    valT sumU(0);
    for (int jg = 0; jg < NumGroups; ++jg)
    {
      const FuncType& f2(*F[igt + jg]);
      int iStart = P.first(jg);
      int iEnd   = P.last(jg);
      // iat = -1: no particle excluded from the sum
      sumU += f2.evaluateV(-1, iStart, iEnd, dist.data(), DistCompressed.data());
    }

    for (int i = P.first(ig); i < P.last(ig); ++i)
    {
      // remove self-interaction
      const valT Uself = F[igt + ig]->evaluate(dist[i]);
      ratios[i]        = std::exp(Uat[i] + Uself - sumU);
    }
  }
}
template<typename FT>
typename J2OrbitalSoA<FT>::GradType J2OrbitalSoA<FT>::evalGrad(ParticleSet& P, int iat)
{
  // gradient of ln(J2) w.r.t. particle iat is cached in dUat
  return GradType(dUat[iat]);
}
template<typename FT>
typename J2OrbitalSoA<FT>::PsiValueType J2OrbitalSoA<FT>::ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
{
  UpdateMode = ORB_PBYP_PARTIAL;

  // u/du/d2u for the proposed position; cached for acceptMove
  computeU3(P, iat, P.getDistTable(my_table_ID_).getTempDists(), cur_u.data(), cur_du.data(), cur_d2u.data());
  cur_Uat = simd::accumulate_n(cur_u.data(), N, valT());
  DiffVal = Uat[iat] - cur_Uat;
  // add this component's gradient contribution at the proposed position
  grad_iat += accumulateG(cur_du.data(), P.getDistTable(my_table_ID_).getTempDispls());
  return std::exp(static_cast<PsiValueType>(DiffVal));
}
template<typename FT>
void J2OrbitalSoA<FT>::acceptMove(ParticleSet& P, int iat, bool safe_to_delay)
{
  // Update Uat/dUat/d2Uat incrementally after accepting iat's move:
  // subtract the old-row contribution and add the new-row contribution
  // for every other particle, then overwrite iat's own entries.

  // get the old u, du, d2u
  const auto& d_table = P.getDistTable(my_table_ID_);
  computeU3(P, iat, d_table.getOldDists(), old_u.data(), old_du.data(), old_d2u.data());
  if (UpdateMode == ORB_PBYP_RATIO)
  { //ratio-only during the move; need to compute derivatives
    const auto& dist = d_table.getTempDists();
    computeU3(P, iat, dist, cur_u.data(), cur_du.data(), cur_d2u.data());
  }

  valT cur_d2Uat(0);
  const auto& new_dr    = d_table.getTempDispls();
  const auto& old_dr    = d_table.getOldDispls();
  // (D-1)/r term of the radial Laplacian in D dimensions
  constexpr valT lapfac = OHMMS_DIM - RealType(1);
#pragma omp simd reduction(+ : cur_d2Uat)
  for (int jat = 0; jat < N; jat++)
  {
    const valT du   = cur_u[jat] - old_u[jat];
    const valT newl = cur_d2u[jat] + lapfac * cur_du[jat];
    const valT dl   = old_d2u[jat] + lapfac * old_du[jat] - newl;
    Uat[jat] += du;
    d2Uat[jat] += dl;
    cur_d2Uat -= newl;
  }

  // update the gradient store per dimension; cur_g accumulates iat's own gradient
  posT cur_dUat;
  for (int idim = 0; idim < OHMMS_DIM; ++idim)
  {
    const valT* restrict new_dX    = new_dr.data(idim);
    const valT* restrict old_dX    = old_dr.data(idim);
    const valT* restrict cur_du_pt = cur_du.data();
    const valT* restrict old_du_pt = old_du.data();
    valT* restrict save_g          = dUat.data(idim);
    valT cur_g                     = cur_dUat[idim];
#pragma omp simd reduction(+ : cur_g) aligned(old_dX, new_dX, save_g, cur_du_pt, old_du_pt: QMC_SIMD_ALIGNMENT)
    for (int jat = 0; jat < N; jat++)
    {
      const valT newg = cur_du_pt[jat] * new_dX[jat];
      const valT dg   = newg - old_du_pt[jat] * old_dX[jat];
      save_g[jat] -= dg;
      cur_g += newg;
    }
    cur_dUat[idim] = cur_g;
  }

  // finalize iat's own entries (cur_Uat was cached by ratio/ratioGrad)
  LogValue += Uat[iat] - cur_Uat;
  Uat[iat]   = cur_Uat;
  dUat(iat)  = cur_dUat;
  d2Uat[iat] = cur_d2Uat;
}
template<typename FT>
void J2OrbitalSoA<FT>::recompute(const ParticleSet& P)
{
  // Full O(N^2/2) rebuild of Uat/dUat/d2Uat. Each row iat is evaluated over
  // its lower triangle (j < iat); the symmetric upper-triangle contribution
  // is scattered back to the j entries afterwards.
  const auto& d_table = P.getDistTable(my_table_ID_);
  for (int ig = 0; ig < NumGroups; ++ig)
  {
    for (int iat = P.first(ig), last = P.last(ig); iat < last; ++iat)
    {
      // triangle=true: only j < iat entries of cur_u/du/d2u are filled
      computeU3(P, iat, d_table.getDistRow(iat), cur_u.data(), cur_du.data(), cur_d2u.data(), true);
      Uat[iat] = simd::accumulate_n(cur_u.data(), iat, valT());
      posT grad;
      valT lap(0);
      const valT* restrict u    = cur_u.data();
      const valT* restrict du   = cur_du.data();
      const valT* restrict d2u  = cur_d2u.data();
      const auto& displ         = d_table.getDisplRow(iat);
      // (D-1)/r term of the radial Laplacian in D dimensions
      constexpr valT lapfac     = OHMMS_DIM - RealType(1);
#pragma omp simd reduction(+ : lap) aligned(du, d2u: QMC_SIMD_ALIGNMENT)
      for (int jat = 0; jat < iat; ++jat)
        lap += d2u[jat] + lapfac * du[jat];
      for (int idim = 0; idim < OHMMS_DIM; ++idim)
      {
        const valT* restrict dX = displ.data(idim);
        valT s                  = valT();
#pragma omp simd reduction(+ : s) aligned(du, dX: QMC_SIMD_ALIGNMENT)
        for (int jat = 0; jat < iat; ++jat)
          s += du[jat] * dX[jat];
        grad[idim] = s;
      }
      dUat(iat)  = grad;
      d2Uat[iat] = -lap;
      // add the contribution from the upper triangle
#pragma omp simd aligned(u, du, d2u: QMC_SIMD_ALIGNMENT)
      for (int jat = 0; jat < iat; jat++)
      {
        Uat[jat] += u[jat];
        d2Uat[jat] -= d2u[jat] + lapfac * du[jat];
      }
      for (int idim = 0; idim < OHMMS_DIM; ++idim)
      {
        valT* restrict save_g   = dUat.data(idim);
        const valT* restrict dX = displ.data(idim);
#pragma omp simd aligned(save_g, du, dX: QMC_SIMD_ALIGNMENT)
        for (int jat = 0; jat < iat; jat++)
          save_g[jat] -= du[jat] * dX[jat];
      }
    }
  }
}
template<typename FT>
typename J2OrbitalSoA<FT>::LogValueType J2OrbitalSoA<FT>::evaluateLog(const ParticleSet& P,
                                                                      ParticleSet::ParticleGradient_t& G,
                                                                      ParticleSet::ParticleLaplacian_t& L)
{
  // full evaluation = recompute from scratch, then accumulate G and L
  return evaluateGL(P, G, L, true);
}
template<typename FT>
WaveFunctionComponent::LogValueType J2OrbitalSoA<FT>::evaluateGL(const ParticleSet& P,
                                                                 ParticleSet::ParticleGradient_t& G,
                                                                 ParticleSet::ParticleLaplacian_t& L,
                                                                 bool fromscratch)
{
  if (fromscratch)
    recompute(P);
  // accumulate cached per-particle values into the walker's G and L;
  // the 0.5 factor corrects the double counting of pairs in sum_i Uat[i]
  LogValue = valT(0);
  for (int iat = 0; iat < N; ++iat)
  {
    LogValue += Uat[iat];
    G[iat] += dUat[iat];
    L[iat] += d2Uat[iat];
  }

  return LogValue = -LogValue * 0.5;
}
template<typename FT>
void J2OrbitalSoA<FT>::evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi)
{
  // Accumulate the Hessian of ln(J2) per particle by a direct double loop
  // over unique pairs (j < i); each pair contributes the same tensor to
  // both particles. Also rebuilds LogValue as -sum u_ij (no 0.5 needed:
  // each pair is visited once).
  LogValue = 0.0;
  const DistanceTableData& d_ee(P.getDistTable(my_table_ID_));
  valT dudr, d2udr2;
  Tensor<valT, DIM> ident;
  grad_grad_psi = 0.0;
  ident.diagonal(1.0);

  for (int i = 1; i < N; ++i)
  {
    const auto& dist  = d_ee.getDistRow(i);
    const auto& displ = d_ee.getDisplRow(i);
    auto ig           = P.GroupID[i];
    const int igt     = ig * NumGroups;
    for (int j = 0; j < i; ++j)
    {
      auto r    = dist[j];
      auto rinv = 1.0 / r;
      auto dr   = displ[j];
      auto jg   = P.GroupID[j];
      auto uij  = F[igt + jg]->evaluate(r, dudr, d2udr2);
      LogValue -= uij;
      // radial decomposition: (u'' - u'/r) r_a r_b / r^2 + (u'/r) delta_ab
      auto hess = rinv * rinv * outerProduct(dr, dr) * (d2udr2 - dudr * rinv) + ident * dudr * rinv;
      grad_grad_psi[i] -= hess;
      grad_grad_psi[j] -= hess;
    }
  }
}
} // namespace qmcplusplus
#endif
|
csr.c | /*!
* \file
*
* \brief Various routines with dealing with CSR matrices
*
* \author George Karypis
* \version\verbatim $Id: csr.c 21044 2017-05-24 22:50:32Z karypis $ \endverbatim
*/
#include <GKlib.h>
#define OMPMINOPS 50000
/*************************************************************************/
/*! Allocate memory for a CSR matrix and initializes it
\returns the allocated matrix. The various fields are set to NULL.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Create()
{
  /* allocate and zero-initialize a new CSR matrix; NULL on allocation failure */
  gk_csr_t *mat = (gk_csr_t *)gk_malloc(sizeof(gk_csr_t), "gk_csr_Create: mat");

  if (mat != NULL)
    gk_csr_Init(mat);

  return mat;
}
/*************************************************************************/
/*! Initializes the matrix
\param mat is the matrix to be initialized.
*/
/*************************************************************************/
/* Resets every field of *mat to zero/NULL so the matrix is in a clean,
   empty state ready for population. */
void gk_csr_Init(gk_csr_t *mat)
{
  memset((void *)mat, 0, sizeof(*mat));
  mat->nrows = 0;
  mat->ncols = 0;
}
/*************************************************************************/
/*! Frees all the memory allocated for matrix.
\param mat is the matrix to be freed.
*/
/*************************************************************************/
/* Releases the matrix's contents and the matrix object itself;
   *mat is set to NULL by gk_free.  A NULL *mat is a no-op. */
void gk_csr_Free(gk_csr_t **mat)
{
  if (*mat != NULL) {
    gk_csr_FreeContents(*mat);
    gk_free((void **)mat, LTERM);
  }
}
/*************************************************************************/
/*! Frees only the memory allocated for the matrix's different fields and
sets them to NULL.
\param mat is the matrix whose contents will be freed.
*/
/*************************************************************************/
/* Frees every dynamically allocated field of the matrix and NULLs the
   corresponding pointers (gk_free writes NULL back through each address).
   The gk_csr_t object itself is not freed.
   Fix: the first argument was cast to (void *) instead of (void **);
   gk_free's first parameter is void **, and every other call site in this
   file uses the (void **) cast. */
void gk_csr_FreeContents(gk_csr_t *mat)
{
  gk_free((void **)&mat->rowptr, &mat->rowind, &mat->rowval,
          &mat->rowids, &mat->rlabels, &mat->rmap,
          &mat->colptr, &mat->colind, &mat->colval,
          &mat->colids, &mat->clabels, &mat->cmap,
          &mat->rnorms, &mat->cnorms, &mat->rsums, &mat->csums,
          &mat->rsizes, &mat->csizes, &mat->rvols, &mat->cvols,
          &mat->rwgts, &mat->cwgts,
          LTERM);
}
/*************************************************************************/
/*! Returns a copy of a matrix.
\param mat is the matrix to be duplicated.
\returns the newly created copy of the matrix.
*/
/**************************************************************************/
/* Deep-copies mat into a freshly created matrix.  Each optional field is
   duplicated only when present in the source; absent fields stay NULL in
   the copy.  NOTE(review): copying rowind/rowval/colind/colval assumes the
   matching rowptr/colptr is also non-NULL, since its last entry supplies
   the nonzero count — presumably an invariant of a well-formed gk_csr_t. */
gk_csr_t *gk_csr_Dup(gk_csr_t *mat)
{
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = mat->nrows;
  nmat->ncols = mat->ncols;

  /* copy the row structure */
  if (mat->rowptr)
    nmat->rowptr = gk_zcopy(mat->nrows+1, mat->rowptr,
                            gk_zmalloc(mat->nrows+1, "gk_csr_Dup: rowptr"));
  if (mat->rowids)
    nmat->rowids = gk_icopy(mat->nrows, mat->rowids,
                            gk_imalloc(mat->nrows, "gk_csr_Dup: rowids"));
  if (mat->rlabels)
    nmat->rlabels = gk_icopy(mat->nrows, mat->rlabels,
                             gk_imalloc(mat->nrows, "gk_csr_Dup: rlabels"));
  if (mat->rnorms)
    nmat->rnorms = gk_fcopy(mat->nrows, mat->rnorms,
                            gk_fmalloc(mat->nrows, "gk_csr_Dup: rnorms"))
;
  if (mat->rsums)
    nmat->rsums = gk_fcopy(mat->nrows, mat->rsums,
                           gk_fmalloc(mat->nrows, "gk_csr_Dup: rsums"));
  if (mat->rsizes)
    nmat->rsizes = gk_fcopy(mat->nrows, mat->rsizes,
                            gk_fmalloc(mat->nrows, "gk_csr_Dup: rsizes"));
  if (mat->rvols)
    nmat->rvols = gk_fcopy(mat->nrows, mat->rvols,
                           gk_fmalloc(mat->nrows, "gk_csr_Dup: rvols"));
  if (mat->rwgts)
    nmat->rwgts = gk_fcopy(mat->nrows, mat->rwgts,
                           gk_fmalloc(mat->nrows, "gk_csr_Dup: rwgts"));
  /* nonzero arrays are sized by the last rowptr entry */
  if (mat->rowind)
    nmat->rowind = gk_icopy(mat->rowptr[mat->nrows], mat->rowind,
                            gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowind"));
  if (mat->rowval)
    nmat->rowval = gk_fcopy(mat->rowptr[mat->nrows], mat->rowval,
                            gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowval"));

  /* copy the col structure */
  if (mat->colptr)
    nmat->colptr = gk_zcopy(mat->ncols+1, mat->colptr,
                            gk_zmalloc(mat->ncols+1, "gk_csr_Dup: colptr"));
  if (mat->colids)
    nmat->colids = gk_icopy(mat->ncols, mat->colids,
                            gk_imalloc(mat->ncols, "gk_csr_Dup: colids"));
  if (mat->clabels)
    nmat->clabels = gk_icopy(mat->ncols, mat->clabels,
                             gk_imalloc(mat->ncols, "gk_csr_Dup: clabels"));
  if (mat->cnorms)
    nmat->cnorms = gk_fcopy(mat->ncols, mat->cnorms,
                            gk_fmalloc(mat->ncols, "gk_csr_Dup: cnorms"));
  if (mat->csums)
    nmat->csums = gk_fcopy(mat->ncols, mat->csums,
                           gk_fmalloc(mat->ncols, "gk_csr_Dup: csums"));
  if (mat->csizes)
    nmat->csizes = gk_fcopy(mat->ncols, mat->csizes,
                            gk_fmalloc(mat->ncols, "gk_csr_Dup: csizes"));
  if (mat->cvols)
    nmat->cvols = gk_fcopy(mat->ncols, mat->cvols,
                           gk_fmalloc(mat->ncols, "gk_csr_Dup: cvols"));
  if (mat->cwgts)
    nmat->cwgts = gk_fcopy(mat->ncols, mat->cwgts,
                           gk_fmalloc(mat->ncols, "gk_csr_Dup: cwgts"));
  if (mat->colind)
    nmat->colind = gk_icopy(mat->colptr[mat->ncols], mat->colind,
                            gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colind"));
  if (mat->colval)
    nmat->colval = gk_fcopy(mat->colptr[mat->ncols], mat->colval,
                            gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colval"));

  return nmat;
}
/*************************************************************************/
/*! Returns a submatrix containint a set of consecutive rows.
\param mat is the original matrix.
\param rstart is the starting row.
\param nrows is the number of rows from rstart to extract.
\returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
/* Extracts the submatrix consisting of rows [rstart, rstart+nrows) of mat.
   Returns NULL if the requested range extends past mat->nrows.
   Only the row-based structure (and rowids/rnorms/rsums if present) is
   copied; the input matrix is not modified.
   Fix: the rowptr renumbering loop and the ASSERTs previously ran even when
   mat->rowptr was NULL (so nmat->rowptr was never allocated), dereferencing
   a NULL pointer; they are now guarded like every other field copy. */
gk_csr_t *gk_csr_ExtractSubmatrix(gk_csr_t *mat, int rstart, int nrows)
{
  ssize_t i;
  gk_csr_t *nmat;

  if (rstart+nrows > mat->nrows)
    return NULL;

  nmat = gk_csr_Create();

  nmat->nrows = nrows;
  nmat->ncols = mat->ncols;

  /* copy the row structure */
  if (mat->rowptr) {
    nmat->rowptr = gk_zcopy(nrows+1, mat->rowptr+rstart,
                            gk_zmalloc(nrows+1, "gk_csr_ExtractSubmatrix: rowptr"));
    /* renumber so the submatrix's offsets start at 0; iterate downwards so
       rowptr[0] (the base being subtracted) is updated last */
    for (i=nrows; i>=0; i--)
      nmat->rowptr[i] -= nmat->rowptr[0];
    ASSERT(nmat->rowptr[0] == 0);
  }

  if (mat->rowids)
    nmat->rowids = gk_icopy(nrows, mat->rowids+rstart,
                            gk_imalloc(nrows, "gk_csr_ExtractSubmatrix: rowids"));
  if (mat->rnorms)
    nmat->rnorms = gk_fcopy(nrows, mat->rnorms+rstart,
                            gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rnorms"));
  if (mat->rsums)
    nmat->rsums = gk_fcopy(nrows, mat->rsums+rstart,
                           gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rsums"));

  if (mat->rowptr) {
    ASSERT(nmat->rowptr[nrows] == mat->rowptr[rstart+nrows]-mat->rowptr[rstart]);
    if (mat->rowind)
      nmat->rowind = gk_icopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                              mat->rowind+mat->rowptr[rstart],
                              gk_imalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                                         "gk_csr_ExtractSubmatrix: rowind"));
    if (mat->rowval)
      nmat->rowval = gk_fcopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                              mat->rowval+mat->rowptr[rstart],
                              gk_fmalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                                         "gk_csr_ExtractSubmatrix: rowval"));
  }

  return nmat;
}
/*************************************************************************/
/*! Returns a submatrix containing a certain set of rows.
\param mat is the original matrix.
\param nrows is the number of rows to extract.
\param rind is the set of row numbers to extract.
\returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
/* Extracts the nrows rows listed in rind (in that order) into a new matrix.
   Only the row-based structure is built; the input matrix is unchanged.
   Fix: the allocation error tags said "gk_csr_ExtractPartition" (copy-paste
   from the function below), which would misattribute any allocation failure;
   they now name this function. */
gk_csr_t *gk_csr_ExtractRows(gk_csr_t *mat, int nrows, int *rind)
{
  ssize_t i, ii, j, nnz;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = nrows;
  nmat->ncols = mat->ncols;

  /* first pass: total number of nonzeros in the selected rows */
  for (nnz=0, i=0; i<nrows; i++)
    nnz += mat->rowptr[rind[i]+1]-mat->rowptr[rind[i]];

  nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractRows: rowptr");
  nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractRows: rowind");
  nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractRows: rowval");

  /* second pass: copy each selected row's indices/values back to back */
  nmat->rowptr[0] = 0;
  for (nnz=0, j=0, ii=0; ii<nrows; ii++) {
    i = rind[ii];
    gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
    gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
    nnz += mat->rowptr[i+1]-mat->rowptr[i];
    nmat->rowptr[++j] = nnz;
  }
  ASSERT(j == nmat->nrows);

  return nmat;
}
/*************************************************************************/
/*! Returns a submatrix corresponding to a specified partitioning of rows.
\param mat is the original matrix.
\param part is the partitioning vector of the rows.
\param pid is the partition ID that will be extracted.
\returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
/* Extracts the rows whose partition id (part[i]) equals pid into a new
   matrix, preserving their relative order.  Two passes: count, then copy. */
gk_csr_t *gk_csr_ExtractPartition(gk_csr_t *mat, int *part, int pid)
{
  ssize_t i, j, nnz;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = 0;
  nmat->ncols = mat->ncols;

  /* pass 1: count selected rows and their nonzeros to size the arrays */
  for (nnz=0, i=0; i<mat->nrows; i++) {
    if (part[i] == pid) {
      nmat->nrows++;
      nnz += mat->rowptr[i+1]-mat->rowptr[i];
    }
  }

  nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr");
  nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractPartition: rowind");
  nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractPartition: rowval");

  /* pass 2: copy the selected rows back to back; j tracks the output row */
  nmat->rowptr[0] = 0;
  for (nnz=0, j=0, i=0; i<mat->nrows; i++) {
    if (part[i] == pid) {
      gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
      gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
      nnz += mat->rowptr[i+1]-mat->rowptr[i];
      nmat->rowptr[++j] = nnz;
    }
  }
  ASSERT(j == nmat->nrows);

  return nmat;
}
/*************************************************************************/
/*! Splits the matrix into multiple sub-matrices based on the provided
color array.
\param mat is the original matrix.
\param color is an array of size equal to the number of non-zeros
in the matrix (row-wise structure). The matrix is split into
as many parts as the number of colors. For meaningfull results,
the colors should be numbered consecutively starting from 0.
\returns an array of matrices for each supplied color number.
*/
/**************************************************************************/
/* Splits mat into one matrix per nonzero color.  color has one entry per
   nonzero of mat's row structure; each output matrix keeps the full
   nrows x ncols shape but contains only the nonzeros of its color. */
gk_csr_t **gk_csr_Split(gk_csr_t *mat, int *color)
{
  ssize_t i, j;
  int nrows, ncolors;
  ssize_t *rowptr;
  int *rowind;
  float *rowval;
  gk_csr_t **smats;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  /* one output matrix per color value: max color over all nonzeros, plus 1 */
  ncolors = gk_imax(rowptr[nrows], color, 1)+1;

  smats = (gk_csr_t **)gk_malloc(sizeof(gk_csr_t *)*ncolors, "gk_csr_Split: smats");
  for (i=0; i<ncolors; i++) {
    smats[i] = gk_csr_Create();
    smats[i]->nrows  = mat->nrows;
    smats[i]->ncols  = mat->ncols;
    /* zero-filled so the counting pass below can increment in place */
    smats[i]->rowptr = gk_zsmalloc(nrows+1, 0, "gk_csr_Split: smats[i]->rowptr");
  }

  /* count, per color, how many nonzeros each row contributes */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++)
      smats[color[j]]->rowptr[i]++;
  }
  /* convert the per-row counts into CSR offsets */
  for (i=0; i<ncolors; i++)
    MAKECSR(j, nrows, smats[i]->rowptr);

  for (i=0; i<ncolors; i++) {
    smats[i]->rowind = gk_imalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowind");
    smats[i]->rowval = gk_fmalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowval");
  }

  /* scatter each nonzero into its color's matrix, advancing that matrix's
     rowptr[i] as the insertion cursor */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      smats[color[j]]->rowind[smats[color[j]]->rowptr[i]] = rowind[j];
      smats[color[j]]->rowval[smats[color[j]]->rowptr[i]] = rowval[j];
      smats[color[j]]->rowptr[i]++;
    }
  }
  /* undo the cursor advancement, restoring proper rowptr arrays */
  for (i=0; i<ncolors; i++)
    SHIFTCSR(j, nrows, smats[i]->rowptr);

  return smats;
}
/**************************************************************************/
/*! Determines the format of the CSR matrix based on the extension.
\param filename is the name of the file.
\param the user-supplied format.
\returns the type. The extension of the file directly maps to the
name of the format.
*/
/**************************************************************************/
/* Resolves GK_CSR_FMT_AUTO to a concrete format by matching the file's
   extension against the known format names; any explicitly supplied format
   is returned unchanged.  Unrecognized extensions default to GK_CSR_FMT_CSR. */
int gk_csr_DetermineFormat(char *filename, int format)
{
  static const struct { const char *ext; int fmt; } fmtmap[] = {
    { "csr",    GK_CSR_FMT_CSR    },
    { "ijv",    GK_CSR_FMT_IJV    },
    { "cluto",  GK_CSR_FMT_CLUTO  },
    { "metis",  GK_CSR_FMT_METIS  },
    { "binrow", GK_CSR_FMT_BINROW },
    { "bincol", GK_CSR_FMT_BINCOL },
    { "bijv",   GK_CSR_FMT_BIJV   },
  };
  size_t k;
  char *extension;

  if (format != GK_CSR_FMT_AUTO)
    return format;

  format = GK_CSR_FMT_CSR;   /* fallback when nothing matches */

  extension = gk_getextname(filename);
  for (k=0; k<sizeof(fmtmap)/sizeof(fmtmap[0]); k++) {
    if (strcmp(extension, fmtmap[k].ext) == 0) {
      format = fmtmap[k].fmt;
      break;
    }
  }
  gk_free((void **)&extension, LTERM);

  return format;
}
/**************************************************************************/
/*! Reads a CSR matrix from the supplied file and stores it the matrix's
forward structure.
\param filename is the file that stores the data.
\param format is either GK_CSR_FMT_METIS, GK_CSR_FMT_CLUTO,
GK_CSR_FMT_CSR, GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL
specifying the type of the input format.
The GK_CSR_FMT_CSR does not contain a header
line, whereas the GK_CSR_FMT_BINROW is a binary format written
by gk_csr_Write() using the same format specifier.
\param readvals is either 1 or 0, indicating if the CSR file contains
values or it does not. It only applies when GK_CSR_FMT_CSR is
used.
\param numbering is either 1 or 0, indicating if the numbering of the
indices start from 1 or 0, respectively. If they start from 1,
they are automatically decreamented during input so that they
will start from 0. It only applies when GK_CSR_FMT_CSR is
used.
\returns the matrix that was read.
*/
/**************************************************************************/
/* Reads a CSR matrix from filename in one of the supported formats.
   Binary formats (BINROW/BINCOL/IJV/BIJV) are fully handled inside the
   switch and return directly; the text formats (CLUTO/METIS/CSR) only set
   up their header state in the switch and share the line-parsing loop that
   follows it. */
gk_csr_t *gk_csr_Read(char *filename, int format, int readvals, int numbering)
{
  ssize_t i, k, l;
  size_t nfields, nrows, ncols, nnz, fmt, ncon;
  size_t lnlen;
  ssize_t *rowptr;
  int *rowind, *iinds, *jinds, ival;
  float *rowval=NULL, *vals, fval;
  int readsizes, readwgts;
  char *line=NULL, *head, *tail, fmtstr[256];
  FILE *fpin;
  gk_csr_t *mat=NULL;

  format = gk_csr_DetermineFormat(filename, format);

  if (!gk_fexists(filename))
    gk_errexit(SIGERR, "File %s does not exist!\n", filename);

  switch (format) {
    /* binary row-structure dump written by gk_csr_Write(GK_CSR_FMT_BINROW) */
    case GK_CSR_FMT_BINROW:
      mat = gk_csr_Create();

      fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
      if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
        gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
      if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
        gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
      mat->rowptr = gk_zmalloc(mat->nrows+1, "gk_csr_Read: rowptr");
      if (fread(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpin) != mat->nrows+1)
        gk_errexit(SIGERR, "Failed to read the rowptr from file %s!\n", filename);
      mat->rowind = gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowind");
      if (fread(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
        gk_errexit(SIGERR, "Failed to read the rowind from file %s!\n", filename);
      if (readvals == 1) {
        mat->rowval = gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowval");
        if (fread(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
          gk_errexit(SIGERR, "Failed to read the rowval from file %s!\n", filename);
      }
      gk_fclose(fpin);
      return mat;

      break;

    /* binary column-structure dump; populates the col* fields instead */
    case GK_CSR_FMT_BINCOL:
      mat = gk_csr_Create();

      fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
      if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
        gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
      if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
        gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
      mat->colptr = gk_zmalloc(mat->ncols+1, "gk_csr_Read: colptr");
      if (fread(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpin) != mat->ncols+1)
        gk_errexit(SIGERR, "Failed to read the colptr from file %s!\n", filename);
      mat->colind = gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Read: colind");
      if (fread(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
        gk_errexit(SIGERR, "Failed to read the colind from file %s!\n", filename);
      if (readvals) {
        mat->colval = gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Read: colval");
        if (fread(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
          gk_errexit(SIGERR, "Failed to read the colval from file %s!\n", filename);
      }
      gk_fclose(fpin);
      return mat;

      break;

    /* text triplets: one "i j [val]" per line */
    case GK_CSR_FMT_IJV:
      /* gk_getfilestats gives line and token counts; with values each line
         holds 3 tokens, without values 2 */
      gk_getfilestats(filename, &nrows, &nnz, NULL, NULL);

      if (readvals == 1 && 3*nrows != nnz)
        gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not a multiple of 3.\n", nnz, readvals);
      if (readvals == 0 && 2*nrows != nnz)
        gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not a multiple of 2.\n", nnz, readvals);

      nnz = nrows;                       /* one nonzero per line */
      numbering = (numbering ? - 1 : 0); /* shift applied to read indices */

      /* read the data into three arrays */
      iinds = gk_i32malloc(nnz, "iinds");
      jinds = gk_i32malloc(nnz, "jinds");
      vals  = (readvals ? gk_fmalloc(nnz, "vals") : NULL);

      fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
      /* nrows/ncols are reused here to track the max row/col id seen */
      for (nrows=0, ncols=0, i=0; i<nnz; i++) {
        if (readvals) {
          if (fscanf(fpin, "%d %d %f", &iinds[i], &jinds[i], &vals[i]) != 3)
            gk_errexit(SIGERR, "Error: Failed to read (i, j, val) for nnz: %zd.\n", i);
        }
        else {
          if (fscanf(fpin, "%d %d", &iinds[i], &jinds[i]) != 2)
            gk_errexit(SIGERR, "Error: Failed to read (i, j) value for nnz: %zd.\n", i);
        }
        iinds[i] += numbering;
        jinds[i] += numbering;

        if (nrows < iinds[i])
          nrows = iinds[i];
        if (ncols < jinds[i])
          ncols = jinds[i];
      }
      nrows++;
      ncols++;
      gk_fclose(fpin);

      /* convert (i, j, v) into a CSR matrix */
      mat = gk_csr_Create();
      mat->nrows = nrows;
      mat->ncols = ncols;
      rowptr = mat->rowptr = gk_zsmalloc(nrows+1, 0, "rowptr");
      rowind = mat->rowind = gk_i32malloc(nnz, "rowind");
      if (readvals)
        rowval = mat->rowval = gk_fmalloc(nnz, "rowval");

      /* counting sort by row: count, prefix-sum, scatter, shift back */
      for (i=0; i<nnz; i++)
        rowptr[iinds[i]]++;
      MAKECSR(i, nrows, rowptr);

      for (i=0; i<nnz; i++) {
        rowind[rowptr[iinds[i]]] = jinds[i];
        if (readvals)
          rowval[rowptr[iinds[i]]] = vals[i];
        rowptr[iinds[i]]++;
      }
      SHIFTCSR(i, nrows, rowptr);

      gk_free((void **)&iinds, &jinds, &vals, LTERM);

      return mat;

      break;

    /* binary triplets with a self-describing header (nrows/ncols/nnz/readvals) */
    case GK_CSR_FMT_BIJV:
      mat = gk_csr_Create();

      fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
      if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
        gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
      if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
        gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
      if (fread(&nnz, sizeof(size_t), 1, fpin) != 1)
        gk_errexit(SIGERR, "Failed to read the nnz from file %s!\n", filename);
      /* note: the stored flag overrides the caller's readvals */
      if (fread(&readvals, sizeof(int32_t), 1, fpin) != 1)
        gk_errexit(SIGERR, "Failed to read the readvals from file %s!\n", filename);

      /* read the data into three arrays */
      iinds = gk_i32malloc(nnz, "iinds");
      jinds = gk_i32malloc(nnz, "jinds");
      vals  = (readvals ? gk_fmalloc(nnz, "vals") : NULL);

      for (i=0; i<nnz; i++) {
        if (fread(&(iinds[i]), sizeof(int32_t), 1, fpin) != 1)
          gk_errexit(SIGERR, "Failed to read iinds[i] from file %s!\n", filename);
        if (fread(&(jinds[i]), sizeof(int32_t), 1, fpin) != 1)
          gk_errexit(SIGERR, "Failed to read jinds[i] from file %s!\n", filename);
        if (readvals) {
          if (fread(&(vals[i]), sizeof(float), 1, fpin) != 1)
            gk_errexit(SIGERR, "Failed to read vals[i] from file %s!\n", filename);
        }
        //printf("%d %d\n", iinds[i], jinds[i]);
      }
      gk_fclose(fpin);

      /* convert (i, j, v) into a CSR matrix (same counting sort as IJV) */
      rowptr = mat->rowptr = gk_zsmalloc(mat->nrows+1, 0, "rowptr");
      rowind = mat->rowind = gk_i32malloc(nnz, "rowind");
      if (readvals)
        rowval = mat->rowval = gk_fmalloc(nnz, "rowval");

      for (i=0; i<nnz; i++)
        rowptr[iinds[i]]++;
      MAKECSR(i, mat->nrows, rowptr);

      for (i=0; i<nnz; i++) {
        rowind[rowptr[iinds[i]]] = jinds[i];
        if (readvals)
          rowval[rowptr[iinds[i]]] = vals[i];
        rowptr[iinds[i]]++;
      }
      SHIFTCSR(i, mat->nrows, rowptr);

      gk_free((void **)&iinds, &jinds, &vals, LTERM);

      return mat;

      break;

    /* the following are handled by a common input code, that comes after the switch */
    case GK_CSR_FMT_CLUTO:
      fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
      /* skip comment lines, then parse the "nrows ncols nnz" header */
      do {
        if (gk_getline(&line, &lnlen, fpin) <= 0)
          gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
      } while (line[0] == '%');
      if (sscanf(line, "%zu %zu %zu", &nrows, &ncols, &nnz) != 3)
        gk_errexit(SIGERR, "Header line must contain 3 integers.\n");

      readsizes = 0;
      readwgts  = 0;
      readvals  = 1;   /* CLUTO files always carry values, 1-based indices */
      numbering = 1;

      break;

    case GK_CSR_FMT_METIS:
      fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
      do {
        if (gk_getline(&line, &lnlen, fpin) <= 0)
          gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
      } while (line[0] == '%');

      fmt = ncon = 0;
      nfields = sscanf(line, "%zu %zu %zu %zu", &nrows, &nnz, &fmt, &ncon);
      if (nfields < 2)
        gk_errexit(SIGERR, "Header line must contain at least 2 integers (#vtxs and #edges).\n");

      ncols = nrows;   /* graphs are square */
      nnz *= 2;        /* header stores edges; each edge appears twice */

      if (fmt > 111)
        gk_errexit(SIGERR, "Cannot read this type of file format [fmt=%zu]!\n", fmt);

      /* decode the 3-digit fmt flags: sizes / weights / values */
      sprintf(fmtstr, "%03zu", fmt%1000);
      readsizes = (fmtstr[0] == '1');
      readwgts  = (fmtstr[1] == '1');
      readvals  = (fmtstr[2] == '1');
      numbering = 1;
      ncon      = (ncon == 0 ? 1 : ncon);

      break;

    case GK_CSR_FMT_CSR:
      readsizes = 0;
      readwgts  = 0;

      /* headerless: derive nrows/nnz from line and token counts */
      gk_getfilestats(filename, &nrows, &nnz, NULL, NULL);

      if (readvals == 1 && nnz%2 == 1)
        gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not even.\n", nnz, readvals);
      if (readvals == 1)
        nnz = nnz/2;
      fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");

      break;

    default:
      gk_errexit(SIGERR, "Unknown csr format.\n");
      return NULL;
  }

  /* common text-format path (CLUTO/METIS/CSR) */
  mat = gk_csr_Create();

  mat->nrows = nrows;

  rowptr = mat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Read: rowptr");
  rowind = mat->rowind = gk_imalloc(nnz, "gk_csr_Read: rowind");
  /* readvals==2 means "no value array at all"; otherwise default values to 1.0 */
  if (readvals != 2)
    rowval = mat->rowval = gk_fsmalloc(nnz, 1.0, "gk_csr_Read: rowval");

  if (readsizes)
    mat->rsizes = gk_fsmalloc(nrows, 0.0, "gk_csr_Read: rsizes");

  if (readwgts)
    mat->rwgts = gk_fsmalloc(nrows*ncon, 0.0, "gk_csr_Read: rwgts");

  /*----------------------------------------------------------------------
   * Read the sparse matrix file
   *---------------------------------------------------------------------*/
  numbering = (numbering ? -1 : 0);
  /* ncols is reused to track the max column index; k counts nonzeros */
  for (ncols=0, rowptr[0]=0, k=0, i=0; i<nrows; i++) {
    do {
      if (gk_getline(&line, &lnlen, fpin) == -1)
        gk_errexit(SIGERR, "Premature end of input file: file while reading row %d\n", i);
    } while (line[0] == '%');

    head = line;
    tail = NULL;

    /* Read vertex sizes */
    if (readsizes) {
#ifdef __MSC__
      mat->rsizes[i] = (float)strtod(head, &tail);
#else
      mat->rsizes[i] = strtof(head, &tail);
#endif
      if (tail == head)
        gk_errexit(SIGERR, "The line for vertex %zd does not have size information\n", i+1);
      if (mat->rsizes[i] < 0)
        errexit("The size for vertex %zd must be >= 0\n", i+1);
      head = tail;
    }

    /* Read vertex weights */
    if (readwgts) {
      for (l=0; l<ncon; l++) {
#ifdef __MSC__
        mat->rwgts[i*ncon+l] = (float)strtod(head, &tail);
#else
        mat->rwgts[i*ncon+l] = strtof(head, &tail);
#endif
        if (tail == head)
          errexit("The line for vertex %zd does not have enough weights "
                  "for the %d constraints.\n", i+1, ncon);
        if (mat->rwgts[i*ncon+l] < 0)
          errexit("The weight vertex %zd and constraint %zd must be >= 0\n", i+1, l);
        head = tail;
      }
    }

    /* Read the rest of the row: alternating column index [and value] tokens
       until strtol makes no progress (end of line) */
    while (1) {
      ival = (int)strtol(head, &tail, 0);
      if (tail == head)
        break;
      head = tail;

      if ((rowind[k] = ival + numbering) < 0)
        gk_errexit(SIGERR, "Error: Invalid column number %d at row %zd.\n", ival, i);

      ncols = gk_max(rowind[k], ncols);

      if (readvals == 1) {
#ifdef __MSC__
        fval = (float)strtod(head, &tail);
#else
        fval = strtof(head, &tail);
#endif
        if (tail == head)
          gk_errexit(SIGERR, "Value could not be found for column! Row:%zd, NNZ:%zd\n", i, k);
        head = tail;

        rowval[k] = fval;
      }
      k++;
    }
    rowptr[i+1] = k;
  }

  if (format == GK_CSR_FMT_METIS) {
    ASSERT(ncols+1 == mat->nrows);
    mat->ncols = mat->nrows;
  }
  else {
    mat->ncols = ncols+1;
  }

  if (k != nnz)
    gk_errexit(SIGERR, "gk_csr_Read: Something wrong with the number of nonzeros in "
                       "the input file. NNZ=%zd, ActualNNZ=%zd.\n", nnz, k);

  gk_fclose(fpin);
  gk_free((void **)&line, LTERM);

  return mat;
}
/**************************************************************************/
/*! Writes the row-based structure of a matrix into a file.
\param mat is the matrix to be written,
\param filename is the name of the output file.
\param format is one of: GK_CSR_FMT_CLUTO, GK_CSR_FMT_CSR,
GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL, GK_CSR_FMT_BIJV.
\param writevals is either 1 or 0 indicating if the values will be
written or not. This is only applicable when GK_CSR_FMT_CSR
is used.
\param numbering is either 1 or 0 indicating if the internal 0-based
numbering will be shifted by one or not during output. This
is only applicable when GK_CSR_FMT_CSR is used.
*/
/**************************************************************************/
/* Writes mat's row (or, for BINCOL, column) structure to filename in the
   requested format.  For METIS and the default/CLUTO text formats a NULL
   filename writes to stdout; the binary formats and IJV require a filename. */
void gk_csr_Write(gk_csr_t *mat, char *filename, int format, int writevals, int numbering)
{
  ssize_t i, j;
  int32_t edge[2];
  FILE *fpout;

  format = gk_csr_DetermineFormat(filename, format);

  switch (format) {
    case GK_CSR_FMT_METIS:
      /* METIS graphs must be square with each edge stored twice, hence the
         even-nonzero requirement */
      if (mat->nrows != mat->ncols || mat->rowptr[mat->nrows]%2 == 1)
        gk_errexit(SIGERR, "METIS output format requires a square symmetric matrix.\n");

      if (filename)
        fpout = gk_fopen(filename, "w", "gk_csr_Write: fpout");
      else
        fpout = stdout;

      /* header: #vertices #edges (edges counted once) */
      fprintf(fpout, "%d %zd\n", mat->nrows, mat->rowptr[mat->nrows]/2);
      for (i=0; i<mat->nrows; i++) {
        for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++)
          fprintf(fpout, " %d", mat->rowind[j]+1);   /* METIS is 1-based */
        fprintf(fpout, "\n");
      }
      if (filename)
        gk_fclose(fpout);
      break;

    case GK_CSR_FMT_BINROW:
      if (filename == NULL)
        gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
      fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");

      /* layout: nrows, ncols, rowptr[], rowind[], [rowval[]] */
      fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
      fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
      fwrite(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpout);
      fwrite(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpout);
      if (writevals)
        fwrite(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpout);

      gk_fclose(fpout);
      return;

      break;

    case GK_CSR_FMT_BINCOL:
      if (filename == NULL)
        gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
      fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");

      /* same layout as BINROW but for the column-based structure */
      fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
      fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
      fwrite(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpout);
      fwrite(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpout);
      if (writevals)
        fwrite(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpout);

      gk_fclose(fpout);
      return;

      break;

    case GK_CSR_FMT_IJV:
      if (filename == NULL)
        gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
      fpout = gk_fopen(filename, "w", "gk_csr_Write: fpout");

      numbering = (numbering ? 1 : 0);
      /* one "i j [val]" line per nonzero */
      for (i=0; i<mat->nrows; i++) {
        for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++) {
          if (writevals)
            fprintf(fpout, "%zd %d %.8f\n", i+numbering, mat->rowind[j]+numbering, mat->rowval[j]);
          else
            fprintf(fpout, "%zd %d\n", i+numbering, mat->rowind[j]+numbering);
        }
      }

      gk_fclose(fpout);
      return;

      break;

    case GK_CSR_FMT_BIJV:
      if (filename == NULL)
        gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
      fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");

      /* header: nrows, ncols, nnz, writevals-flag; then (i, j[, val]) records */
      fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
      fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
      fwrite(&(mat->rowptr[mat->nrows]), sizeof(size_t), 1, fpout);
      fwrite(&writevals, sizeof(int32_t), 1, fpout);

      for (i=0; i<mat->nrows; i++) {
        edge[0] = i;
        for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++) {
          edge[1] = mat->rowind[j];
          fwrite(edge, sizeof(int32_t), 2, fpout);
          if (writevals)
            fwrite(&(mat->rowval[j]), sizeof(float), 1, fpout);
        }
      }

      gk_fclose(fpout);
      return;

      break;

    default:
      /* text CSR/CLUTO output */
      if (filename)
        fpout = gk_fopen(filename, "w", "gk_csr_Write: fpout");
      else
        fpout = stdout;

      if (format == GK_CSR_FMT_CLUTO) {
        fprintf(fpout, "%d %d %zd\n", mat->nrows, mat->ncols, mat->rowptr[mat->nrows]);
        writevals = 1;   /* CLUTO always carries values, 1-based indices */
        numbering = 1;
      }

      for (i=0; i<mat->nrows; i++) {
        for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++) {
          fprintf(fpout, " %d", mat->rowind[j]+(numbering ? 1 : 0));
          if (writevals)
            fprintf(fpout, " %f", mat->rowval[j]);
        }
        fprintf(fpout, "\n");
      }
      if (filename)
        gk_fclose(fpout);
  }
}
/*************************************************************************/
/*! Prunes certain rows/columns of the matrix. The prunning takes place
by analyzing the row structure of the matrix. The prunning takes place
by removing rows/columns but it does not affect the numbering of the
remaining rows/columns.
\param mat the matrix to be prunned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be prunned,
\param minf is the minimum number of rows (columns) that a column (row) must
be present in order to be kept,
\param maxf is the maximum number of rows (columns) that a column (row) must
be present at in order to be kept.
\returns the prunned matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
/* Builds a new row-structure-only matrix with rows (GK_CSR_ROW) or columns
   (GK_CSR_COL) whose nonzero count falls outside [minf, maxf] removed.
   Dimensions and the numbering of the survivors are unchanged; pruned
   entries simply become empty.  Arrays are sized for the worst case
   (no pruning) rather than being shrunk afterwards. */
gk_csr_t *gk_csr_Prune(gk_csr_t *mat, int what, int minf, int maxf)
{
  ssize_t i, j, nnz;
  int nrows, ncols;
  ssize_t *rowptr, *nrowptr;
  int *rowind, *nrowind, *collen;
  float *rowval, *nrowval;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Prune: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_Prune: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_Prune: nrowval");

  switch (what) {
    case GK_CSR_COL:
      /* count each column's occurrences across all rows */
      collen = gk_ismalloc(ncols, 0, "gk_csr_Prune: collen");

      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          ASSERT(rowind[j] < ncols);
          collen[rowind[j]]++;
        }
      }
      /* turn counts into a keep (1) / drop (0) mask */
      for (i=0; i<ncols; i++)
        collen[i] = (collen[i] >= minf && collen[i] <= maxf ? 1 : 0);

      /* copy only the nonzeros whose column survived */
      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (collen[rowind[j]]) {
            nrowind[nnz] = rowind[j];
            nrowval[nnz] = rowval[j];
            nnz++;
          }
        }
        nrowptr[i+1] = nnz;
      }
      gk_free((void **)&collen, LTERM);
      break;

    case GK_CSR_ROW:
      /* keep a row's nonzeros only if its length is within [minf, maxf];
         dropped rows become empty but keep their slot */
      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        if (rowptr[i+1]-rowptr[i] >= minf && rowptr[i+1]-rowptr[i] <= maxf) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++, nnz++) {
            nrowind[nnz] = rowind[j];
            nrowval[nnz] = rowval[j];
          }
        }
        nrowptr[i+1] = nnz;
      }
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the highest weight entries whose
sum accounts for a certain fraction of the overall weight of the
row/column.
\param mat the matrix to be prunned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be prunned,
\param norm indicates the norm that will be used to aggregate the weights
and possible values are 1 or 2,
\param fraction is the fraction of the overall norm that will be retained
by the kept entries.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
/* For each row (GK_CSR_ROW) or column (GK_CSR_COL), keeps only the
   highest-weight entries whose cumulative 1- or 2-norm reaches the given
   fraction of that row/column's total, and returns the result as a new
   row-structure-only matrix.  Column filtering requires mat's column
   structure to exist.  Uses OpenMP: each thread gets its own candidate
   buffer; rows/columns are processed independently. */
gk_csr_t *gk_csr_LowFilter(gk_csr_t *mat, int what, int norm, float fraction)
{
  ssize_t i, j, nnz;
  int nrows, ncols, ncand, maxlen=0;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind;
  float *rowval, *colval, *nrowval, rsum, tsum;
  gk_csr_t *nmat;
  gk_fkv_t *cand;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* worst-case sizing: nothing filtered */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      if (mat->colptr == NULL)
        gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");

      /* nrowptr starts as a copy of rowptr and serves as per-row insertion
         cursors while kept column entries are scattered back into rows */
      gk_zcopy(nrows+1, rowptr, nrowptr);

      /* largest column length bounds the per-thread candidate buffer */
      for (i=0; i<ncols; i++)
        maxlen = gk_max(maxlen, colptr[i+1]-colptr[i]);

      #pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
      {
        cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");

        #pragma omp for schedule(static)
        for (i=0; i<ncols; i++) {
          /* gather column i's entries and its total norm */
          for (tsum=0.0, ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
            cand[ncand].val = colind[j];
            cand[ncand].key = colval[j];
            tsum += (norm == 1 ? colval[j] : colval[j]*colval[j]);
          }
          gk_fkvsortd(ncand, cand);   /* descending by weight */

          /* keep heaviest entries until the fraction is covered; write each
             kept entry into its row at that row's cursor.
             NOTE(review): concurrent columns can touch the same row's
             cursor — presumably disjoint in practice; verify upstream. */
          for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
            rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
            nrowind[nrowptr[cand[j].val]] = i;
            nrowval[nrowptr[cand[j].val]] = cand[j].key;
            nrowptr[cand[j].val]++;
          }
        }

        gk_free((void **)&cand, LTERM);
      }

      /* compact the nrowind/nrowval (rows were written sparsely in place) */
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i] = nnz;
      }
      SHIFTCSR(i, nrows, nrowptr);   /* cursors -> proper rowptr offsets */

      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      for (i=0; i<nrows; i++)
        maxlen = gk_max(maxlen, rowptr[i+1]-rowptr[i]);

      #pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
      {
        cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");

        #pragma omp for schedule(static)
        for (i=0; i<nrows; i++) {
          /* gather row i's entries and its total norm */
          for (tsum=0.0, ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
            cand[ncand].val = rowind[j];
            cand[ncand].key = rowval[j];
            tsum += (norm == 1 ? rowval[j] : rowval[j]*rowval[j]);
          }
          gk_fkvsortd(ncand, cand);   /* descending by weight */

          /* keep heaviest entries in place at the row's original offsets */
          for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
            rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
            nrowind[rowptr[i]+j] = cand[j].val;
            nrowval[rowptr[i]+j] = cand[j].key;
          }
          nrowptr[i+1] = rowptr[i]+j;   /* end of row i's kept entries */
        }

        gk_free((void **)&cand, LTERM);
      }

      /* compact nrowind/nrowval (kept runs still sit at original offsets) */
      nrowptr[0] = nnz = 0;
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i+1]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i+1] = nnz;
      }

      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the highest weight top-K entries
along each row/column and those entries whose weight is greater than
a specified value.
    \param mat the matrix to be pruned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
           of the matrix will be pruned,
\param topk is the number of the highest weight entries to keep.
\param keepval is the weight of a term above which will be kept. This
is used to select additional terms past the first topk.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_TopKPlusFilter(gk_csr_t *mat, int what, int topk, float keepval)
{
  ssize_t i, j, k, nnz;
  int nrows, ncols, ncand;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind;
  float *rowval, *colval, *nrowval;
  gk_csr_t *nmat;
  gk_fkv_t *cand;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* the filtered matrix can never have more non-zeros than the input;
     BUGFIX: allocation tags used to misreport this function as
     gk_csr_LowFilter, confusing OOM diagnostics */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_TopKPlusFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_TopKPlusFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_TopKPlusFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      if (mat->colptr == NULL)
        gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");

      cand = gk_fkvmalloc(nrows, "gk_csr_TopKPlusFilter: cand");

      /* nrowptr starts as a copy of rowptr; each entry is advanced as kept
         column entries are scattered back into their rows */
      gk_zcopy(nrows+1, rowptr, nrowptr);

      for (i=0; i<ncols; i++) {
        /* gather column i as (row-id, weight) pairs */
        for (ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
          cand[ncand].val = colind[j];
          cand[ncand].key = colval[j];
        }
        gk_fkvsortd(ncand, cand);  /* decreasing weight */

        /* always keep the topk heaviest entries */
        k = gk_min(topk, ncand);
        for (j=0; j<k; j++) {
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
        /* additionally keep entries past topk whose weight is >= keepval */
        for (; j<ncand; j++) {
          if (cand[j].key < keepval)
            break;
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
      }

      /* compact the nrowind/nrowval arrays, squeezing out the gaps left
         by the per-row insertion cursors */
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i] = nnz;
      }
      SHIFTCSR(i, nrows, nrowptr);  /* restore nrowptr to proper CSR offsets */

      gk_free((void **)&cand, LTERM);
      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      cand = gk_fkvmalloc(ncols, "gk_csr_TopKPlusFilter: cand");

      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        /* gather row i as (col-id, weight) pairs */
        for (ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
          cand[ncand].val = rowind[j];
          cand[ncand].key = rowval[j];
        }
        gk_fkvsortd(ncand, cand);  /* decreasing weight */

        /* keep the topk heaviest entries */
        k = gk_min(topk, ncand);
        for (j=0; j<k; j++, nnz++) {
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        /* plus any further entries with weight >= keepval */
        for (; j<ncand; j++, nnz++) {
          if (cand[j].key < keepval)
            break;
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        nrowptr[i+1] = nnz;
      }
      gk_free((void **)&cand, LTERM);
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
    filtering takes place by keeping only the terms whose contribution to
    the total length of the document is greater than a user-supplied multiple
    of the average.
    This routine assumes that the vectors are normalized to be unit length.
    \param mat the matrix to be pruned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
           of the matrix will be pruned,
\param zscore is the multiplicative factor over the average contribution
to the length of the document.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ZScoreFilter(gk_csr_t *mat, int what, float zscore)
{
  ssize_t irow, jj, nkept;
  int nrows;
  ssize_t *srcptr, *dstptr;
  int *srcind, *dstind;
  float *srcval, *dstval, cutoff;
  gk_csr_t *fmat;

  /* the filtered matrix shares the dimensions of the input */
  fmat = gk_csr_Create();
  fmat->nrows = mat->nrows;
  fmat->ncols = mat->ncols;

  nrows  = mat->nrows;
  srcptr = mat->rowptr;
  srcind = mat->rowind;
  srcval = mat->rowval;

  /* allocate for the worst case of keeping every entry */
  dstptr = fmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_ZScoreFilter: nrowptr");
  dstind = fmat->rowind = gk_imalloc(srcptr[nrows], "gk_csr_ZScoreFilter: nrowind");
  dstval = fmat->rowval = gk_fmalloc(srcptr[nrows], "gk_csr_ZScoreFilter: nrowval");

  if (what == GK_CSR_COL) {
    gk_errexit(SIGERR, "This has not been implemented yet.\n");
  }
  else if (what == GK_CSR_ROW) {
    if (mat->rowptr == NULL)
      gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

    dstptr[0] = 0;
    for (nkept=0, irow=0; irow<nrows; irow++) {
      /* keep only the entries whose weight exceeds zscore times the
         average per-entry contribution (1/row-length for unit vectors) */
      cutoff = zscore/(srcptr[irow+1]-srcptr[irow]);
      for (jj=srcptr[irow]; jj<srcptr[irow+1]; jj++) {
        if (srcval[jj] > cutoff) {
          dstind[nkept] = srcind[jj];
          dstval[nkept] = srcval[jj];
          nkept++;
        }
      }
      dstptr[irow+1] = nkept;
    }
  }
  else {
    gk_csr_Free(&fmat);
    gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
    return NULL;
  }

  return fmat;
}
/*************************************************************************/
/*! Compacts the column-space of the matrix by removing empty columns.
As a result of the compaction, the column numbers are renumbered.
The compaction operation is done in place and only affects the row-based
representation of the matrix.
The new columns are ordered in decreasing frequency.
\param mat the matrix whose empty columns will be removed.
*/
/**************************************************************************/
void gk_csr_CompactColumns(gk_csr_t *mat)
{
  ssize_t p;
  int nrows, ncols, nused;
  ssize_t *rowptr;
  int *rowind, *remap;
  gk_ikv_t *freqs;

  nrows  = mat->nrows;
  ncols  = mat->ncols;
  rowptr = mat->rowptr;
  rowind = mat->rowind;

  remap = gk_imalloc(ncols, "gk_csr_CompactColumns: colmap");
  freqs = gk_ikvmalloc(ncols, "gk_csr_CompactColumns: clens");

  /* histogram the number of entries appearing in each column */
  for (p=0; p<ncols; p++) {
    freqs[p].key = 0;
    freqs[p].val = p;
  }
  for (p=0; p<rowptr[nrows]; p++)
    freqs[rowind[p]].key++;

  /* sort columns by decreasing frequency; non-empty columns receive
     new consecutive ids, empty ones (all at the tail) are dropped */
  gk_ikvsortd(ncols, freqs);
  for (nused=0, p=0; p<ncols; p++) {
    if (freqs[p].key > 0)
      remap[freqs[p].val] = nused++;
    else
      break;
  }

  /* renumber every column index in place */
  for (p=0; p<rowptr[nrows]; p++)
    rowind[p] = remap[rowind[p]];

  mat->ncols = nused;

  gk_free((void **)&remap, &freqs, LTERM);
}
/*************************************************************************/
/*! Sorts the indices in increasing order
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which set of
indices to sort.
*/
/**************************************************************************/
void gk_csr_SortIndices(gk_csr_t *mat, int what)
{
  int n, nn=0;
  ssize_t *ptr;
  int *ind;
  float *val;

  switch (what) {
    case GK_CSR_ROW:
      if (!mat->rowptr)
        gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
      n   = mat->nrows;
      ptr = mat->rowptr;
      ind = mat->rowind;
      val = mat->rowval;
      break;

    case GK_CSR_COL:
      if (!mat->colptr)
        gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
      n   = mat->ncols;
      ptr = mat->colptr;
      ind = mat->colind;
      val = mat->colval;
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return;
  }

  #pragma omp parallel if (n > 100)
  {
    ssize_t i, j, k;
    gk_ikv_t *cand;
    float *tval;

    /* nn = longest row/column; the implicit barrier of 'single' makes the
       shared value visible to all threads before it is used below */
    #pragma omp single
    for (i=0; i<n; i++)
      nn = gk_max(nn, ptr[i+1]-ptr[i]);

    cand = gk_ikvmalloc(nn, "gk_csr_SortIndices: cand");
    /* BUGFIX: binary matrices store no values (val == NULL); the old code
       dereferenced val unconditionally and crashed on them */
    tval = (val ? gk_fmalloc(nn, "gk_csr_SortIndices: tval") : NULL);

    #pragma omp for schedule(static)
    for (i=0; i<n; i++) {
      /* copy the row/column into (position, index) pairs, detecting
         whether any inversion exists so already-sorted rows are skipped */
      for (k=0, j=ptr[i]; j<ptr[i+1]; j++) {
        if (j > ptr[i] && ind[j] < ind[j-1])
          k = 1; /* an inversion */
        cand[j-ptr[i]].val = j-ptr[i];
        cand[j-ptr[i]].key = ind[j];
        if (val)
          tval[j-ptr[i]] = val[j];
      }
      if (k) {
        gk_ikvsorti(ptr[i+1]-ptr[i], cand);
        for (j=ptr[i]; j<ptr[i+1]; j++) {
          ind[j] = cand[j-ptr[i]].key;
          if (val)
            val[j] = tval[cand[j-ptr[i]].val];  /* permute values to match */
        }
      }
    }

    gk_free((void **)&cand, &tval, LTERM);
  }
}
/*************************************************************************/
/*! Creates a row/column index from the column/row data.
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which index
will be created.
*/
/**************************************************************************/
void gk_csr_CreateIndex(gk_csr_t *mat, int what)
{
  /* 'f' stands for forward, 'r' stands for reverse */
  ssize_t i, j, k, nf, nr;
  ssize_t *fptr, *rptr;
  int *find, *rind;
  float *fval, *rval;

  switch (what) {
    case GK_CSR_COL:
      /* forward = existing row view; the column (reverse) view is rebuilt */
      nf   = mat->nrows;
      fptr = mat->rowptr;
      find = mat->rowind;
      fval = mat->rowval;

      /* discard any stale column view before rebuilding */
      if (mat->colptr) gk_free((void **)&mat->colptr, LTERM);
      if (mat->colind) gk_free((void **)&mat->colind, LTERM);
      if (mat->colval) gk_free((void **)&mat->colval, LTERM);

      nr   = mat->ncols;
      rptr = mat->colptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
      rind = mat->colind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
      /* values are carried over only if the forward view has them */
      rval = mat->colval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
      break;

    case GK_CSR_ROW:
      /* forward = existing column view; the row (reverse) view is rebuilt */
      nf   = mat->ncols;
      fptr = mat->colptr;
      find = mat->colind;
      fval = mat->colval;

      if (mat->rowptr) gk_free((void **)&mat->rowptr, LTERM);
      if (mat->rowind) gk_free((void **)&mat->rowind, LTERM);
      if (mat->rowval) gk_free((void **)&mat->rowval, LTERM);

      nr   = mat->nrows;
      rptr = mat->rowptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
      rind = mat->rowind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
      rval = mat->rowval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return;
  }

  /* histogram: count the entries destined for each reverse position */
  for (i=0; i<nf; i++) {
    for (j=fptr[i]; j<fptr[i+1]; j++)
      rptr[find[j]]++;
  }
  MAKECSR(i, nr, rptr);  /* prefix-sum the counts into CSR start offsets */

  if (rptr[nr] > 6*nr) {
    /* relatively dense reverse index: fill indices and values in two
       separate scatter passes.
       NOTE(review): presumably chosen for better memory behavior when
       nnz is large relative to nr — confirm against upstream rationale. */
    for (i=0; i<nf; i++) {
      for (j=fptr[i]; j<fptr[i+1]; j++)
        rind[rptr[find[j]]++] = i;  /* rptr doubles as insertion cursor */
    }
    SHIFTCSR(i, nr, rptr);  /* undo the cursor advance, restoring offsets */

    if (fval) {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++)
          rval[rptr[find[j]]++] = fval[j];
      }
      SHIFTCSR(i, nr, rptr);
    }
  }
  else {
    /* sparse reverse index: fill indices (and values, if any) together
       in a single scatter pass */
    if (fval) {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++) {
          k = find[j];
          rind[rptr[k]] = i;
          rval[rptr[k]++] = fval[j];
        }
      }
    }
    else {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++)
          rind[rptr[find[j]]++] = i;
      }
    }
    SHIFTCSR(i, nr, rptr);
  }
}
/*************************************************************************/
/*! Normalizes the rows/columns of the matrix to be unit
length.
\param mat the matrix itself,
\param what indicates what will be normalized and is obtained by
specifying GK_CSR_ROW, GK_CSR_COL, GK_CSR_ROW|GK_CSR_COL.
\param norm indicates what norm is to normalize to, 1: 1-norm, 2: 2-norm
*/
/**************************************************************************/
void gk_csr_Normalize(gk_csr_t *mat, int what, int norm)
{
  ssize_t i, j;
  int n;
  ssize_t *ptr;
  float *val, sum;

  if (what&GK_CSR_ROW && mat->rowval) {
    n   = mat->nrows;
    ptr = mat->rowptr;
    val = mat->rowval;

    #pragma omp parallel for if (ptr[n] > OMPMINOPS) private(j,sum) schedule(static)
    for (i=0; i<n; i++) {
      sum = 0.0;
      if (norm == 1) {
        for (j=ptr[i]; j<ptr[i+1]; j++)
          sum += val[j]; /* assume val[j] > 0 */
        if (sum > 0)
          sum = 1.0/sum;
      }
      else if (norm == 2) {
        for (j=ptr[i]; j<ptr[i+1]; j++)
          sum += val[j]*val[j];
        if (sum > 0)
          sum = 1.0/sqrt(sum);
      }
      else {
        /* BUGFIX: an unrecognized norm used to fall through with sum == 0,
           silently zeroing every value in the matrix; leave it untouched */
        continue;
      }
      for (j=ptr[i]; j<ptr[i+1]; j++)
        val[j] *= sum;
    }
  }

  if (what&GK_CSR_COL && mat->colval) {
    n   = mat->ncols;
    ptr = mat->colptr;
    val = mat->colval;

    #pragma omp parallel for if (ptr[n] > OMPMINOPS) private(j,sum) schedule(static)
    for (i=0; i<n; i++) {
      sum = 0.0;
      if (norm == 1) {
        for (j=ptr[i]; j<ptr[i+1]; j++)
          sum += val[j]; /* assume val[j] > 0 */
        if (sum > 0)
          sum = 1.0/sum;
      }
      else if (norm == 2) {
        for (j=ptr[i]; j<ptr[i+1]; j++)
          sum += val[j]*val[j];
        if (sum > 0)
          sum = 1.0/sqrt(sum);
      }
      else {
        continue;  /* unrecognized norm: leave the column unmodified */
      }
      for (j=ptr[i]; j<ptr[i+1]; j++)
        val[j] *= sum;
    }
  }
}
/*************************************************************************/
/*! Applies different row scaling methods.
\param mat the matrix itself,
\param type indicates the type of row scaling. Possible values are:
GK_CSR_MAXTF, GK_CSR_SQRT, GK_CSR_LOG, GK_CSR_IDF, GK_CSR_MAXTF2.
*/
/**************************************************************************/
void gk_csr_Scale(gk_csr_t *mat, int type)
{
  ssize_t i, j;
  int nrows, ncols, nnzcols, bgfreq;
  ssize_t *rowptr;
  int *rowind, *collen;
  float *rowval, *cscale, maxtf;
  double logscale = 1.0/log(2.0);  /* converts natural log to log base 2 */

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  switch (type) {
    case GK_CSR_MAXTF: /* TF' = .5 + .5*TF/MAX(TF) */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j, maxtf) schedule(static)
      for (i=0; i<nrows; i++) {
        /* find the largest absolute value in the row.
           NOTE(review): the seed read rowval[rowptr[i]] assumes the row is
           non-empty; an empty last row would read past the array — confirm */
        maxtf = fabs(rowval[rowptr[i]]);
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);

        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          rowval[j] = .5 + .5*rowval[j]/maxtf;
      }
      break;

    case GK_CSR_MAXTF2: /* TF' = .1 + .9*TF/MAX(TF) */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j, maxtf) schedule(static)
      for (i=0; i<nrows; i++) {
        /* same max-scaling as GK_CSR_MAXTF with a different offset/slope */
        maxtf = fabs(rowval[rowptr[i]]);
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);

        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          rowval[j] = .1 + .9*rowval[j]/maxtf;
      }
      break;

    case GK_CSR_SQRT: /* TF' = .1+SQRT(TF) */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (rowval[j] != 0.0)
            rowval[j] = .1+sign(rowval[j], sqrt(fabs(rowval[j])));  /* sign-preserving sqrt */
        }
      }
      break;

    case GK_CSR_POW25: /* TF' = .1+POW(TF,.25) */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (rowval[j] != 0.0)
            rowval[j] = .1+sign(rowval[j], sqrt(sqrt(fabs(rowval[j]))));  /* x^.25 via nested sqrt */
        }
      }
      break;

    case GK_CSR_POW65: /* TF' = .1+POW(TF,.65) */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (rowval[j] != 0.0)
            rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .65));
        }
      }
      break;

    case GK_CSR_POW75: /* TF' = .1+POW(TF,.75) */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (rowval[j] != 0.0)
            rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .75));
        }
      }
      break;

    case GK_CSR_POW85: /* TF' = .1+POW(TF,.85) */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (rowval[j] != 0.0)
            rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .85));
        }
      }
      break;

    case GK_CSR_LOG: /* TF' = 1+log_2(TF) */
      /* iterates directly over the flat value array since the transform
         needs no per-row state */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) schedule(static,32)
      for (i=0; i<rowptr[nrows]; i++) {
        if (rowval[i] != 0.0)
          rowval[i] = 1+(rowval[i]>0.0 ? log(rowval[i]) : -log(-rowval[i]))*logscale;
      }
      #ifdef XXX
      /* retired per-row variant, kept for reference */
      #pragma omp parallel for private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (rowval[j] != 0.0)
            rowval[j] = 1+(rowval[j]>0.0 ? log(rowval[j]) : -log(-rowval[j]))*logscale;
          //rowval[j] = 1+sign(rowval[j], log(fabs(rowval[j]))*logscale);
        }
      }
      #endif
      break;

    case GK_CSR_IDF: /* TF' = TF*IDF */
      ncols  = mat->ncols;
      cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
      collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");

      /* collen[c] = document frequency of column (term) c */
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          collen[rowind[j]]++;
      }

      /* cscale[c] = log(nrows/df); unused columns get a 0 scale */
      #pragma omp parallel for if (ncols > OMPMINOPS) schedule(static)
      for (i=0; i<ncols; i++)
        cscale[i] = (collen[i] > 0 ? log(1.0*nrows/collen[i]) : 0.0);

      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          rowval[j] *= cscale[rowind[j]];
      }

      gk_free((void **)&cscale, &collen, LTERM);
      break;

    case GK_CSR_IDF2: /* TF' = TF*IDF */
      ncols  = mat->ncols;
      cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
      collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");

      /* collen[c] = document frequency of column (term) c */
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          collen[rowind[j]]++;
      }

      /* count columns that appear at least once */
      nnzcols = 0;
      #pragma omp parallel for if (ncols > OMPMINOPS) schedule(static) reduction(+:nnzcols)
      for (i=0; i<ncols; i++)
        nnzcols += (collen[i] > 0 ? 1 : 0);

      /* bgfreq: background frequency used to smooth the IDF denominator */
      bgfreq = gk_max(10, (ssize_t)(.5*rowptr[nrows]/nnzcols));
      printf("nnz: %zd, nnzcols: %d, bgfreq: %d\n", rowptr[nrows], nnzcols, bgfreq);

      #pragma omp parallel for if (ncols > OMPMINOPS) schedule(static)
      for (i=0; i<ncols; i++)
        cscale[i] = (collen[i] > 0 ? log(1.0*(nrows+2*bgfreq)/(bgfreq+collen[i])) : 0.0);

      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          rowval[j] *= cscale[rowind[j]];
      }

      gk_free((void **)&cscale, &collen, LTERM);
      break;

    default:
      gk_errexit(SIGERR, "Unknown scaling type of %d\n", type);
  }
}
/*************************************************************************/
/*! Computes the sums of the rows/columns
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which
sums to compute.
*/
/**************************************************************************/
void gk_csr_ComputeSums(gk_csr_t *mat, int what)
{
  ssize_t r;
  int count;
  ssize_t *offsets;
  float *weights, *totals;

  /* select the view to reduce and (re)allocate the cache array */
  if (what == GK_CSR_ROW) {
    count   = mat->nrows;
    offsets = mat->rowptr;
    weights = mat->rowval;
    if (mat->rsums)
      gk_free((void **)&mat->rsums, LTERM);
    totals = mat->rsums = gk_fsmalloc(count, 0, "gk_csr_ComputeSums: sums");
  }
  else if (what == GK_CSR_COL) {
    count   = mat->ncols;
    offsets = mat->colptr;
    weights = mat->colval;
    if (mat->csums)
      gk_free((void **)&mat->csums, LTERM);
    totals = mat->csums = gk_fsmalloc(count, 0, "gk_csr_ComputeSums: sums");
  }
  else {
    gk_errexit(SIGERR, "Invalid sum type of %d.\n", what);
    return;
  }

  if (weights) {
    #pragma omp parallel for if (offsets[count] > OMPMINOPS) schedule(static)
    for (r=0; r<count; r++)
      totals[r] = gk_fsum(offsets[r+1]-offsets[r], weights+offsets[r], 1);
  }
  else {
    /* binary matrix: each sum is just the number of stored entries */
    #pragma omp parallel for if (offsets[count] > OMPMINOPS) schedule(static)
    for (r=0; r<count; r++)
      totals[r] = offsets[r+1]-offsets[r];
  }
}
/*************************************************************************/
/*! Computes the norms of the rows/columns
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which
squared norms to compute.
\note If the rowval/colval arrays are NULL, the matrix is assumed
to be binary and the norms are computed accordingly.
*/
/**************************************************************************/
void gk_csr_ComputeNorms(gk_csr_t *mat, int what)
{
  ssize_t i;
  int n;
  ssize_t *ptr;
  float *val, *norms;

  switch (what) {
    case GK_CSR_ROW:
      n   = mat->nrows;
      ptr = mat->rowptr;
      val = mat->rowval;
      /* replace any previously cached norms.
         BUGFIX: allocation tags used to misreport this function as
         gk_csr_ComputeSums, confusing OOM diagnostics */
      if (mat->rnorms) gk_free((void **)&mat->rnorms, LTERM);
      norms = mat->rnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeNorms: norms");
      break;

    case GK_CSR_COL:
      n   = mat->ncols;
      ptr = mat->colptr;
      val = mat->colval;
      if (mat->cnorms) gk_free((void **)&mat->cnorms, LTERM);
      norms = mat->cnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeNorms: norms");
      break;

    default:
      gk_errexit(SIGERR, "Invalid norm type of %d.\n", what);
      return;
  }

  if (val) {
    #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
    for (i=0; i<n; i++)
      norms[i] = sqrt(gk_fdot(ptr[i+1]-ptr[i], val+ptr[i], 1, val+ptr[i], 1));
  }
  else {
    /* binary matrix: the 2-norm is the sqrt of the number of entries */
    #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
    for (i=0; i<n; i++)
      norms[i] = sqrt(ptr[i+1]-ptr[i]);
  }
}
/*************************************************************************/
/*! Computes the squared of the norms of the rows/columns
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which
squared norms to compute.
\note If the rowval/colval arrays are NULL, the matrix is assumed
to be binary and the norms are computed accordingly.
*/
/**************************************************************************/
void gk_csr_ComputeSquaredNorms(gk_csr_t *mat, int what)
{
  ssize_t i;
  int n;
  ssize_t *ptr;
  float *val, *norms;

  switch (what) {
    case GK_CSR_ROW:
      n   = mat->nrows;
      ptr = mat->rowptr;
      val = mat->rowval;
      /* replace any previously cached norms.
         BUGFIX: allocation tags used to misreport this function as
         gk_csr_ComputeSums, confusing OOM diagnostics */
      if (mat->rnorms) gk_free((void **)&mat->rnorms, LTERM);
      norms = mat->rnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSquaredNorms: norms");
      break;

    case GK_CSR_COL:
      n   = mat->ncols;
      ptr = mat->colptr;
      val = mat->colval;
      if (mat->cnorms) gk_free((void **)&mat->cnorms, LTERM);
      norms = mat->cnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSquaredNorms: norms");
      break;

    default:
      gk_errexit(SIGERR, "Invalid norm type of %d.\n", what);
      return;
  }

  if (val) {
    #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
    for (i=0; i<n; i++)
      norms[i] = gk_fdot(ptr[i+1]-ptr[i], val+ptr[i], 1, val+ptr[i], 1);
  }
  else {
    /* binary matrix: the squared 2-norm equals the number of entries */
    #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
    for (i=0; i<n; i++)
      norms[i] = ptr[i+1]-ptr[i];
  }
}
/*************************************************************************/
/*! Returns a new matrix whose rows/columns are shuffled.
\param mat the matrix to be shuffled,
\param what indicates if the rows (GK_CSR_ROW), columns (GK_CSR_COL),
or both (GK_CSR_ROWCOL) will be shuffled,
\param symmetric indicates if the same shuffling will be applied to
both rows and columns. This is valid with nrows==ncols and
GK_CSR_ROWCOL was specified.
\returns the shuffled matrix.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Shuffle(gk_csr_t *mat, int what, int symmetric)
{
  ssize_t i, j;
  int nrows, ncols;
  ssize_t *rowptr, *nrowptr;
  int *rowind, *nrowind;
  int *rperm, *cperm;  /* rperm[i]/cperm[i]: new id of old row/column i */
  float *rowval, *nrowval;
  gk_csr_t *nmat;

  if (what == GK_CSR_ROWCOL && symmetric && mat->nrows != mat->ncols)
    gk_errexit(SIGERR, "The matrix is not square for a symmetric rowcol shuffling.\n");

  nrows  = mat->nrows;
  ncols  = mat->ncols;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  rperm = gk_imalloc(nrows, "gk_csr_Shuffle: rperm");
  cperm = gk_imalloc(ncols, "gk_csr_Shuffle: cperm");

  switch (what) {
    case GK_CSR_ROW:
      /* random row permutation; columns keep their identity */
      gk_RandomPermute(nrows, rperm, 1);
      for (i=0; i<20; i++)  /* extra passes to strengthen the shuffle */
        gk_RandomPermute(nrows, rperm, 0);

      for (i=0; i<ncols; i++)
        cperm[i] = i;
      break;

    case GK_CSR_COL:
      /* random column permutation; rows keep their identity */
      gk_RandomPermute(ncols, cperm, 1);
      for (i=0; i<20; i++)
        gk_RandomPermute(ncols, cperm, 0);

      for (i=0; i<nrows; i++)
        rperm[i] = i;
      break;

    case GK_CSR_ROWCOL:
      gk_RandomPermute(nrows, rperm, 1);
      for (i=0; i<20; i++)
        gk_RandomPermute(nrows, rperm, 0);

      if (symmetric)
        gk_icopy(nrows, rperm, cperm);  /* same permutation on both sides */
      else {
        gk_RandomPermute(ncols, cperm, 1);
        for (i=0; i<20; i++)
          gk_RandomPermute(ncols, cperm, 0);
      }
      break;

    default:
      gk_free((void **)&rperm, &cperm, LTERM);
      gk_errexit(SIGERR, "Unknown shuffling type of %d\n", what);
      return NULL;
  }

  nmat = gk_csr_Create();

  nmat->nrows = nrows;
  nmat->ncols = ncols;

  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Shuffle: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_Shuffle: nrowind");
  nrowval = nmat->rowval = (rowval ? gk_fmalloc(rowptr[nrows], "gk_csr_Shuffle: nrowval") : NULL) ;

  /* nrowptr[rperm[i]] = length of old row i, then prefix-summed into offsets */
  for (i=0; i<nrows; i++)
    nrowptr[rperm[i]] = rowptr[i+1]-rowptr[i];
  MAKECSR(i, nrows, nrowptr);

  /* scatter each old row into its new position, renumbering columns;
     nrowptr entries double as insertion cursors here */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      nrowind[nrowptr[rperm[i]]] = cperm[rowind[j]];
      if (nrowval)
        nrowval[nrowptr[rperm[i]]] = rowval[j];
      nrowptr[rperm[i]]++;
    }
  }
  SHIFTCSR(i, nrows, nrowptr);  /* restore nrowptr to proper CSR offsets */

  gk_free((void **)&rperm, &cperm, LTERM);

  return nmat;
}
/*************************************************************************/
/*! Returns the transpose of the matrix.
\param mat the matrix to be transposed,
\returns the transposed matrix.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Transpose(gk_csr_t *mat)
{
  ssize_t *saved_colptr;
  int32_t *saved_colind;
  float *saved_colval;
  gk_csr_t *tmat;

  /* stash any existing column view so it can be restored afterwards */
  saved_colptr = mat->colptr;
  saved_colind = mat->colind;
  saved_colval = mat->colval;
  mat->colptr  = NULL;
  mat->colind  = NULL;
  mat->colval  = NULL;

  /* a freshly built column index is exactly the transposed row view */
  gk_csr_CreateIndex(mat, GK_CSR_COL);

  tmat = gk_csr_Create();
  tmat->nrows  = mat->ncols;
  tmat->ncols  = mat->nrows;
  tmat->rowptr = mat->colptr;
  tmat->rowind = mat->colind;
  tmat->rowval = mat->colval;

  /* hand mat back its original column view (possibly NULL) */
  mat->colptr = saved_colptr;
  mat->colind = saved_colind;
  mat->colval = saved_colval;

  return tmat;
}
/*************************************************************************/
/*! Computes the similarity between two rows/columns
\param mat the matrix itself. The routine assumes that the indices
are sorted in increasing order.
\param i1 is the first row/column,
\param i2 is the second row/column,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating the type of
objects between the similarity will be computed,
\param simtype is the type of similarity and is one of GK_CSR_COS,
GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
\returns the similarity between the two rows/columns.
*/
/**************************************************************************/
float gk_csr_ComputeSimilarity(gk_csr_t *mat, int i1, int i2, int what,
          int simtype)
{
  int nind1, nind2;
  int *ind1, *ind2;
  float *val1, *val2, stat1, stat2, sim;

  switch (what) {
    case GK_CSR_ROW:
      if (!mat->rowptr)
        gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
      /* lengths and starts of the two sparse vectors being compared */
      nind1 = mat->rowptr[i1+1]-mat->rowptr[i1];
      nind2 = mat->rowptr[i2+1]-mat->rowptr[i2];
      ind1  = mat->rowind + mat->rowptr[i1];
      ind2  = mat->rowind + mat->rowptr[i2];
      val1  = mat->rowval + mat->rowptr[i1];
      val2  = mat->rowval + mat->rowptr[i2];
      break;

    case GK_CSR_COL:
      if (!mat->colptr)
        gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
      nind1 = mat->colptr[i1+1]-mat->colptr[i1];
      nind2 = mat->colptr[i2+1]-mat->colptr[i2];
      ind1  = mat->colind + mat->colptr[i1];
      ind2  = mat->colind + mat->colptr[i2];
      val1  = mat->colval + mat->colptr[i1];
      val2  = mat->colval + mat->colptr[i2];
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return 0.0;
  }

  /* from here on, i1 and i2 are reused as merge cursors over the two
     sorted index lists (the original arguments are no longer needed) */
  switch (simtype) {
    case GK_CSR_COS:
    case GK_CSR_JAC:
      /* merge join: sim accumulates the dot product of the intersection,
         stat1/stat2 the squared norms of the entries visited */
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 && i2<nind2) {
        /* NOTE(review): the next two arms are unreachable given the loop
           condition (i1<nind1 && i2<nind2); dead code kept as-is */
        if (i1 == nind1) {
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else {
          /* matching index: contributes to the dot product and both norms */
          sim   += val1[i1]*val2[i2];
          stat1 += val1[i1]*val1[i1];
          stat2 += val2[i2]*val2[i2];
          i1++;
          i2++;
        }
      }
      /* NOTE(review): the loop stops once either vector is exhausted, so
         the tail of the longer vector is not folded into stat1/stat2 —
         verify this is the intended denominator for COS/JAC */
      if (simtype == GK_CSR_COS)
        sim = (stat1*stat2 > 0.0 ? sim/sqrt(stat1*stat2) : 0.0);
      else
        sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_MIN:
      /* sim accumulates sum of min over the intersection; stat1/stat2 the
         1-norms of the entries visited */
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 && i2<nind2) {
        /* NOTE(review): first two arms unreachable, same as above */
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      /* union-style normalization */
      sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_AMIN:
      /* same accumulation as GK_CSR_MIN, but normalized only by the first
         vector's mass (asymmetric similarity) */
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 && i2<nind2) {
        /* NOTE(review): first two arms unreachable, same as above */
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      sim = (stat1 > 0.0 ? sim/stat1 : 0.0);
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  return sim;
}
/*************************************************************************/
/*! Computes the similarity between two rows/columns
\param mat_a the first matrix. The routine assumes that the indices
are sorted in increasing order.
\param mat_b the second matrix. The routine assumes that the indices
are sorted in increasing order.
\param i1 is the row/column from the first matrix (mat_a),
\param i2 is the row/column from the second matrix (mat_b),
\param what is either GK_CSR_ROW or GK_CSR_COL indicating the type of
objects between the similarity will be computed,
\param simtype is the type of similarity and is one of GK_CSR_COS,
GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
\returns the similarity between the two rows/columns.
*/
/**************************************************************************/
float gk_csr_ComputePairSimilarity(gk_csr_t *mat_a, gk_csr_t *mat_b,
          int i1, int i2, int what, int simtype)
{
  int nind1, nind2;
  int *ind1, *ind2;
  float *val1, *val2, stat1, stat2, sim;

  switch (what) {
    case GK_CSR_ROW:
      if (!mat_a->rowptr || !mat_b->rowptr)
        gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
      /* vector 1 comes from mat_a, vector 2 from mat_b */
      nind1 = mat_a->rowptr[i1+1]-mat_a->rowptr[i1];
      nind2 = mat_b->rowptr[i2+1]-mat_b->rowptr[i2];
      ind1  = mat_a->rowind + mat_a->rowptr[i1];
      ind2  = mat_b->rowind + mat_b->rowptr[i2];
      val1  = mat_a->rowval + mat_a->rowptr[i1];
      val2  = mat_b->rowval + mat_b->rowptr[i2];
      break;

    case GK_CSR_COL:
      if (!mat_a->colptr || !mat_b->colptr)
        gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
      nind1 = mat_a->colptr[i1+1]-mat_a->colptr[i1];
      nind2 = mat_b->colptr[i2+1]-mat_b->colptr[i2];
      ind1  = mat_a->colind + mat_a->colptr[i1];
      ind2  = mat_b->colind + mat_b->colptr[i2];
      val1  = mat_a->colval + mat_a->colptr[i1];
      val2  = mat_b->colval + mat_b->colptr[i2];
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return 0.0;
  }

  /* from here on, i1 and i2 are reused as merge cursors over the two
     sorted index lists (mirrors gk_csr_ComputeSimilarity) */
  switch (simtype) {
    case GK_CSR_COS:
    case GK_CSR_JAC:
      /* merge join: sim = dot product over the intersection, stat1/stat2 =
         squared norms of the entries visited */
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 && i2<nind2) {
        /* NOTE(review): the next two arms are unreachable given the loop
           condition (i1<nind1 && i2<nind2); dead code kept as-is */
        if (i1 == nind1) {
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else {
          sim   += val1[i1]*val2[i2];
          stat1 += val1[i1]*val1[i1];
          stat2 += val2[i2]*val2[i2];
          i1++;
          i2++;
        }
      }
      /* NOTE(review): the loop stops once either vector is exhausted, so
         the tail of the longer vector is not folded into stat1/stat2 —
         verify this is the intended denominator for COS/JAC */
      if (simtype == GK_CSR_COS)
        sim = (stat1*stat2 > 0.0 ? sim/sqrt(stat1*stat2) : 0.0);
      else
        sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_MIN:
      /* sim = sum of min over the intersection; stat1/stat2 = 1-norms of
         the entries visited */
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 && i2<nind2) {
        /* NOTE(review): first two arms unreachable, same as above */
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      /* union-style normalization */
      sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_AMIN:
      /* same accumulation as GK_CSR_MIN, normalized only by the first
         vector's mass (asymmetric similarity) */
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 && i2<nind2) {
        /* NOTE(review): first two arms unreachable, same as above */
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      sim = (stat1 > 0.0 ? sim/stat1 : 0.0);
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  return sim;
}
/*************************************************************************/
/*! Finds the n most similar rows (neighbors) to the query.
\param mat the matrix itself
\param nqterms is the number of columns in the query
\param qind is the list of query columns
    \param qval is the list of corresponding query weights
\param simtype is the type of similarity and is one of GK_CSR_DOTP,
GK_CSR_COS, GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN. In case of
GK_CSR_COS, the rows and the query are assumed to be of unit
length.
\param nsim is the maximum number of requested most similar rows.
If -1 is provided, then everything is returned unsorted.
\param minsim is the minimum similarity of the requested most
similar rows
\param hits is the result set. This array should be at least
of length nsim.
\param i_marker is an array of size equal to the number of rows
whose values are initialized to -1. If NULL is provided
then this array is allocated and freed internally.
\param i_cand is an array of size equal to the number of rows.
If NULL is provided then this array is allocated and freed
internally.
\returns The number of identified most similar rows, which can be
smaller than the requested number of nnbrs in those cases
in which there are no sufficiently many neighbors.
*/
/**************************************************************************/
/* Returns the rows of mat most similar to the sparse query (qind, qval);
   see the block comment above for the full contract. */
int gk_csr_GetSimilarRows(gk_csr_t *mat, int nqterms, int *qind,
    float *qval, int simtype, int nsim, float minsim, gk_fkv_t *hits,
    int *i_marker, gk_fkv_t *i_cand)
{
  ssize_t i, ii, j, k;
  int nrows, ncols, ncand;
  ssize_t *colptr;
  int *colind, *marker;
  float *colval, *rnorms, mynorm, *rsums, mysum;
  gk_fkv_t *cand;

  /* an empty query matches nothing */
  if (nqterms == 0)
    return 0;

  nrows = mat->nrows;
  ncols = mat->ncols;

  /* similarities are accumulated via the column (inverted-index) view */
  GKASSERT((colptr = mat->colptr) != NULL);
  GKASSERT((colind = mat->colind) != NULL);
  GKASSERT((colval = mat->colval) != NULL);

  /* marker[k] is the position of row k in cand[], or -1 if k is not yet a
     candidate; both workspaces may be caller-supplied to avoid re-allocation */
  marker = (i_marker ? i_marker : gk_ismalloc(nrows, -1, "gk_csr_SimilarRows: marker"));
  cand   = (i_cand   ? i_cand   : gk_fkvmalloc(nrows, "gk_csr_SimilarRows: cand"));

  switch (simtype) {
    case GK_CSR_DOTP:
    case GK_CSR_COS:
      /* accumulate dot-products; for COS the rows and the query are assumed
         to be unit-length, so the dot-product is already the cosine */
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k] = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }
      break;

    case GK_CSR_JAC:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k] = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }

      GKASSERT((rnorms = mat->rnorms) != NULL);
      mynorm = gk_fdot(nqterms, qval, 1, qval, 1);

      /* extended Jaccard: dot / (||row||^2 + ||query||^2 - dot) */
      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rnorms[cand[i].val]+mynorm-cand[i].key);
      break;

    case GK_CSR_MIN:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k] = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }

      GKASSERT((rsums = mat->rsums) != NULL);
      mysum = gk_fsum(nqterms, qval, 1);

      /* min-based similarity, normalized by sum(row)+sum(query)-overlap */
      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rsums[cand[i].val]+mysum-cand[i].key);
      break;

    /* Asymmetric MIN similarity */
    case GK_CSR_AMIN:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k] = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }

      mysum = gk_fsum(nqterms, qval, 1);

      /* asymmetric: normalize by the query's sum only */
      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/mysum;
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  /* prune the hits that are below minsim, while resetting marker[] for reuse */
  for (j=0, i=0; i<ncand; i++) {
    marker[cand[i].val] = -1;
    if (cand[i].key >= minsim)
      cand[j++] = cand[i];
  }
  ncand = j;

  if (nsim == -1 || nsim >= ncand) {
    nsim = ncand;  /* return everything, unsorted */
  }
  else {
    nsim = gk_min(nsim, ncand);
    gk_dfkvkselect(ncand, nsim, cand);  /* partial selection of the top-nsim keys */
    gk_fkvsortd(nsim, cand);            /* sort them in decreasing similarity */
  }

  gk_fkvcopy(nsim, cand, hits);

  /* only free workspaces that were allocated internally */
  if (i_marker == NULL)
    gk_free((void **)&marker, LTERM);
  if (i_cand == NULL)
    gk_free((void **)&cand, LTERM);

  return nsim;
}
/*************************************************************************/
/*! Returns a symmetric version of a square matrix. The symmetric version
is constructed by applying an A op A^T operation, where op is one of
GK_CSR_SYM_SUM, GK_CSR_SYM_MIN, GK_CSR_SYM_MAX, GK_CSR_SYM_AVG.
\param mat the matrix to be symmetrized,
\param op indicates the operation to be performed. The possible values are
GK_CSR_SYM_SUM, GK_CSR_SYM_MIN, GK_CSR_SYM_MAX, and GK_CSR_SYM_AVG.
\returns the symmetrized matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_MakeSymmetric(gk_csr_t *mat, int op)
{
ssize_t i, j, k, nnz;
int nrows, nadj, hasvals;
ssize_t *rowptr, *colptr, *nrowptr;
int *rowind, *colind, *nrowind, *marker, *ids;
float *rowval=NULL, *colval=NULL, *nrowval=NULL, *wgts=NULL;
gk_csr_t *nmat;
if (mat->nrows != mat->ncols) {
fprintf(stderr, "gk_csr_MakeSymmetric: The matrix needs to be square.\n");
return NULL;
}
hasvals = (mat->rowval != NULL);
nrows = mat->nrows;
rowptr = mat->rowptr;
rowind = mat->rowind;
if (hasvals)
rowval = mat->rowval;
/* create the column view for efficient processing */
colptr = gk_zsmalloc(nrows+1, 0, "colptr");
colind = gk_i32malloc(rowptr[nrows], "colind");
if (hasvals)
colval = gk_fmalloc(rowptr[nrows], "colval");
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++)
colptr[rowind[j]]++;
}
MAKECSR(i, nrows, colptr);
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
colind[colptr[rowind[j]]] = i;
if (hasvals)
colval[colptr[rowind[j]]] = rowval[j];
colptr[rowind[j]]++;
}
}
SHIFTCSR(i, nrows, colptr);
nmat = gk_csr_Create();
nmat->nrows = mat->nrows;
nmat->ncols = mat->ncols;
nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_MakeSymmetric: nrowptr");
nrowind = nmat->rowind = gk_imalloc(2*rowptr[nrows], "gk_csr_MakeSymmetric: nrowind");
if (hasvals)
nrowval = nmat->rowval = gk_fmalloc(2*rowptr[nrows], "gk_csr_MakeSymmetric: nrowval");
marker = gk_ismalloc(nrows, -1, "marker");
ids = gk_imalloc(nrows, "ids");
if (hasvals)
wgts = gk_fmalloc(nrows, "wgts");
nrowptr[0] = nnz = 0;
for (i=0; i<nrows; i++) {
nadj = 0;
/* out-edges */
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
ids[nadj] = rowind[j];
if (hasvals)
wgts[nadj] = (op == GK_CSR_SYM_AVG ? 0.5*rowval[j] : rowval[j]);
marker[rowind[j]] = nadj++;
}
/* in-edges */
for (j=colptr[i]; j<colptr[i+1]; j++) {
if (marker[colind[j]] == -1) {
if (op != GK_CSR_SYM_MIN) {
ids[nadj] = colind[j];
if (hasvals)
wgts[nadj] = (op == GK_CSR_SYM_AVG ? 0.5*colval[j] : colval[j]);
nadj++;
}
}
else {
if (hasvals) {
switch (op) {
case GK_CSR_SYM_MAX:
wgts[marker[colind[j]]] = gk_max(colval[j], wgts[marker[colind[j]]]);
break;
case GK_CSR_SYM_MIN:
wgts[marker[colind[j]]] = gk_min(colval[j], wgts[marker[colind[j]]]);
break;
case GK_CSR_SYM_SUM:
wgts[marker[colind[j]]] += colval[j];
break;
case GK_CSR_SYM_AVG:
wgts[marker[colind[j]]] = 0.5*(wgts[marker[colind[j]]] + colval[j]);
break;
default:
errexit("Unsupported op for MakeSymmetric!\n");
}
}
marker[colind[j]] = -1;
}
}
/* go over out edges again to resolve any edges that were not found in the in
* edges */
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (marker[rowind[j]] != -1) {
if (op == GK_CSR_SYM_MIN)
ids[marker[rowind[j]]] = -1;
marker[rowind[j]] = -1;
}
}
/* put the non '-1' entries in ids[] into i's row */
for (j=0; j<nadj; j++) {
if (ids[j] != -1) {
nrowind[nnz] = ids[j];
if (hasvals)
nrowval[nnz] = wgts[j];
nnz++;
}
}
nrowptr[i+1] = nnz;
}
gk_free((void **)&colptr, &colind, &colval, &marker, &ids, &wgts, LTERM);
return nmat;
}
/*************************************************************************/
/*! This function finds the connected components in a graph stored in
CSR format.
\param mat is the graph structure in CSR format
\param cptr is the ptr structure of the CSR representation of the
components. The length of this vector must be mat->nrows+1.
\param cind is the indices structure of the CSR representation of
the components. The length of this vector must be mat->nrows.
\param cids is an array that stores the component # of each vertex
of the graph. The length of this vector must be mat->nrows.
\returns the number of components that it found.
\note The cptr, cind, and cids parameters can be NULL, in which case
only the number of connected components is returned.
*/
/*************************************************************************/
/*************************************************************************/
/*! Finds the connected components of the graph stored in mat (see the
    block comment above). Fixes vs. the original: cptr and cind are now
    allocated independently when NULL (the original only tested cptr, so
    passing a non-NULL cptr with a NULL cind dereferenced NULL), and the
    unused locals ii, jj, and mustfree_where were removed. */
/*************************************************************************/
int gk_csr_FindConnectedComponents(gk_csr_t *mat, int32_t *cptr, int32_t *cind,
          int32_t *cids)
{
  ssize_t i, j, k, nvtxs, first, last, ntodo, ncmps;
  ssize_t *xadj;
  int32_t *adjncy, *pos, *todo;
  int32_t mustfree_cptr=0, mustfree_cind=0;

  if (mat->nrows != mat->ncols) {
    fprintf(stderr, "gk_csr_FindComponents: The matrix needs to be square.\n");
    return -1;
  }

  nvtxs  = mat->nrows;
  xadj   = mat->rowptr;
  adjncy = mat->rowind;

  /* Deal with NULL supplied cptr/cind vectors; each one is handled
     independently so that a caller may supply either of the two. */
  if (cptr == NULL) {
    cptr = gk_i32malloc(nvtxs+1, "gk_csr_FindComponents: cptr");
    mustfree_cptr = 1;
  }
  if (cind == NULL) {
    cind = gk_i32malloc(nvtxs, "gk_csr_FindComponents: cind");
    mustfree_cind = 1;
  }

  /* The list of vertices that have not been touched yet.
     The valid entries are from [0..ntodo). */
  todo = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_csr_FindComponents: todo"));

  /* For a vertex that has not been visited, pos[i] is the position in the
     todo list that this vertex is stored.
     If a vertex has been visited, pos[i] = -1. */
  pos = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_csr_FindComponents: pos"));

  /* Find the connected components */
  ncmps = -1;
  ntodo = nvtxs;     /* All vertices have not been visited */
  first = last = 0;  /* Point to the first and last vertices that have been
                        touched but not explored.
                        These vertices are stored in cind[first]...cind[last-1]. */
  while (first < last || ntodo > 0) {
    if (first == last) { /* Find another starting vertex */
      cptr[++ncmps] = first;  /* Mark the end of the current CC */

      /* put the first vertex in the todo list as the start of the new CC */
      ASSERT(pos[todo[0]] != -1);
      cind[last++] = todo[0];

      pos[todo[0]] = -1;
      todo[0] = todo[--ntodo];
      pos[todo[0]] = 0;
    }

    i = cind[first++];  /* Get the first visited but unexplored vertex */

    for (j=xadj[i]; j<xadj[i+1]; j++) {
      k = adjncy[j];
      if (pos[k] != -1) {
        cind[last++] = k;

        /* Remove k from the todo list and put the last item in the todo
           list at the position that k was so that the todo list will be
           consecutive. The pos[] array is updated accordingly to keep track
           of the location of the vertices in the todo[] list. */
        todo[pos[k]] = todo[--ntodo];
        pos[todo[pos[k]]] = pos[k];
        pos[k] = -1;
      }
    }
  }
  cptr[++ncmps] = first;  /* close the last component */

  /* see if we need to return cids */
  if (cids != NULL) {
    for (i=0; i<ncmps; i++) {
      for (j=cptr[i]; j<cptr[i+1]; j++)
        cids[cind[j]] = i;
    }
  }

  if (mustfree_cptr)
    gk_free((void **)&cptr, LTERM);
  if (mustfree_cind)
    gk_free((void **)&cind, LTERM);

  gk_free((void **)&pos, &todo, LTERM);

  return (int) ncmps;
}
/*************************************************************************/
/*! Returns a matrix that has been reordered according to the provided
row/column permutation. The matrix is required to be square and the same
permutation is applied to both rows and columns.
\param[IN] mat is the matrix to be re-ordered.
\param[IN] perm is the new ordering of the rows & columns
\param[IN] iperm is the original ordering of the re-ordered matrix's rows & columns
\returns the newly created reordered matrix.
\note Either perm or iperm can be NULL but not both.
*/
/**************************************************************************/
/*************************************************************************/
/*! Returns mat re-ordered by the given symmetric row/column permutation
    (see the block comment above). Fixes vs. the original: a matrix with
    no values (mat->rowval == NULL) no longer dereferences a NULL rowval
    (mirrors gk_csr_MakeSymmetric's hasvals handling; the result then also
    carries no values), and the unused local k was removed. */
/*************************************************************************/
gk_csr_t *gk_csr_ReorderSymmetric(gk_csr_t *mat, int32_t *perm, int32_t *iperm)
{
  ssize_t j, jj;
  ssize_t *rowptr, *nrowptr;
  int i, u, v, nrows, hasvals;
  int freeperm=0, freeiperm=0;
  int32_t *rowind, *nrowind;
  float *rowval, *nrowval=NULL;
  gk_csr_t *nmat;

  if (mat->nrows != mat->ncols) {
    fprintf(stderr, "gk_csr_ReorderSymmetric: The matrix needs to be square.\n");
    return NULL;
  }

  /* at least one of the two permutations must be supplied */
  if (perm == NULL && iperm == NULL)
    return NULL;

  hasvals = (mat->rowval != NULL);

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  nmat = gk_csr_Create();

  nmat->nrows = nrows;
  nmat->ncols = nrows;

  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_ReorderSymmetric: rowptr");
  nrowind = nmat->rowind = gk_i32malloc(rowptr[nrows], "gk_csr_ReorderSymmetric: rowind");
  if (hasvals)
    nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_ReorderSymmetric: rowval");

  /* allocate memory for the different structures present in the matrix */
  if (mat->rlabels)
    nmat->rlabels = gk_i32malloc(nrows, "gk_csr_ReorderSymmetric: rlabels");
  if (mat->rmap)
    nmat->rmap = gk_i32malloc(nrows, "gk_csr_ReorderSymmetric: rmap");
  if (mat->rnorms)
    nmat->rnorms = gk_fmalloc(nrows, "gk_csr_ReorderSymmetric: rnorms");
  if (mat->rsums)
    nmat->rsums = gk_fmalloc(nrows, "gk_csr_ReorderSymmetric: rsums");
  if (mat->rsizes)
    nmat->rsizes = gk_fmalloc(nrows, "gk_csr_ReorderSymmetric: rsizes");
  if (mat->rvols)
    nmat->rvols = gk_fmalloc(nrows, "gk_csr_ReorderSymmetric: rvols");
  if (mat->rwgts)
    nmat->rwgts = gk_fmalloc(nrows, "gk_csr_ReorderSymmetric: rwgts");
  if (mat->clabels)
    nmat->clabels = gk_i32malloc(nrows, "gk_csr_ReorderSymmetric: clabels");
  if (mat->cmap)
    nmat->cmap = gk_i32malloc(nrows, "gk_csr_ReorderSymmetric: cmap");
  if (mat->cnorms)
    nmat->cnorms = gk_fmalloc(nrows, "gk_csr_ReorderSymmetric: cnorms");
  if (mat->csums)
    nmat->csums = gk_fmalloc(nrows, "gk_csr_ReorderSymmetric: csums");
  if (mat->csizes)
    nmat->csizes = gk_fmalloc(nrows, "gk_csr_ReorderSymmetric: csizes");
  if (mat->cvols)
    nmat->cvols = gk_fmalloc(nrows, "gk_csr_ReorderSymmetric: cvols");
  if (mat->cwgts)
    nmat->cwgts = gk_fmalloc(nrows, "gk_csr_ReorderSymmetric: cwgts");

  /* create perm/iperm if not provided */
  if (perm == NULL) {
    freeperm = 1;
    perm = gk_i32malloc(nrows, "gk_csr_ReorderSymmetric: perm");
    for (i=0; i<nrows; i++)
      perm[iperm[i]] = i;
  }
  if (iperm == NULL) {
    freeiperm = 1;
    iperm = gk_i32malloc(nrows, "gk_csr_ReorderSymmetric: iperm");
    for (i=0; i<nrows; i++)
      iperm[perm[i]] = i;
  }

  /* fill-in the information of the re-ordered matrix */
  nrowptr[0] = jj = 0;
  for (v=0; v<nrows; v++) {
    u = iperm[v];   /* row u of the input becomes row v of the output */
    for (j=rowptr[u]; j<rowptr[u+1]; j++, jj++) {
      nrowind[jj] = perm[rowind[j]];
      if (hasvals)
        nrowval[jj] = rowval[j];
    }
    if (mat->rlabels)
      nmat->rlabels[v] = mat->rlabels[u];
    if (mat->rmap)
      nmat->rmap[v] = mat->rmap[u];
    if (mat->rnorms)
      nmat->rnorms[v] = mat->rnorms[u];
    if (mat->rsums)
      nmat->rsums[v] = mat->rsums[u];
    if (mat->rsizes)
      nmat->rsizes[v] = mat->rsizes[u];
    if (mat->rvols)
      nmat->rvols[v] = mat->rvols[u];
    if (mat->rwgts)
      nmat->rwgts[v] = mat->rwgts[u];
    if (mat->clabels)
      nmat->clabels[v] = mat->clabels[u];
    if (mat->cmap)
      nmat->cmap[v] = mat->cmap[u];
    if (mat->cnorms)
      nmat->cnorms[v] = mat->cnorms[u];
    if (mat->csums)
      nmat->csums[v] = mat->csums[u];
    if (mat->csizes)
      nmat->csizes[v] = mat->csizes[u];
    if (mat->cvols)
      nmat->cvols[v] = mat->cvols[u];
    if (mat->cwgts)
      nmat->cwgts[v] = mat->cwgts[u];
    nrowptr[v+1] = jj;
  }

  /* free memory */
  if (freeperm)
    gk_free((void **)&perm, LTERM);
  if (freeiperm)
    gk_free((void **)&iperm, LTERM);

  return nmat;
}
/*************************************************************************/
/*! This function computes a permutation of the rows/columns of a symmetric
matrix based on a breadth-first-traversal. It can be used for re-ordering
the matrix to reduce its bandwidth for better cache locality.
\param[IN] mat is the matrix whose ordering to be computed.
\param[IN] maxdegree is the maximum number of nonzeros of the rows that
will participate in the BFS ordering. Rows with more nonzeros
will be put at the front of the ordering in decreasing degree
order.
\param[IN] v is the starting row of the BFS. A value of -1 indicates that
a randomly selected row will be used.
\param[OUT] perm[i] stores the ID of row i in the re-ordered matrix.
\param[OUT] iperm[i] stores the ID of the row that corresponds to
the ith vertex in the re-ordered matrix.
\note The perm or iperm (but not both) can be NULL, at which point,
the corresponding arrays are not returned. Though the program
works fine when both are NULL, doing that is not smart.
The returned arrays should be freed with gk_free().
*/
/*************************************************************************/
/*************************************************************************/
/*! Computes a BFS-based re-ordering of a symmetric matrix (see the block
    comment above). Fix vs. the original: when maxdegree >= nrows and
    v == -1 (documented as "pick a random row"), the original indexed
    pos[-1], which is out-of-bounds UB; a random starting row is now
    selected in that case. */
/*************************************************************************/
void gk_csr_ComputeBFSOrderingSymmetric(gk_csr_t *mat, int maxdegree, int v,
         int32_t **r_perm, int32_t **r_iperm)
{
  int i, k, nrows, first, last;
  ssize_t j, *rowptr;
  int32_t *rowind, *cot, *pos;

  if (mat->nrows != mat->ncols) {
    fprintf(stderr, "gk_csr_ComputeBFSOrderingSymmetric: The matrix needs to be square.\n");
    return;
  }
  if (maxdegree < mat->nrows && v != -1) {
    fprintf(stderr, "gk_csr_ComputeBFSOrderingSymmetric: Since maxdegree node renumbering is requested the starting row should be -1.\n");
    return;
  }
  if (mat->nrows <= 0)
    return;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;

  /* This array will function like pos + touched of the CC method */
  pos = gk_i32incset(nrows, 0, gk_i32malloc(nrows, "gk_csr_ComputeBFSOrderingSymmetric: pos"));

  /* This array ([C]losed[O]pen[T]odo => cot) serves three purposes.
     Positions from [0...first) is the current iperm[] vector of the explored rows;
     Positions from [first...last) is the OPEN list (i.e., visited rows);
     Positions from [last...nrows) is the todo list. */
  cot = gk_i32incset(nrows, 0, gk_i32malloc(nrows, "gk_csr_ComputeBFSOrderingSymmetric: cot"));

  first = last = 0;

  /* deal with maxdegree handling */
  if (maxdegree < nrows) {
    last = nrows;
    for (i=nrows-1; i>=0; i--) {
      if (rowptr[i+1]-rowptr[i] < maxdegree) {
        cot[--last] = i;
        pos[i] = last;
      }
      else {
        /* high-degree rows go to the front of the ordering */
        cot[first++] = i;
        pos[i] = -1;
      }
    }
    GKASSERT(first == last);

    if (last > 0) { /* reorder them in degree decreasing order */
      gk_ikv_t *cand = gk_ikvmalloc(first, "gk_csr_ComputeBFSOrderingSymmetric: cand");

      for (i=0; i<first; i++) {
        k = cot[i];
        cand[i].key = (int)(rowptr[k+1]-rowptr[k]);
        cand[i].val = k;
      }
      gk_ikvsortd(first, cand);
      for (i=0; i<first; i++)
        cot[i] = cand[i].val;

      gk_free((void **)&cand, LTERM);
    }

    /* pick a random starting row among the remaining (low-degree) rows */
    v = cot[last + RandomInRange(nrows-last)];
  }

  /* bug fix: when no maxdegree handling takes place, v == -1 means "use a
     random starting row"; the original dereferenced pos[-1] here instead */
  if (v < 0 || v >= nrows)
    v = RandomInRange(nrows);

  /* swap v with the front of the todo list */
  cot[pos[v]] = cot[last];
  pos[cot[last]] = pos[v];
  cot[last] = v;
  pos[v] = last;

  /* start processing the nodes */
  while (first < nrows) {
    if (first == last) { /* find another starting row */
      k = cot[last];
      GKASSERT(pos[k] != -1);
      pos[k] = -1;  /* mark node as being visited */
      last++;
    }

    i = cot[first++];  /* the ++ advances the explored rows */
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      k = rowind[j];
      /* if a node has already been visited, its pos[] will be -1 */
      if (pos[k] != -1) {
        /* pos[k] is the location within cot of where k resides (it is in the
           'todo' part); the entry cot[last] (end of OPEN list) that we are
           about to overwrite is moved there, and pos[] is updated to match. */
        cot[pos[k]]    = cot[last];  /* put the head of the todo list to
                                        where k was in the todo list */
        pos[cot[last]] = pos[k];     /* update pos to reflect the move */
        cot[last++]    = k;          /* put node at the end of the OPEN list */
        pos[k]         = -1;         /* mark node as being visited */
      }
    }
  }

  /* time to decide what to return */
  if (r_perm != NULL) {
    /* use the 'pos' array to build the perm array */
    for (i=0; i<nrows; i++)
      pos[cot[i]] = i;

    *r_perm = pos;
    pos = NULL;
  }

  if (r_iperm != NULL) {
    *r_iperm = cot;
    cot = NULL;
  }

  /* cleanup memory */
  gk_free((void **)&pos, &cot, LTERM);
}
/*************************************************************************/
/*! This function computes a permutation of the rows of a symmetric matrix
based on a best-first-traversal. It can be used for re-ordering the matrix
to reduce its bandwidth for better cache locality.
\param[IN] mat is the matrix structure.
\param[IN] v is the starting row of the best-first traversal.
\param[IN] type indicates the criteria to use to measure the 'bestness'
of a row.
\param[OUT] perm[i] stores the ID of row i in the re-ordered matrix.
\param[OUT] iperm[i] stores the ID of the row that corresponds to
the ith row in the re-ordered matrix.
\note The perm or iperm (but not both) can be NULL, at which point,
the corresponding arrays are not returned. Though the program
works fine when both are NULL, doing that is not smart.
The returned arrays should be freed with gk_free().
*/
/*************************************************************************/
/*************************************************************************/
/*! Computes a best-first-traversal re-ordering of a symmetric matrix (see
    the block comment above). Fixes vs. the original: the final gk_free()
    call contained the mojibake token `°rees' (a mangled `&degrees'),
    which does not compile -- it is restored to `&degrees'; the unused
    locals jj and k were removed. */
/*************************************************************************/
void gk_csr_ComputeBestFOrderingSymmetric(gk_csr_t *mat, int v, int type,
         int32_t **r_perm, int32_t **r_iperm)
{
  ssize_t j, *rowptr;
  int i, u, nrows, nopen, ntodo;
  int32_t *rowind, *perm, *degrees, *wdegrees, *sod, *level, *ot, *pos;
  gk_i32pq_t *queue;

  if (mat->nrows != mat->ncols) {
    fprintf(stderr, "gk_csr_ComputeBestFOrderingSymmetric: The matrix needs to be square.\n");
    return;
  }
  if (mat->nrows <= 0)
    return;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;

  /* the degree of the vertices in the closed list */
  degrees = gk_i32smalloc(nrows, 0, "gk_csr_ComputeBestFOrderingSymmetric: degrees");

  /* the weighted degree of the vertices in the closed list for type==3 */
  wdegrees = gk_i32smalloc(nrows, 0, "gk_csr_ComputeBestFOrderingSymmetric: wdegrees");

  /* the sum of differences for type==4 */
  sod = gk_i32smalloc(nrows, 0, "gk_csr_ComputeBestFOrderingSymmetric: sod");

  /* the encountering level of a vertex for type==5 */
  level = gk_i32smalloc(nrows, 0, "gk_csr_ComputeBestFOrderingSymmetric: level");

  /* The open+todo list of vertices.
     The vertices from [0..nopen] are the open vertices.
     The vertices from [nopen..ntodo) are the todo vertices.
  */
  ot = gk_i32incset(nrows, 0, gk_i32malloc(nrows, "gk_csr_ComputeBestFOrderingSymmetric: ot"));

  /* For a vertex that has not been explored, pos[i] is the position in the ot list. */
  pos = gk_i32incset(nrows, 0, gk_i32malloc(nrows, "gk_csr_ComputeBestFOrderingSymmetric: pos"));

  /* if perm[i] >= 0, then perm[i] is the order of vertex i; otherwise perm[i] == -1. */
  perm = gk_i32smalloc(nrows, -1, "gk_csr_ComputeBestFOrderingSymmetric: perm");

  /* create the queue and put the starting vertex in it */
  queue = gk_i32pqCreate(nrows);
  gk_i32pqInsert(queue, v, 1);

  /* put v at the front of the open list (swap positions 0 and v in ot/pos) */
  pos[0] = ot[0] = v;
  pos[v] = ot[v] = 0;
  nopen = 1;
  ntodo = nrows;

  /* start processing the nodes */
  for (i=0; i<nrows; i++) {
    if (nopen == 0) { /* deal with non-connected graphs */
      gk_i32pqInsert(queue, ot[0], 1);
      nopen++;
    }

    if ((v = gk_i32pqGetTop(queue)) == -1)
      gk_errexit(SIGERR, "The priority queue got empty ahead of time [i=%d].\n", i);

    if (perm[v] != -1)
      gk_errexit(SIGERR, "The perm[%d] has already been set.\n", v);
    perm[v] = i;

    if (ot[pos[v]] != v)
      gk_errexit(SIGERR, "Something went wrong [ot[pos[%d]]!=%d.\n", v, v);
    if (pos[v] >= nopen)
      gk_errexit(SIGERR, "The position of v is not in open list. pos[%d]=%d is >=%d.\n", v, pos[v], nopen);

    /* remove v from the open list and re-arrange the todo part of the list */
    ot[pos[v]] = ot[nopen-1];
    pos[ot[nopen-1]] = pos[v];
    if (ntodo > nopen) {
      ot[nopen-1] = ot[ntodo-1];
      pos[ot[ntodo-1]] = nopen-1;
    }
    nopen--;
    ntodo--;

    for (j=rowptr[v]; j<rowptr[v+1]; j++) {
      u = rowind[j];
      if (perm[u] == -1) {
        /* update ot list, if u is not in the open list, by putting it at the
           end of the open list. */
        if (degrees[u] == 0) {
          ot[pos[u]] = ot[nopen];
          pos[ot[nopen]] = pos[u];
          ot[nopen] = u;
          pos[u] = nopen;
          nopen++;
          level[u] = level[v]+1;
          gk_i32pqInsert(queue, u, 0);
        }

        /* update the in-closed degree */
        degrees[u]++;

        /* update the queues based on the type */
        switch (type) {
          case 1: /* DFS */
            gk_i32pqUpdate(queue, u, 1000*(i+1)+degrees[u]);
            break;

          case 2: /* Max in closed degree */
            gk_i32pqUpdate(queue, u, degrees[u]);
            break;

          case 3: /* Sum of orders in closed list */
            wdegrees[u] += i;
            gk_i32pqUpdate(queue, u, wdegrees[u]);
            break;

          case 4: /* Sum of order-differences */
            /* this is handled at the end of the loop */
            ;
            break;

          case 5: /* BFS with in degree priority */
            gk_i32pqUpdate(queue, u, -(1000*level[u] - degrees[u]));
            break;

          case 6: /* Hybrid of 1+2 */
            gk_i32pqUpdate(queue, u, (i+1)*degrees[u]);
            break;

          default:
            ;
        }
      }
    }

    if (type == 4) { /* update all the vertices in the open list */
      for (j=0; j<nopen; j++) {
        u = ot[j];
        if (perm[u] != -1)
          gk_errexit(SIGERR, "For i=%d, the open list contains a closed row: ot[%zd]=%d, perm[%d]=%d.\n", i, j, u, u, perm[u]);
        sod[u] += degrees[u];
        if (i<1000 || i%25==0)
          gk_i32pqUpdate(queue, u, sod[u]);
      }
    }

    /*
    for (j=0; j<ntodo; j++) {
      if (pos[ot[j]] != j)
        gk_errexit(SIGERR, "pos[ot[%zd]] != %zd.\n", j, j);
    }
    */
  }

  /* time to decide what to return */
  if (r_iperm != NULL) {
    /* use the 'degrees' array to build the iperm array */
    for (i=0; i<nrows; i++)
      degrees[perm[i]] = i;

    *r_iperm = degrees;
    degrees = NULL;
  }

  if (r_perm != NULL) {
    *r_perm = perm;
    perm = NULL;
  }

  /* cleanup memory */
  gk_i32pqDestroy(queue);
  gk_free((void **)&perm, &degrees, &wdegrees, &sod, &ot, &pos, &level, LTERM);
}
|
app_baseline.c | /**
* @file app.c
* @brief Template for a Host Application Source File.
*
*/
#include "../../support/timer.h"
#include <assert.h>
#include <getopt.h>
#include <omp.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
/* Buffers shared by create_test_file(), select_host(), and main(). */
static uint64_t *A;   // input elements (A[i] = i + 1)
static uint64_t *B;   // zero-initialized buffer (allocated but otherwise unused in this chunk)
static uint64_t *C;   // output: elements selected by select_host()
static uint64_t *C2;  // NOTE(review): declared but never allocated or used in this chunk -- confirm
static int pos;       // shared output cursor / selected-element count for select_host()
/* Selection predicate: true exactly when x is even. */
bool pred(const uint64_t x) {
  return (x & 1ULL) == 0ULL;
}
/**
 * @brief Allocates the global A/B/C buffers and fills A with 1..nr_elements
 *        (B is zeroed). Aborts the program if any allocation fails.
 *
 * @param nr_elements number of 64-bit elements per buffer
 * @return pointer to the input buffer A (bug fix: the function previously
 *         fell off the end of a non-void function, which is undefined
 *         behavior; callers in this file ignore the return value)
 */
void *create_test_file(unsigned int nr_elements) {
  A = (uint64_t *)malloc(nr_elements * sizeof(uint64_t));
  B = (uint64_t *)malloc(nr_elements * sizeof(uint64_t));
  C = (uint64_t *)malloc(nr_elements * sizeof(uint64_t));
  /* bug fix: allocation results were never checked before use */
  if (A == NULL || B == NULL || C == NULL) {
    fprintf(stderr, "create_test_file: allocation of %u elements failed\n",
            nr_elements);
    exit(EXIT_FAILURE);
  }
  printf("nr_elements\t%u\t", nr_elements);
  /* unsigned loop index avoids the signed/unsigned comparison of the original */
  for (unsigned int i = 0; i < nr_elements; i++) {
    A[i] = i + 1;
    B[i] = 0;
  }
  return A;
}
/**
* @brief compute output in the host
*/
/**
 * @brief Host reference select: copies A[0] to C[0], then appends every
 *        A[my] (my >= 1) for which pred() is false (i.e. every odd element)
 *        to C. The output order within C is unspecified under parallelism.
 *
 * Bug fix: the original incremented pos under `#pragma omp atomic update`
 * and then read pos in a separate, unprotected statement, so two threads
 * could observe the same index and overwrite each other's output slot.
 * The increment-and-read is now a single `#pragma omp atomic capture`.
 *
 * @param size number of elements in A
 * @param t    number of OpenMP threads to use
 * @return index of the last slot written in C (the count of selected
 *         elements among indices 1..size-1)
 */
static int select_host(int size, int t) {
  pos = 0;
  C[pos] = A[pos];
  omp_set_num_threads(t);
#pragma omp parallel for
  for (int my = 1; my < size; my++) {
    if (!pred(A[my])) {
      int p;
#pragma omp atomic capture
      p = ++pos;
      C[p] = A[my];
    }
  }
  return pos;
}
// Params ---------------------------------------------------------------------
/* Command-line parameters parsed by input_params(). */
typedef struct Params {
  char *dpu_type;   // DPU type string; not parsed by getopt in this chunk -- presumably legacy
  int input_size;   // number of input elements
  int n_warmup;     // number of untimed warm-up iterations
  int n_reps;       // number of timed repetition iterations
  int n_threads;    // number of OpenMP threads for the host kernel
} Params;
/**
 * @brief Prints command-line usage to stderr.
 *
 * Bug fix: the advertised defaults now match the actual defaults set in
 * input_params() (t=5, w=1, e=3, i=16M), and the unsupported "-d" option
 * (absent from the getopt string "hi:w:e:t:") is no longer listed.
 */
void usage() {
  fprintf(stderr,
          "\nUsage:  ./program [options]"
          "\n"
          "\nGeneral options:"
          "\n    -h        help"
          "\n    -t <T>    # of threads (default=5)"
          "\n    -w <W>    # of untimed warmup iterations (default=1)"
          "\n    -e <E>    # of timed repetition iterations (default=3)"
          "\n"
          "\nBenchmark-specific options:"
          "\n    -i <I>    input size (default=16M elements)"
          "\n");
}
/**
 * @brief Parses the command line into a Params structure.
 *
 * Fixes vs. the original: dpu_type is now initialized (it was left
 * indeterminate) and the assertion message says "threads" instead of the
 * copy-pasted "ranks".
 *
 * @param argc argument count from main()
 * @param argv argument vector from main()
 * @return fully-initialized Params (exits via usage() on -h or bad options)
 */
struct Params input_params(int argc, char **argv) {
  struct Params p;
  p.dpu_type = NULL; /* bug fix: field was previously left uninitialized */
  p.input_size = 16 << 20;
  p.n_warmup = 1;
  p.n_reps = 3;
  p.n_threads = 5;

  int opt;
  while ((opt = getopt(argc, argv, "hi:w:e:t:")) >= 0) {
    switch (opt) {
    case 'h':
      usage();
      exit(0);
      break;
    case 'i':
      p.input_size = atoi(optarg);
      break;
    case 'w':
      p.n_warmup = atoi(optarg);
      break;
    case 'e':
      p.n_reps = atoi(optarg);
      break;
    case 't':
      p.n_threads = atoi(optarg);
      break;
    default:
      fprintf(stderr, "\nUnrecognized option!\n");
      usage();
      exit(0);
    }
  }
  assert(p.n_threads > 0 && "Invalid # of threads!");

  return p;
}
/**
* @brief Main of the Host Application.
*/
/**
 * @brief Host application entry point: builds the input, runs the timed
 *        select kernel once, and reports the selected count and kernel time.
 *        (The unused local `accum` from the original was removed.)
 */
int main(int argc, char **argv) {
  struct Params p = input_params(argc, argv);

  const unsigned int file_size = p.input_size;
  int total_count;

  // Create an input file with arbitrary data.
  create_test_file(file_size);

  Timer timer;
  start(&timer, 0, 0);
  total_count = select_host(file_size, p.n_threads);
  stop(&timer, 0);

  printf("Total count = %d\t", total_count);
  printf("Kernel ");
  print(&timer, 0, 1);
  printf("\n");

  free(A);
  free(B);
  free(C);
  return 0;
}
|
Main.c | #include "XSbench_header.h"
#ifdef MPI
#include<mpi.h>
#endif
/* XSBench driver: sets up the nuclide/unionized energy grids and material
   data, then runs the parallel cross-section lookup kernel and reports
   timing (and, under VERIFICATION, a reproducible hash).
   Fixes vs. the original: nprocs is initialized to 1 (it was read
   uninitialized by print_inputs()/print_results() in non-MPI builds), and
   the unused locals index_data and stat were removed. */
int main(int argc, char* argv[])
{
	// =====================================================================
	// Initialization & Command Line Read-In
	// =====================================================================

	int version = 13;
	int mype = 0;
	#if OMP == 1
	int max_procs = omp_get_num_procs();
	#else
	int max_procs = 1;
	#endif
	int i, thread, mat;
	unsigned long seed;
	double omp_start, omp_end, p_energy;
	unsigned long long vhash = 0;
	/* bug fix: defaults to 1 so non-MPI builds do not pass an
	   indeterminate value to print_inputs()/print_results() */
	int nprocs = 1;
	double acc_start, acc_end;

	// Inputs
	int nthreads;
	long n_isotopes;
	long n_gridpoints;
	int lookups;
	char HM[6];

	double *nuclide_grids;
	double *energy_grid;
	int *grid_ptrs;
	int size_mats, *num_nucs, *mats_ptr, *mats;
	double *concs;
	int bench_n;                     // benchmark loop index
	double macro_xs_vector[5];
	char line[256];                  // scratch line for the verification hash
	unsigned long long vhash_local;  // per-lookup verification hash

	#ifdef MPI
	MPI_Init(&argc, &argv);
	MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
	MPI_Comm_rank(MPI_COMM_WORLD, &mype);
	#endif

	// rand() is only used in the serial initialization stages.
	// A custom RNG is used in parallel portions.
	#ifdef VERIFICATION
	srand(26);
	#else
	srand(time(NULL));
	#endif

	// Process CLI Fields -- store in "Inputs" structure
	read_CLI(argc, argv, &nthreads, &n_isotopes, &n_gridpoints, &lookups, HM);

	// Set number of OpenMP Threads
	#if OMP == 1
	omp_set_num_threads(nthreads);
	#endif

	// Print-out of Input Summary
	if(mype == 0) print_inputs(nthreads, n_isotopes, n_gridpoints, lookups, HM, nprocs, version);

	// =====================================================================
	// Prepare Nuclide Energy Grids, Unionized Energy Grid, & Material Data
	// =====================================================================

	// Allocate & fill energy grids
	#ifndef BINARY_READ
	if(mype == 0) printf("Generating Nuclide Energy Grids...\n");
	#endif

	nuclide_grids = (double *) malloc(n_isotopes *n_gridpoints * 6 * sizeof(double));

	#ifdef VERIFICATION
	generate_grids_v(nuclide_grids,n_isotopes,n_gridpoints);
	#else
	generate_grids(nuclide_grids,n_isotopes,n_gridpoints);
	#endif

	// Sort grids by energy
	#ifndef BINARY_READ
	if(mype == 0) printf("Sorting Nuclide Energy Grids...\n");
	sort_nuclide_grids(nuclide_grids,n_isotopes,n_gridpoints);
	#endif

	// Prepare Unionized Energy Grid Framework
	// Double Indexing. Filling in energy_grid with pointers to the
	// nuclide_energy_grids.
	#ifndef BINARY_READ
	energy_grid = generate_energy_grid(n_isotopes,n_gridpoints, nuclide_grids);
	grid_ptrs = generate_grid_ptrs(n_isotopes,n_gridpoints, nuclide_grids, energy_grid);
	#else
	energy_grid = malloc(n_isotopes*n_gridpoints*sizeof(double));
	grid_ptrs = (int *) malloc(n_isotopes*n_gridpoints*n_isotopes*sizeof(int));
	#endif

	#ifdef BINARY_READ
	if(mype == 0) printf("Reading data from \"XS_data.dat\" file...\n");
	binary_read(n_isotopes,n_gridpoints, nuclide_grids, energy_grid, grid_ptrs);
	#endif

	// Get material data
	if(mype == 0) printf("Loading Mats...\n");
	if(n_isotopes == 68) size_mats = 197;
	else size_mats = 484;

	num_nucs = load_num_nucs(n_isotopes);
	mats_ptr = load_mats_ptr(num_nucs);
	mats = load_mats(num_nucs, mats_ptr, size_mats,n_isotopes);

	#ifdef VERIFICATION
	concs = load_concs_v(size_mats);
	#else
	concs = load_concs(size_mats);
	#endif

	#ifdef BINARY_DUMP
	if(mype == 0) printf("Dumping data to binary file...\n");
	binary_dump(n_isotopes,n_gridpoints, nuclide_grids, energy_grid, grid_ptrs);
	if(mype == 0) printf("Binary file \"XS_data.dat\" written! Exiting...\n");
	return 0;
	#endif

	// =====================================================================
	// Cross Section (XS) Parallel Lookup Simulation Begins
	// =====================================================================

	// Outer benchmark loop can loop through all possible # of threads
	#if defined(BENCHMARK) && (OMP == 1)
	for(bench_n = 1; bench_n <=omp_get_num_procs(); bench_n++)
	{
		nthreads = bench_n;
		omp_set_num_threads(nthreads);
	#endif

	if(mype == 0)
	{
		printf("\n");
		border_print();
		center_print("SIMULATION", 79);
		border_print();
	}

	#ifndef OPENACC
	#if OMP == 1
	omp_start = omp_get_wtime();
	#else
	omp_start = timer();
	#endif
	#else
	acc_start = timer();
	#endif

	// initialize papi with one thread (master) here
	#ifdef PAPI
	if ( PAPI_library_init(PAPI_VER_CURRENT) != PAPI_VER_CURRENT){
		fprintf(stderr, "PAPI library init error!\n");
		exit(1);
	}
	#endif

	#ifndef OPENACC
	#pragma omp parallel default(none) \
	private(i, thread, p_energy, mat, seed, vhash_local, line, macro_xs_vector) \
	shared( max_procs, nthreads, n_isotopes, n_gridpoints, lookups, HM, energy_grid, \
	        nuclide_grids, grid_ptrs, mats_ptr, mats, concs, num_nucs, mype, vhash)
	#else
	#pragma acc data \
	copy(vhash) \
	copyin(lookups, n_isotopes, n_gridpoints, \
	       num_nucs[0:n_isotopes], concs[0:size_mats], mats[0:size_mats], mats_ptr[0:12], \
	       energy_grid[0:n_isotopes*n_gridpoints], \
	       grid_ptrs[0:n_isotopes*n_isotopes*n_gridpoints], \
	       nuclide_grids[0:n_isotopes*n_gridpoints*6])
	#endif
	{
		// Initialize parallel PAPI counters
		#ifdef PAPI
		int eventset = PAPI_NULL;
		int num_papi_events;
		#pragma omp critical
		{
			counter_init(&eventset, &num_papi_events);
		}
		#endif

		#ifndef OPENACC
		#if OMP == 1
		thread = omp_get_thread_num();
		#else
		thread = 0;
		#endif
		seed = (thread+1)*19+17;
		#else
		seed = 13; //what to do for openacc?
		#endif

		// XS Lookup Loop
		#ifndef OPENACC
		#pragma omp for schedule(dynamic)
		#else
		#pragma acc parallel loop independent \
		firstprivate(seed) \
		private(macro_xs_vector, p_energy, mat, vhash_local, line)
		#endif
		for(i=0; i<lookups; i++)
		{
			#ifndef OPENACC
			// Status text
			if( INFO && mype == 0 && thread == 0 && i % 1000 == 0 )
				printf("\rCalculating XS's... (%.0lf%% completed)",
					(i / ( (double)lookups / (double)nthreads ))
					/ (double)nthreads * 100.0);
			#endif

			// Randomly pick an energy and material for the particle
			#ifdef VERIFICATION
			#ifndef OPENACC
			#pragma omp critical
			#endif
			{
				mat = pick_mat(&seed);
				p_energy = rn_v();
			}
			#else
			mat = pick_mat(&seed);
			p_energy = rn(&seed);
			#endif

			// This returns the macro_xs_vector, but we're not going
			// to do anything with it in this program, so return value
			// is written over.
			calculate_macro_xs(p_energy, mat, n_isotopes, n_gridpoints,
			                   num_nucs, concs, energy_grid, nuclide_grids,
			                   grid_ptrs, mats, mats_ptr, macro_xs_vector);

			// Verification hash calculation
			// This method provides a consistent hash accross
			// architectures and compilers.
			#ifdef VERIFICATION
			sprintf(line, "%.5lf %d %.5lf %.5lf %.5lf %.5lf %.5lf",
				p_energy, mat,
				macro_xs_vector[0],
				macro_xs_vector[1],
				macro_xs_vector[2],
				macro_xs_vector[3],
				macro_xs_vector[4]);
			vhash_local = hash((unsigned char *)line, 10000);
			#ifndef OPENACC
			#pragma omp atomic
			#endif
			vhash += vhash_local;
			#endif
		}

		// Prints out thread local PAPI counters
		#ifdef PAPI
		if( mype == 0 && thread == 0 )
		{
			printf("\n");
			border_print();
			center_print("PAPI COUNTER RESULTS", 79);
			border_print();
			printf("Count \tSmybol \tDescription\n");
		}
		{
			#pragma omp barrier
		}
		counter_stop(&eventset, num_papi_events);
		#endif
	}

	#ifndef PAPI
	if( mype == 0) printf("\nSimulation complete.\n" );
	#endif

	#ifndef OPENACC
	#if OMP == 1
	omp_end = omp_get_wtime();
	#else
	omp_end = timer();
	#endif
	print_results(nthreads, n_isotopes, n_gridpoints, lookups, HM, mype, omp_end-omp_start, nprocs, vhash);
	#else
	acc_end = timer();
	print_results(nthreads, n_isotopes, n_gridpoints, lookups, HM, mype, acc_end-acc_start, nprocs, vhash);
	#endif

	#if defined(BENCHMARK) && (OMP == 1)
	}
	#endif

	#ifdef MPI
	MPI_Finalize();
	#endif

	return 0;
}
|
GB_unop__log2_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__log2_fp32_fp32)
// op(A') function: GB (_unop_tran__log2_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = log2f (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = log2f (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = log2f (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG2 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__log2_fp32_fp32)
(
    float *Cx,                  // output array; Cx and Ax may be aliased
    const float *Ax,            // input array
    const int8_t *restrict Ab,  // A->b if A is bitmap, NULL otherwise
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // no bitmap: every one of the anz entries is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // Cx [p] = op (cast (Ax [p])) ; cast is the identity here
            Cx [p] = log2f (Ax [p]) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // apply the op only to entries present in the bitmap
            if (Ab [p])
            {
                Cx [p] = log2f (Ax [p]) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// The transpose logic itself lives in the shared template
// GB_unop_transpose.c, which is parameterized by the GB_* macros defined
// above (GB_ATYPE, GB_CTYPE, GB_CAST_OP, ...).
GrB_Info GB (_unop_tran__log2_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// operator or type disabled at compile time: caller falls back to the
// generic (non-specialized) kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
genopheno.h | /**
* CMA-ES, Covariance Matrix Adaptation Evolution Strategy
* Copyright (c) 2014 Inria
* Author: Emmanuel Benazera <emmanuel.benazera@lri.fr>
*
* This file is part of libcmaes.
*
* libcmaes is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* libcmaes is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with libcmaes. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef GENOPHENO_H
#define GENOPHENO_H
#include <libcmaes/noboundstrategy.h>
#include <libcmaes/pwq_bound_strategy.h>
#include <libcmaes/scaling.h>
#include <vector>
namespace libcmaes
{
typedef std::function<void (const double*, double*, const int&)> TransFunc;
/**
 * \brief genotype/phenotype transform: maps candidates between the internal
 *        (genotype) representation the optimizer works in and the external
 *        (phenotype) representation the objective function sees.
 *        The pheno path applies, in order: the custom pheno function (if
 *        any), the bound strategy, then the scaling strategy; the geno path
 *        reverses these steps in the opposite order.
 */
template <class TBoundStrategy=NoBoundStrategy,class TScalingStrategy=NoScalingStrategy>
class GenoPheno
{
friend class CMASolutions;
public:
/**
 * \brief identity transform: no custom functions, no bounds, no scaling.
 */
GenoPheno()
:_id(true)
{}
/**
 * \brief transform with custom geno/pheno functions only.
 * @param genof custom function applied in the geno (phenotype-to-genotype) direction.
 * @param phenof custom function applied in the pheno (genotype-to-phenotype) direction.
 */
GenoPheno(TransFunc &genof, TransFunc &phenof)
:_genof(genof),_phenof(phenof),_id(false)
{}
/**
 * \brief bounded transform, without custom geno/pheno functions.
 * @param lbounds array of lower bounds, one per dimension.
 * @param ubounds array of upper bounds, one per dimension.
 * @param dim problem dimension.
 */
GenoPheno(const double *lbounds, const double *ubounds, const int &dim)
:_boundstrategy(lbounds,ubounds,dim),_id(true),_scalingstrategy(lbounds,ubounds,dim)
{
if (_scalingstrategy._id)
_boundstrategy = TBoundStrategy(lbounds,ubounds,dim);
else
{
// active scaling: rebuild the bound strategy over the scaled interval
// [_intmin,_intmax], keeping the user bounds as the phenotype-side reference.
std::vector<double> lb(dim,_scalingstrategy._intmin);
std::vector<double> ub(dim,_scalingstrategy._intmax);
_boundstrategy = TBoundStrategy(&lb.front(),&ub.front(),lbounds,ubounds,dim);
}
}
/**
 * \brief bounded transform with custom geno/pheno functions.
 * @param genof custom function applied in the geno (phenotype-to-genotype) direction.
 * @param phenof custom function applied in the pheno (genotype-to-phenotype) direction.
 * @param lbounds array of lower bounds, one per dimension.
 * @param ubounds array of upper bounds, one per dimension.
 * @param dim problem dimension.
 */
GenoPheno(TransFunc &genof, TransFunc &phenof,
const double *lbounds, const double *ubounds, const int &dim)
:_boundstrategy(lbounds,ubounds,dim),_genof(genof),_phenof(phenof),_id(false),_scalingstrategy(lbounds,ubounds,dim)
{
if (_scalingstrategy._id)
_boundstrategy = TBoundStrategy(lbounds,ubounds,dim);
else
{
// same scaled-interval setup as the bounded constructor above.
std::vector<double> lb(dim,_scalingstrategy._intmin);
std::vector<double> ub(dim,_scalingstrategy._intmax);
_boundstrategy = TBoundStrategy(&lb.front(),&ub.front(),lbounds,ubounds,dim);
}
}
/**
 * \brief this is a dummy constructor to accommodate an easy to use
 * linear scaling with pwq bounds from a given scaling vector.
 * Outside the library, the proper way to re-specialize for other
 * custom scaling classes would be to inherit GenoPheno and
 * specialize constructors within the new class.
 * @param scaling vector for linear scaling of input parameters.
 * @param shift shift vector applied along with the linear scaling.
 * @param lbounds optional array of lower bounds.
 * @param ubounds optional array of upper bounds.
 */
GenoPheno(const dVec &scaling,
const dVec &shift,
const double *lbounds=nullptr,
const double *ubounds=nullptr)
:_id(true)
{
// the generic template ignores these arguments; see the
// linScalingStrategy specializations below for the functional versions.
(void)scaling;
(void)shift;
(void)lbounds;
(void)ubounds;
}
~GenoPheno() {}
private:
// applies the custom pheno function to every column of candidates;
// returns the input unchanged when the transform is the identity.
dMat pheno_candidates(const dMat &candidates) const
{
if (!_id)
{
dMat ncandidates = dMat(candidates.rows(),candidates.cols());
#pragma omp parallel for if (candidates.cols() >= 100)
for (int i=0;i<candidates.cols();i++)
{
dVec ext = dVec(candidates.rows());
_phenof(candidates.col(i).data(),ext.data(),candidates.rows());
ncandidates.col(i) = ext;
}
return ncandidates;
}
return candidates;
}
// applies the custom geno function to every column of candidates;
// returns the input unchanged when the transform is the identity.
dMat geno_candidates(const dMat &candidates) const
{
if (!_id)
{
dMat ncandidates = dMat(candidates.rows(),candidates.cols());
#pragma omp parallel for if (candidates.cols() >= 100)
for (int i=0;i<candidates.cols();i++)
{
dVec in = dVec(candidates.rows());
_genof(candidates.col(i).data(),in.data(),candidates.rows());
ncandidates.col(i) = in;
}
return ncandidates;
}
return candidates;
}
public:
/**
 * \brief genotype -> phenotype transform of a set of candidates, one
 *        candidate per column: custom pheno function, then bounds,
 *        then scaling.
 */
dMat pheno(const dMat &candidates) const
{
// apply custom pheno function.
dMat ncandidates = pheno_candidates(candidates);
// apply bounds.
#pragma omp parallel for if (ncandidates.cols() >= 100)
for (int i=0;i<ncandidates.cols();i++)
{
dVec ycoli;
_boundstrategy.to_f_representation(ncandidates.col(i),ycoli);
ncandidates.col(i) = ycoli;
}
// apply scaling.
if (!_scalingstrategy._id)
{
#pragma omp parallel for if (ncandidates.cols() >= 100)
for (int i=0;i<ncandidates.cols();i++)
{
dVec ycoli;
_scalingstrategy.scale_to_f(ncandidates.col(i),ycoli);
ncandidates.col(i) = ycoli;
}
}
return ncandidates;
}
/**
 * \brief phenotype -> genotype transform of a set of candidates, one
 *        candidate per column: reverses scaling, then bounds, then
 *        applies the custom geno function.
 */
dMat geno(const dMat &candidates) const
{
// reverse scaling.
dMat ncandidates = candidates;
if (!_scalingstrategy._id)
{
#pragma omp parallel for if (ncandidates.cols() >= 100)
for (int i=0;i<ncandidates.cols();i++)
{
dVec ycoli;
_scalingstrategy.scale_to_internal(ycoli,ncandidates.col(i));
ncandidates.col(i) = ycoli;
}
}
// reverse bounds.
#pragma omp parallel for if (ncandidates.cols() >= 100)
for (int i=0;i<ncandidates.cols();i++)
{
dVec ycoli;
_boundstrategy.to_internal_representation(ycoli,ncandidates.col(i));
ncandidates.col(i) = ycoli;
}
// apply custom geno function.
ncandidates = geno_candidates(ncandidates);
return ncandidates;
}
/**
 * \brief genotype -> phenotype transform of a single candidate.
 */
dVec pheno(const dVec &candidate) const
{
// apply custom pheno function.
dVec ncandidate;
if (!_id)
{
ncandidate = dVec(candidate.rows());
_phenof(candidate.data(),ncandidate.data(),candidate.rows());
}
// apply bounds.
dVec phen = dVec::Zero(candidate.rows());
if (_id)
_boundstrategy.to_f_representation(candidate,phen);
else _boundstrategy.to_f_representation(ncandidate,phen);
// apply scaling.
if (!_scalingstrategy._id)
{
dVec sphen = dVec::Zero(phen.rows());
_scalingstrategy.scale_to_f(phen,sphen);
phen = sphen;
}
return phen;
}
/**
 * \brief phenotype -> genotype transform of a single candidate.
 */
dVec geno(const dVec &candidate) const
{
dVec ccandidate = candidate;
dVec gen = dVec::Zero(candidate.rows());
// reverse scaling.
if (!_scalingstrategy._id)
{
// scale_to_internal writes its output into its first argument.
_scalingstrategy.scale_to_internal(gen,candidate);
ccandidate = gen;
}
// reverse bounds.
_boundstrategy.to_internal_representation(gen,ccandidate);
// apply custom geno function.
if (!_id)
{
dVec ncandidate(gen.rows());
_genof(gen.data(),ncandidate.data(),gen.rows());
return ncandidate;
}
else return gen;
}
// accessors for the underlying strategies.
TBoundStrategy get_boundstrategy() const { return _boundstrategy; }
TBoundStrategy& get_boundstrategy_ref() { return _boundstrategy; }
TScalingStrategy get_scalingstrategy() const { return _scalingstrategy; }
/**
 * \brief drops the given dimensions from the bound and scaling
 *        strategies (no-op for identity strategies).
 * @param k indices of the dimensions to remove.
 */
void remove_dimensions(const std::vector<int> &k)
{
if (!_scalingstrategy.is_id())
_scalingstrategy.remove_dimensions(k);
if (!_boundstrategy.is_id())
_boundstrategy.remove_dimensions(k);
}
private:
TBoundStrategy _boundstrategy; /**< bound handling strategy. */
TransFunc _genof; /**< custom function for the geno (phenotype-to-genotype) direction. */
TransFunc _phenof; /**< custom function for the pheno (genotype-to-phenotype) direction. */
bool _id = false; /**< geno/pheno transform is identity. */
TScalingStrategy _scalingstrategy; /**< scaling strategy. */
};
// specialization when no bound strategy nor scaling applies.
// Specialization without bounds or scaling: only the optional custom
// pheno function applies, so the column-wise helper does all the work.
template<> inline dMat GenoPheno<NoBoundStrategy,NoScalingStrategy>::pheno(const dMat &candidates) const
{
  return _id ? candidates : pheno_candidates(candidates);
}
// Specialization without bounds or scaling: apply the custom pheno
// function to a single candidate, or pass it through unchanged.
template<> inline dVec GenoPheno<NoBoundStrategy,NoScalingStrategy>::pheno(const dVec &candidate) const
{
  if (_id)
    return candidate;
  dVec transformed(candidate.rows());
  _phenof(candidate.data(),transformed.data(),candidate.rows());
  return transformed;
}
// Specialization without bounds or scaling: apply the custom geno
// function to a single candidate, or pass it through unchanged.
template<> inline dVec GenoPheno<NoBoundStrategy,NoScalingStrategy>::geno(const dVec &candidate) const
{
  if (_id)
    return candidate;
  dVec transformed(candidate.rows());
  _genof(candidate.data(),transformed.data(),candidate.rows());
  return transformed;
}
// Specialization with linear scaling and no bounds: custom pheno
// function (if any), followed by scaling to the objective space.
template<> inline dVec GenoPheno<NoBoundStrategy,linScalingStrategy>::pheno(const dVec &candidate) const
{
  dVec sphen;
  if (_id)
    {
      _scalingstrategy.scale_to_f(candidate,sphen);
    }
  else
    {
      dVec transformed(candidate.rows());
      _phenof(candidate.data(),transformed.data(),candidate.rows());
      _scalingstrategy.scale_to_f(transformed,sphen);
    }
  return sphen;
}
// Specialization with linear scaling and no bounds: reverse the scaling,
// then apply the custom geno function (if any).
template<> inline dVec GenoPheno<NoBoundStrategy,linScalingStrategy>::geno(const dVec &candidate) const
{
  // reverse scaling; scale_to_internal writes into its first argument.
  dVec scand = dVec::Zero(candidate.rows());
  _scalingstrategy.scale_to_internal(scand,candidate);
  if (_id)
    return scand;
  // apply custom geno function.
  dVec ncandidate(candidate.rows());
  // BUG FIX: the original called _genof(scand.data(),scand.data(),...),
  // writing the output back into scand, and then returned the
  // never-written (uninitialized) ncandidate. The custom transform's
  // output must land in ncandidate.
  _genof(scand.data(),ncandidate.data(),candidate.rows());
  return ncandidate;
}
// Specialization with linear scaling and no bounds, column-wise version:
// custom pheno function (if any), then scaling of every candidate column.
template<> inline dMat GenoPheno<NoBoundStrategy,linScalingStrategy>::pheno(const dMat &candidates) const
{
  // apply the custom pheno function, or pass through when identity.
  dMat out = _id ? candidates : pheno_candidates(candidates);
  // apply scaling, one candidate per column.
#pragma omp parallel for if (out.cols() >= 100)
  for (int j=0;j<out.cols();j++)
    {
      dVec scaled;
      _scalingstrategy.scale_to_f(out.col(j),scaled);
      out.col(j) = scaled;
    }
  return out;
}
/**
 * \brief scaling-vector constructor, linear-scaling specialization:
 *        builds a linScalingStrategy from the given scaling and shift
 *        vectors. Bounds are ignored since NoBoundStrategy applies.
 */
template<> inline GenoPheno<NoBoundStrategy,linScalingStrategy>::GenoPheno(const dVec &scaling,
const dVec &shift,
const double *lbounds,
const double *ubounds)
:_id(true)
{
// no bound strategy in this specialization: bounds are unused.
(void)lbounds;
(void)ubounds;
_scalingstrategy = linScalingStrategy(scaling,shift);
}
/**
 * \brief scaling-vector constructor, pwq-bounds + linear-scaling
 *        specialization: builds the scaling strategy from the scaling and
 *        shift vectors, then, if bounds are supplied, maps them into the
 *        scaled (internal) space and builds the pwq bound strategy there.
 */
template<> inline GenoPheno<pwqBoundStrategy,linScalingStrategy>::GenoPheno(const dVec &scaling,
const dVec &shift,
const double *lbounds,
const double *ubounds)
:_id(true)
{
_scalingstrategy = linScalingStrategy(scaling,shift);
// without both bounds, keep the default (unbounded) pwq strategy.
if (lbounds == nullptr || ubounds == nullptr)
return;
// wrap the raw bound arrays as Eigen vectors without copying.
dVec vlbounds = Eigen::Map<dVec>(const_cast<double*>(lbounds),scaling.size());
dVec vubounds = Eigen::Map<dVec>(const_cast<double*>(ubounds),scaling.size());
dVec nlbounds, nubounds;
// bounds are expressed in phenotype space; convert them to internal
// space so the pwq strategy operates where the optimizer does.
// scale_to_internal writes into its first argument.
_scalingstrategy.scale_to_internal(nlbounds,vlbounds);
_scalingstrategy.scale_to_internal(nubounds,vubounds);
_boundstrategy = pwqBoundStrategy(nlbounds.data(),nubounds.data(),scaling.size());
}
}
#endif
|
diagsm_x_csc_u_row.c | #include "alphasparse/opt.h"
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
/*
 * Computes the dense result y(r,c) = alpha * x(r,c) for every row of A and
 * each of the `columns` right-hand sides.
 *
 * NOTE(review): the sparse matrix A is only read for its row count. Given
 * the file name (diagsm_..._u_...), this presumably implements a diagonal
 * solve with a *unit* diagonal, which reduces to scaling the right-hand
 * side by alpha -- TODO confirm against the dispatching code.
 *
 * alpha   scalar multiplier
 * A       input sparse matrix (CSC); only A->rows is used
 * x       dense input, column index strided by ldx
 * columns number of right-hand-side columns
 * y       dense output, column index strided by ldy
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSC *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
ALPHA_INT num_thread = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
/* rows are independent, so the outer loop parallelizes without races */
for (ALPHA_INT r = 0; r < A->rows; ++r)
{
for (ALPHA_INT c = 0; c < columns; ++c)
{
/* y(r,c) = alpha * x(r,c) */
alpha_mul(y[index2(r, c, ldy)], alpha, x[index2(r, c, ldx)]);
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
morph_library.h | #include "../include/CImgFloatWrapper.h"
#include "../loewner_morphology.h"
#include "omp.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <thread>
#define THREADS_X 16
#define THREADS_Y 16
#define TILE_X 16
#define TILE_Y 16
#define BLOCK_SIZE 256
#define CONSTANT_SIZE 1024
#define OMP_THREAD_NUM 8
#define NUM_STREAMS 4
#define first(idx, n, dim) (((idx) * (dim)) / (n))
#define last(idx, n, dim) ((first(idx + 1, n, dim)) - 1)
#define size(idx, n, dim) ((last(idx, n, dim)) - (first(idx, n, dim)) + 1)
int __constant__ maskMemory[CONSTANT_SIZE]; // constant memory array
cudaStream_t streams[NUM_STREAMS]; // cuda streams
typedef LoewnerMorphology::MorphCircle Circle;
/*
*==============================================================================================================
* Class that contains morphological operations which are introduced in the paper of B. Burgeth and A. Kleefeld
*==============================================================================================================
*/
class LoewnerMorphology::Morph {
public:
/*
* Constructor of the class Morph. It takes name of the file where image is stored, name of the file where mask (structural element) is stored
* and dimension of the mask as arguments.
*/
Morph(const char *imageFile, const char *maskFile, int maskDim);
/*
* Destructor of the class Morph.
*/
~Morph();
// MORPHOLOGICAL OPERATION
/*
* Performs morphological opration dilation on the input image.
*/
void dilation(int iter = 1);
/*
* Performs morphological operation erosion on the input image.
*/
void erosion(int iter = 1);
/*
* Performs morphological operation closing on the input image.
*/
void closing(int iter = 1);
/*
* Performs morphological operation opening on the input image.
*/
void opening(int iter = 1);
/*
* Performs morphological operation black top hat on the input image.
*/
void blackTopHat(int iter = 1);
/*
* Performs morphological operation white top hat on the input image.
*/
void whiteTopHat(int iter = 1);
/*
* Performs morphological operation self-dual top hat on the input image.
*/
void selfDualTopHat(int iter = 1);
/*
* Performs morphological operation beucher gradient on the input image.
*/
void beucherGradient(int iter = 1);
/*
* Performs morphological operation internal gradient on the input image.
*/
void externalGradient(int iter = 1);
/*
* Performs morphological operation internal gradient on the input image.
*/
void internalGradient(int iter = 1);
/*
* Performs morphological operation morphological laplacian on the input image.
*/
void laplacian(int iter = 1);
/*
* Performs morphological operation shock filter on the input image.
*/
void shockFilter(int iter = 1);
/*
* Displays original image.
*/
void displayOriginalImage();
/*
* Displays result of the morphological operation if the operation is called.
*/
void displayResultImage();
/*
* Returns result image as an array of floats. It allocates memory equal to the size of the image times spectrum.
*/
float *returnResult();
/*
* Saves result to the file which name is provided.
*/
void saveResult(const char *fileName);
private:
CImgFloatWrapper *inputImage; // input image
CImgFloatWrapper *outputImage; // output image - after morphological operation
int *mask; // mask array
int padding; // mask padding
LoewnerMorphology::MorphColorMatrix *matrices; // input image converted to array of MorphColorMatrix objects
LoewnerMorphology::MorphColorMatrix *result; // result image converted to array of MorphColorMatrix objects
int width; // width of the image
int height; // height of the image
int spectrum; // spectrum of the image
int size; // size of the image
// HANDLERS
/*
* Invokes GPU kernel for performing modified Einstein subtraction of two given image vectors in device memory.
* Kernel is launched without further synchronization. Width and height are the dimensions of the original image.
* Since image1 and image2 are image matrices stored in row-major vectorized format, one should provide lda for each of them.
*/
template<typename T>
static void morph_einstein_async(T *image1, T *image2, int width, int height, int lda1, int lda2, cudaStream_t stream = 0);
/*
* Method that copies original image stored on memory location in to device pointer dev_out2, performs modified Einstein substraction
* between images stored on device on locations dev_out1 and dev_out2 and then copies the result to host on memory location out.
* Type of the subtraction is determined by template parameter type. If the type parameter is false, dev_out2 will be subtracted from
* dev_out1, if the type parameter is true, dev_out1 will be subtracted from dev_out2 in terms of Einstein subtraction. The operation
* is performed asynchronously with N streams. Method does not do explicit synchronizations.
*/
template<typename T, bool type, int N>
static void morph_einstein_copy_original_launcher(T *dev_out1, T *dev_out2, T *in, T *out, int width, int height, cudaStream_t *streams);
/*
* Method that performs modified Einstein subtraction between images stored on device on locations dev_out1 and dev_out2 and then copies the result
* to host on memory location out. More precisely, dev_out2 will be subtracted from dev_out1 in terms of Einstein subtraction. The operation
* is performed asynchronously with N streams. Method does not do explicit synchronizations.
*/
template<typename T, int N>
static void morph_einstein_copy_launcher(T *dev_out1, T *dev_out2, T *out, int width, int height, cudaStream_t *streams);
/*
* Method that invokes the kernels for basic morphological operations. Morphological operation is determined with template parameter type. If it is false, dilation
* is performed, in the other case, erosion is performed. Arguments dev_in and dev_out are device pointers. On the location dev_in there is
* an array of size (pWidth + 2 * padding) * (pHeight + 2 * padding) containing image information with appropriate padding converthed to type T. Pointer dev_out contains preallocated
* memory for the result image. Size of the allocated memory is width * height where width and height are initial image dimensions. Also, pWidth and pHeight are expected to be calculated like this:
* pWidth = ((width + THREADS_X - 1) / THREADS_X) * THREADS_X and pHeight = ((height + THREADS_Y - 1) / THREADS_Y) * THREADS_Y. Padding is appropriate mask padding.
* It is calculated as mask dimension / 2 where / is integer division. The type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist.
* Also, sheared memory size in bytes should be provided. On host memory location in, original image converted to type T must be stored.
*/
template<typename T, bool type>
static void morph_basic_launcher(T *dev_in, T *dev_out, T *in, int width, int height, int pWidth, int pHeight, int padding, size_t sharedSize, cudaStream_t stream = 0);
/*
* Method that invokes the kernels for morphological operation shockfilter. Arguments dev_in and dev_out are device pointers. On the location dev_in there is
* an array of size (pWidth + 2 * padding) * (pHeight + 2 * padding) containing image information with appropriate padding converthed to type T. Pointer dev_out contains preallocated
* memory for the result image. Size of the allocated memory is width * height where width and height are initial image dimensions. Also, pWidth and pHeight are expected to be calculated like this:
* pWidth = ((width + THREADS_X - 1) / THREADS_X) * THREADS_X and pHeight = ((height + THREADS_Y - 1) / THREADS_Y) * THREADS_Y. Padding is appropriate mask padding.
* It is calculated as mask dimension / 2 where / is integer division. The type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist.
* Also, sheared memory size in bytes should be provided. On host memory location in, original image converted to type T must be stored. However, on device memory location laplacian, morphological
* laplacian of the original image has to be stored.
*/
template<typename T>
static void morph_shock_launcher(T *dev_in1, T* dev_in2, T *dev_out, T *laplacian, T *in, int width, int height, int pWidth, int pHeight, int padding, size_t sharedSize, cudaStream_t stream = 0);
/*
* Invokes GPU kernel responsible for perfoming wanted basic morphological operation on given image vector. Input vector in is expected to be an image matrix containing objects of type T as elements.
* The vector containing the image matrix must have size width * height. Argument padding is a padding of the given mask. For example, if mask has dimensions 5x5, padding is 2.
* Output vector's size has to be width * height. Morphological operations are performed as explained in the paper, elements are compared using Loewner order, solving smallest circle problem.
* The type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist.
*
* Template parameter type determines morphological operation:
* 1) false -> DILATION
* 2) true -> EROSION
*/
template<typename T, bool type>
static void morph_basic(T *in, T *out, int width, int height, int padding);
/*
* Invokes GPU kernel responsible for perfoming wanted basic morphological operation on given image vector. Input vector dev_in is expected to be an image matrix on GPU memory containing objects
* of type T as elements. The vector containing the image matrix must have size size (pWidth + 2 * padding) * (pHeight + 2 * padding). Argument padding is a padding of the given mask. For example,
* if mask has dimensions 5x5, padding is 2. Output vector's size has to be width * height. Morphological operations are performed as explained in the paper, elements are compared using Loewner order,
* solving Smallest enclosing circle of circles problem. The type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist.
* Also, shared memory size in bytes needs to be provided, as well as original image vector in host memory which elements are converted to type T on memory location in.
*
* Template parameter type determines morphological operation:
* 1) false -> CLOSING
* 2) true -> OPENING
*/
template<typename T, bool type>
static void morph_second_order_launcher(T *dev_in, T *dev_out, T *in, int width, int height, int pWidth, int pHeight, int padding, size_t sharedSize, cudaStream_t stream = 0);
/*
* Invokes GPU kernel responsible for perfoming wanted higher order morphological operation on given image vector. Input vector in is expected to be an image matrix containing objects of type T as elements.
* The vector containing the image matrix must have size width * height. Argument padding is a padding of the given mask (structural element). For example, if mask has dimensions 5x5, padding is 2.
* Output vector's size has to be width * height. Morphological operations are performed as explained in the paper, elements are compared using Loewner order, solving smallest circle problem.
* The type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist.
*
* Template parameter type determines morphological operation:
* 1) false -> CLOSING
* 2) true -> OPENING
*/
template<typename T, bool type>
static void morph_second_order(T *in, T *out, int width, int height, int padding);
/*
* Invokes GPU kernel responsible for perfoming morphological operations white top hat and black top hat on given image vector. Input vector in is expected to be an image matrix containing objects of type T as elements.
* The vector containing the image matrix must have size width * height. Argument padding is a padding of the given mask (structural element). For example, if mask has dimensions 5x5, padding is 2.
* Output vector's size has to be width * height. Morphological operations are performed as explained in the paper, elements are compared using Loewner order, solving smallest circle problem.
* The type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist.
*
* Template parameter type determines morphological operation:
* 1) false -> BLACK TOP HAT
* 2) true -> WHITE TOP HAT
*/
template<typename T, bool type>
static void morph_hats(T *in, T *out, int width, int height, int padding);
/*
* Invokes GPU kernel responsible for perfoming morphological operation Beucher gradient on given image vector. Input vector in is expected to be an image matrix containing objects of type T as elements.
* The vector containing the image matrix must have size width * height. Argument padding is a padding of the given mask (structural element). For example, if mask has dimensions 5x5, padding is 2.
* Output vector's size has to be width * height. Morphological operations are performed as explained in the paper, elements are compared using Loewner order, solving smallest circle problem.
* The type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist.
*/
template<typename T>
static void morph_beucher(T *in, T *out, int width, int height, int padding, int *mask);
/*
* Invokes GPU kernel responsible for perfoming morphological operations self dual top hat on given image vector. Input vector in is expected to be an image matrix containing objects of type T as elements.
* The vector containing the image matrix must have size width * height. Argument padding is a padding of the given mask (structural element). For example, if mask has dimensions 5x5, padding is 2.
* Output vector's size has to be width * height. Morphological operations are performed as explained in the paper, elements are compared using Loewner order, solving smallest circle problem.
* The type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist.
*/
template<typename T>
static void morph_sdth(T *in, T *out, int width, int height, int padding, int *mask);
/*
* Invokes GPU kernel responsible for perfoming morphological operations internal gradient and external gradient on given image vector. Input vector in is expected to be an image matrix containing
* objects of type T as elements. The vector containing the image matrix must have size width * height. Argument padding is a padding of the given mask (structural element). For example, if mask has
* dimensions 5x5, padding is 2. Output vector's size has to be width * height. Morphological operations are performed as explained in the paper, elements are compared using Loewner order, solving
* smallest circle problem. The type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist.
*
* Template parameter type determines morphological operation:
* 1) false -> EXTERNAL GRADIENT
* 2) true -> INTERNAL GRADIENT
*/
template<typename T, bool type>
static void morph_gradients(T *in, T *out, int width, int height, int padding);
/*
* Invokes GPU kernel responsible for perfoming morphological operation morphological Laplacian. Input vector in is expected to be an image matrix containing objects of type T as elements.
* The vector containing the image matrix must have size width * height. Argument padding is a padding of the given mask (structural element). For example, if mask has dimensions 5x5, padding is 2.
* Output vector's size has to be width * height. Morphological operations are performed as explained in the paper, elements are compared using Loewner order, solving smallest circle problem.
* The type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist.
*/
template<typename T>
static void morph_laplacian(T *in, T *out, int width, int height, int padding, int *mask);
/*
* Invokes GPU kernel responsible for perfoming morphological operation shock-filter Input vector in is expected to be an image matrix containing objects of type T as elements.
* The vector containing the image matrix must have size width * height. Argument padding is a padding of the given mask (structural element). For example, if mask has dimensions 5x5, padding is 2.
* Output vector's size has to be width * height. Morphological operations are performed as explained in the paper, elements are compared using Loewner order, solving smallest circle problem.
* The type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist.
*/
template<typename T>
static void morph_shock(T *in, T *out, int width, int height, int padding, int *mask);
/*
* Basic handle for invoking launcher methods which are invoking GPU kernels for performing all morphological operations introduced in the paper from B. Burgeth and A. Kleefeld. Pointers in and out
* must be host pointer. Memory location in must contain original image matrix which elements are converted to the type T. Result of the selected morphological operation will be stored on memory location
* out. This memory location should be preallocated to the size of width * height. Argument padding is a padding of the given mask (structural element). For example, if mask has dimensions 5x5, padding is 2.
* Argument iters defines number of iterations.
*
* Morphological operation is determined by morphType argument:
* 0) DILATION
* 1) EROSION
* 2) CLOSING
* 3) OPENING
* 4) BLACK TOP HAT
* 5) WHITE TOP HAT
* 6) SELF-DUAL TOP HAT
* 7) BEUCHER GRADIENT
* 8) EXTERNAL GRADIENT
* 9) INTERNAL GRADIENT
* 10) MORPHOLOGICAL LAPLACIAN
* 11) SHOCK FILTER
*/
template<typename T>
static void morph_handle(T *in, T *out, int width, int height, int padding, int *mask, int morphType, int iters = 0);
// HELPER METHODS
/*
* Helper method that creates output image (CImgFloatWrapper object) from an array of MorphColorMatrix objects result stored as a class variable.
*/
void createOutputImage();
/*
* Helper method for copying one array to another.
*/
template<typename T>
static void copy(T *in, T *out, int size);
/*
* Helper method that prepares grid for fill kernel.
*/
static inline void prepareGrid1(dim3 &gridDim, dim3 &blockDim, int height);
/*
* Helper method that prepares grid for the morph kernel. Arguments pWidth and pHeight must be multiples of THREADS_X and THREADS_Y constants, respectively.
 * More precisely, pWidth = ((width + THREADS_X - 1) / THREADS_X) * THREADS_X and pHeight = ((height + THREADS_Y - 1) / THREADS_Y) * THREADS_Y.
*/
static inline void prepareGrid2(dim3 &gridDim, dim3 &blockDim, int pWidth, int pHeight);
/*
* Helper method that prepares grid for einstein kernel.
*/
static inline void prepareGrid3(dim3 &gridDim, dim3 &blockDim, int width, int height);
/*
* Reading mask from a file specified by given string. Also, mask dimension needs to be provided. Mask
* is expected to be a maskDim * maskDim matrix containing only 0 and 1.
*/
static void readMaskFromFile(int *maskPointer, int maskDim, const char *fileName);
// DEBUGGING
/*
* Helper method for printing matrix of MorphCircle objects to the standard output.
 * Used for debugging.
*/
static void __host__ __device__ print_shared_vector(Circle *in, int width, int height, int lda);
/*
* Helper method for printing matrix of MorphColorMatrix objects to the standard output.
 * Used for debugging.
*/
static void __host__ __device__ print_shared_vector(LoewnerMorphology::MorphColorMatrix *in, int width, int height, int lda);
/*
* Helper method for printing matrix of floats to the standard output.
 * Used for debugging.
*/
static void __host__ __device__ print_shared_vector(float *in, int width, int height, int lda);
};
void LoewnerMorphology::Morph::readMaskFromFile(int *maskPointer, int maskDim, const char *fileName) {
    // Reads a maskDim x maskDim structuring element from a text file into
    // maskPointer. Aborts the program if any entry cannot be parsed.
    FILE *file = NULL;
    open_file(file, fileName, "r");

    const int total = maskDim * maskDim;
    for (int idx = 0; idx < total; idx++) {
        if (fscanf(file, "%d", maskPointer + idx) != 1) {
            printf("Error while reading file %s.\n", fileName);
            exit(EXIT_FAILURE);
        }
    }

    close_file(file, fileName);
}
void __host__ __device__ LoewnerMorphology::Morph::print_shared_vector(Circle *in, int width, int height, int lda) {
    // Debug helper: prints a height x width matrix of Circle objects that is
    // stored with row stride lda.
    for (int row = 0; row < height; row++) {
        Circle *rowPtr = in + row * lda;
        for (int col = 0; col < width; col++) {
            rowPtr[col].print();
            printf(" ");
        }
        printf("\n");
    }
}
void __host__ __device__ LoewnerMorphology::Morph::print_shared_vector(float *in, int width, int height, int lda) {
    // Debug helper: prints a height x width matrix of floats ("%5.2f" each)
    // stored with row stride lda.
    for (int row = 0; row < height; row++) {
        const float *rowPtr = in + row * lda;
        for (int col = 0; col < width; col++) {
            printf("%5.2f ", rowPtr[col]);
        }
        printf("\n");
    }
}
void __host__ __device__ LoewnerMorphology::Morph::print_shared_vector(LoewnerMorphology::MorphColorMatrix *in, int width, int height, int lda) {
    // Debug helper: prints a height x width matrix of MorphColorMatrix objects
    // stored with row stride lda.
    for (int row = 0; row < height; row++) {
        MorphColorMatrix *rowPtr = in + row * lda;
        for (int col = 0; col < width; col++) {
            rowPtr[col].printMorphColorMatrix();
            printf(" ");
        }
        printf("\n");
    }
}
inline void LoewnerMorphology::Morph::prepareGrid1(dim3 &gridDim, dim3 &blockDim, int height) {
    // Launch configuration for the fill kernel: one BLOCK_SIZE-wide,
    // one-row-high block per image row.
    blockDim = dim3(BLOCK_SIZE, 1);
    gridDim = dim3(1, height);
}
inline void LoewnerMorphology::Morph::prepareGrid2(dim3 &gridDim, dim3 &blockDim, int pWidth, int pHeight) {
    // Launch configuration for the morph kernel: THREADS_X x THREADS_Y blocks
    // tiling the padded image (pWidth/pHeight are already exact multiples).
    blockDim = dim3(THREADS_X, THREADS_Y);
    gridDim = dim3(pWidth / THREADS_X, pHeight / THREADS_Y);
}
inline void LoewnerMorphology::Morph::prepareGrid3(dim3 &gridDim, dim3 &blockDim, int width, int height) {
    // Launch configuration for the einstein kernel: TILE_X x TILE_Y blocks,
    // grid sized by ceiling division so the whole image is covered.
    blockDim = dim3(TILE_X, TILE_Y);
    gridDim = dim3((width + TILE_X - 1) / TILE_X, (height + TILE_Y - 1) / TILE_Y);
}
template<typename T>
void LoewnerMorphology::Morph::morph_einstein_async(T *image1, T *image2, int width, int height, int lda1, int lda2, cudaStream_t stream) {
    // Launches the einstein kernel asynchronously on the given stream for two
    // width x height images with row strides lda1 and lda2. (The copy
    // launchers read the combined result from image1 afterwards.)
    dim3 grid, block;
    prepareGrid3(grid, block, width, height);
    LoewnerMorphology::einstein_kernel<T><<<grid, block, 0, stream>>>(image1, image2, width, height, lda1, lda2);
}
template<typename T, bool type, int N>
void LoewnerMorphology::Morph::morph_einstein_copy_original_launcher(T *dev_out1, T *dev_out2, T *in, T *out, int width, int height, cudaStream_t *streams) {
    // Splits the image into N horizontal chunks and, per stream, overlaps the
    // host->device upload of the original image (into dev_out2), the einstein
    // kernel, and the device->host download of the combined result into out.
    // The `type` flag selects which buffer is the kernel's first operand (the
    // one the result is read back from).
    //
    // Fixed: the locals were named `first` and `size`, shadowing the helpers
    // of the same name used in their own initializers (ill-formed if those
    // helpers are functions); renamed to offset/chunkHeight/chunkBytes.
    #pragma unroll
    for (int i = 0; i < N; i++) {
        int offset = first(i, N, height) * width;        // linear index of the chunk's first element
        int chunkHeight = size(i, N, height);            // number of image rows in this chunk
        size_t chunkBytes = chunkHeight * width * sizeof(T);

        cuda_exec(cudaMemcpyAsync(dev_out2 + offset, in + offset, chunkBytes, cudaMemcpyHostToDevice, streams[i]));
        morph_einstein_async(((type) ? dev_out2 : dev_out1) + offset, ((type) ? dev_out1 : dev_out2) + offset, width, chunkHeight, width, width, streams[i]);
        cuda_exec(cudaMemcpyAsync(out + offset, ((type) ? dev_out2 : dev_out1) + offset, chunkBytes, cudaMemcpyDeviceToHost, streams[i]));
    }
}
template<typename T, int N>
void LoewnerMorphology::Morph::morph_einstein_copy_launcher(T *dev_out1, T *dev_out2, T *out, int width, int height, cudaStream_t *streams) {
    // Splits the image into N horizontal chunks and, per stream, overlaps the
    // einstein kernel (combining dev_out1 and dev_out2, result in dev_out1)
    // with the device->host download of each chunk into out.
    //
    // Fixed: the loop previously iterated over NUM_STREAMS, silently ignoring
    // the template parameter N (all callers pass N == NUM_STREAMS, so behavior
    // is unchanged); also renamed the locals `first`/`size`, which shadowed
    // the helpers of the same name used in their own initializers.
    #pragma unroll
    for (int i = 0; i < N; i++) {
        int offset = first(i, N, height) * width;        // linear index of the chunk's first element
        int chunkHeight = size(i, N, height);            // number of image rows in this chunk
        size_t chunkBytes = chunkHeight * width * sizeof(T);

        morph_einstein_async(dev_out1 + offset, dev_out2 + offset, width, chunkHeight, width, width, streams[i]);
        cuda_exec(cudaMemcpyAsync(out + offset, dev_out1 + offset, chunkBytes, cudaMemcpyDeviceToHost, streams[i]));
    }
}
template<typename T, bool type>
void LoewnerMorphology::Morph::morph_basic_launcher(T *dev_in, T *dev_out, T *in, int width, int height, int pWidth, int pHeight, int padding, size_t sharedSize, cudaStream_t stream) {
    // One basic morphological pass (the operation is selected by the `type`
    // template flag): initialize the padded buffer, upload the image into its
    // interior, and launch the morph kernel on the given stream.
    dim3 grid, block;
    const int paddedWidth = pWidth + 2 * padding;
    const size_t devPitch = paddedWidth * sizeof(T);
    const size_t rowBytes = width * sizeof(T);

    // pad the device buffer with the neutral element for this operation
    prepareGrid1(grid, block, pHeight + 2 * padding);
    LoewnerMorphology::fill<T><<<grid, block, 0, stream>>>(dev_in, paddedWidth, (type) ? T::max() : T::min());

    // copy the image into the interior of the padded buffer
    cuda_exec(cudaMemcpy2D(dev_in + padding * paddedWidth + padding, devPitch, in, rowBytes, rowBytes, height, cudaMemcpyHostToDevice));

    // run the morphological kernel
    prepareGrid2(grid, block, pWidth, pHeight);
    LoewnerMorphology::morph_kernel<T, type><<<grid, block, sharedSize, stream>>>(dev_in, dev_out, width, height, pWidth, pHeight, padding);
}
template<typename T>
void LoewnerMorphology::Morph::morph_shock_launcher(T *dev_in1, T* dev_in2, T *dev_out, T *laplacian, T *in, int width, int height, int pWidth, int pHeight, int padding, size_t sharedSize, cudaStream_t stream) {
    // Shock-filter pass: prepares two padded copies of the input image (one
    // buffer padded with T::min(), the other with T::max()), then runs the
    // shock kernel, which also receives the precomputed laplacian image.
    dim3 grid, block;
    const int paddedWidth = pWidth + 2 * padding;
    const size_t devPitch = paddedWidth * sizeof(T);
    const size_t rowBytes = width * sizeof(T);
    const size_t interiorOffset = padding * paddedWidth + padding;

    // pad both buffers with their respective neutral elements
    prepareGrid1(grid, block, pHeight + 2 * padding);
    LoewnerMorphology::fill<T><<<grid, block, 0, stream>>>(dev_in1, paddedWidth, T::min());
    LoewnerMorphology::fill<T><<<grid, block, 0, stream>>>(dev_in2, paddedWidth, T::max());
    cuda_exec(cudaStreamSynchronize(stream));

    // upload the same image into the interior of both padded buffers
    cuda_exec(cudaMemcpy2D(dev_in1 + interiorOffset, devPitch, in, rowBytes, rowBytes, height, cudaMemcpyHostToDevice));
    cuda_exec(cudaMemcpy2D(dev_in2 + interiorOffset, devPitch, in, rowBytes, rowBytes, height, cudaMemcpyHostToDevice));

    // the shock kernel needs twice the shared memory of a basic pass
    prepareGrid2(grid, block, pWidth, pHeight);
    LoewnerMorphology::shock_kernel<T><<<grid, block, 2 * sharedSize, stream>>>(dev_in1, dev_in2, dev_out, laplacian, width, height, pWidth, pHeight, padding);
    cuda_exec(cudaStreamSynchronize(stream));
}
template<typename T, bool type>
void LoewnerMorphology::Morph::morph_basic(T *in, T *out, int width, int height, int padding) {
    // One basic morphological operation (selected by `type`): allocates the
    // device buffers, runs the launcher synchronously, and downloads the
    // result into the host buffer out.
    const int pWidth = ((width + THREADS_X - 1) / THREADS_X) * THREADS_X;    // width rounded up to a multiple of THREADS_X
    const int pHeight = ((height + THREADS_Y - 1) / THREADS_Y) * THREADS_Y;  // height rounded up to a multiple of THREADS_Y
    const size_t inSize = (pWidth + 2 * padding) * (pHeight + 2 * padding) * sizeof(T);  // padded input buffer
    const size_t outSize = width * height * sizeof(T);                                   // output buffer
    // shared memory tile; the +1 column avoids bank conflicts
    const size_t sharedSize = (THREADS_X + 2 * padding + 1) * (THREADS_Y + 2 * padding) * sizeof(Circle);

    T *dev_in = NULL;
    T *dev_out = NULL;
    cuda_exec(cudaMalloc(&dev_in, inSize));
    cuda_exec(cudaMalloc(&dev_out, outSize));

    morph_basic_launcher<T, type>(dev_in, dev_out, in, width, height, pWidth, pHeight, padding, sharedSize);
    cuda_exec(cudaDeviceSynchronize());

    cuda_exec(cudaMemcpy(out, dev_out, outSize, cudaMemcpyDeviceToHost));

    cuda_exec(cudaFree(dev_in));
    cuda_exec(cudaFree(dev_out));
}
template<typename T, bool type>
void LoewnerMorphology::Morph::morph_second_order_launcher(T *dev_in, T *dev_out, T *in, int width, int height, int pWidth, int pHeight, int padding, size_t sharedSize, cudaStream_t stream) {
    // Two chained morphological passes: first with template flag `type`, then
    // with `!type` (per morph_handle this realizes closing/opening). The
    // intermediate result in dev_out is fed back through the padded buffer
    // dev_in for the second pass.
    dim3 fillGrid, fillBlock;
    dim3 morphGrid, morphBlock;
    prepareGrid1(fillGrid, fillBlock, pHeight + 2 * padding);
    prepareGrid2(morphGrid, morphBlock, pWidth, pHeight);

    const int paddedWidth = pWidth + 2 * padding;
    const size_t devPitch = paddedWidth * sizeof(T);
    const size_t rowBytes = width * sizeof(T);
    T *interior = dev_in + padding * paddedWidth + padding;

    // first pass: pad with this operation's neutral element, upload the host
    // image, run the kernel
    LoewnerMorphology::fill<T><<<fillGrid, fillBlock, 0, stream>>>(dev_in, paddedWidth, (type) ? T::max() : T::min());
    cuda_exec(cudaMemcpy2DAsync(interior, devPitch, in, rowBytes, rowBytes, height, cudaMemcpyHostToDevice, stream));
    LoewnerMorphology::morph_kernel<T, type><<<morphGrid, morphBlock, sharedSize, stream>>>(dev_in, dev_out, width, height, pWidth, pHeight, padding);

    // second pass: refill with the opposite neutral element, feed the first
    // result back in (device-to-device), run the dual kernel
    LoewnerMorphology::fill<T><<<fillGrid, fillBlock, 0, stream>>>(dev_in, paddedWidth, (!type) ? T::max() : T::min());
    cuda_exec(cudaMemcpy2DAsync(interior, devPitch, dev_out, rowBytes, rowBytes, height, cudaMemcpyDeviceToDevice, stream));
    LoewnerMorphology::morph_kernel<T, !type><<<morphGrid, morphBlock, sharedSize, stream>>>(dev_in, dev_out, width, height, pWidth, pHeight, padding);
}
template<typename T, bool type>
void LoewnerMorphology::Morph::morph_second_order(T *in, T *out, int width, int height, int padding) {
    // One second-order morphological operation (closing/opening, selected by
    // `type`): allocates device buffers, runs the two-pass launcher
    // synchronously, and downloads the result into out.
    const int pWidth = ((width + THREADS_X - 1) / THREADS_X) * THREADS_X;    // width rounded up to a multiple of THREADS_X
    const int pHeight = ((height + THREADS_Y - 1) / THREADS_Y) * THREADS_Y;  // height rounded up to a multiple of THREADS_Y
    const size_t inSize = (pWidth + 2 * padding) * (pHeight + 2 * padding) * sizeof(T);  // padded input buffer
    const size_t outSize = width * height * sizeof(T);                                   // output buffer
    // shared memory tile; the +1 column avoids bank conflicts
    const size_t sharedSize = (THREADS_X + 2 * padding + 1) * (THREADS_Y + 2 * padding) * sizeof(Circle);

    T *dev_in = NULL;
    T *dev_out = NULL;
    cuda_exec(cudaMalloc(&dev_in, inSize));
    cuda_exec(cudaMalloc(&dev_out, outSize));

    morph_second_order_launcher<T, type>(dev_in, dev_out, in, width, height, pWidth, pHeight, padding, sharedSize);
    cuda_exec(cudaDeviceSynchronize());

    cuda_exec(cudaMemcpy(out, dev_out, outSize, cudaMemcpyDeviceToHost));

    cuda_exec(cudaFree(dev_in));
    cuda_exec(cudaFree(dev_out));
}
template<typename T, bool type>
void LoewnerMorphology::Morph::morph_hats(T *in, T *out, int width, int height, int padding) {
    // Top-hat transform (per morph_handle: type == false -> black top hat,
    // type == true -> white top hat). Runs a second-order operation
    // (closing/opening, selected by `type`) into dev_out1, then combines that
    // result with the original host image via the multi-stream einstein
    // launcher, which also downloads the final image into out.
    T *dev_in = NULL;       // device vector holding image matrix (padding)
    T *dev_out1 = NULL;     // device vector holding first output image
    T *dev_out2 = NULL;     // device vector holding original image

    int pWidth = ((width + THREADS_X - 1) / THREADS_X) * THREADS_X;     // width of the image (rounded up to a multiple of THREADS_X)
    int pHeight = ((height + THREADS_Y - 1) / THREADS_Y) * THREADS_Y;   // height of the image (rounded up to a multiple of THREADS_Y)
    size_t inSize = (pWidth + 2 * padding) * (pHeight + 2 * padding) * sizeof(T);   // size of the padded input device vector
    size_t outSize = width * height * sizeof(T);                                    // size of each output device vector
    int sharedWidth = THREADS_X + 2 * padding + 1;  // shared memory width (+1 column avoids bank conflicts)
    int sharedHeight = THREADS_Y + 2 * padding;     // shared memory height
    size_t sharedSize = sharedWidth * sharedHeight * sizeof(Circle);    // size of the shared memory in bytes

    cuda_exec(cudaMalloc(&dev_in, inSize));
    cuda_exec(cudaMalloc(&dev_out1, outSize));
    cuda_exec(cudaMalloc(&dev_out2, outSize));

    // closing/opening of the input image; result in dev_out1
    morph_second_order_launcher<T, type>(dev_in, dev_out1, in, width, height, pWidth, pHeight, padding, sharedSize);
    cuda_exec(cudaDeviceSynchronize());

    // upload the original into dev_out2, combine with dev_out1 chunk-by-chunk
    // on the class-level streams, and download the result into out
    morph_einstein_copy_original_launcher<T, type, NUM_STREAMS>(dev_out1, dev_out2, in, out, width, height, streams);

    #pragma unroll
    for (int i = 0; i < NUM_STREAMS; i++) {
        cuda_exec(cudaStreamSynchronize(streams[i]));
    }

    cuda_exec(cudaFree(dev_in));
    cuda_exec(cudaFree(dev_out1));
    cuda_exec(cudaFree(dev_out2));
}
template<typename T>
void LoewnerMorphology::Morph::morph_sdth(T *in, T *out, int width, int height, int padding, int *mask) {
    // Self-dual top hat (morphType 6 in morph_handle): runs both second-order
    // operations (template flags false and true) on the input image — on two
    // streams of one GPU, or on two GPUs when a second device is available —
    // then combines the two results via the multi-stream einstein launcher,
    // which downloads the final image into out.
    T *dev_in1 = NULL;          // device vector holding image matrix (padding)
    T *dev_in2 = NULL;          // device vector holding image matrix (padding)
    T *dev_out1 = NULL;         // device vector holding first output image
    T *dev_out2 = NULL;         // device vector holding second output image
    T *dev_out_temp = NULL;     // device vector holding output image on second device (optional, if it is available)

    int pWidth = ((width + THREADS_X - 1) / THREADS_X) * THREADS_X;     // width of the image (rounded up to a multiple of THREADS_X)
    int pHeight = ((height + THREADS_Y - 1) / THREADS_Y) * THREADS_Y;   // height of the image (rounded up to a multiple of THREADS_Y)
    size_t inSize = (pWidth + 2 * padding) * (pHeight + 2 * padding) * sizeof(T);   // size of the padded input device vector
    size_t outSize = width * height * sizeof(T);                                    // size of each output device vector
    int sharedWidth = THREADS_X + 2 * padding + 1;  // shared memory width (+1 column avoids bank conflicts)
    int sharedHeight = THREADS_Y + 2 * padding;     // shared memory height
    size_t sharedSize = sharedWidth * sharedHeight * sizeof(Circle);    // size of the shared memory in bytes

    int count = 0;
    cuda_exec(cudaGetDeviceCount(&count));

    if (count < 2) {
        // code executed on one GPU: the two passes overlap on separate streams
        cuda_exec(cudaMalloc(&dev_in1, inSize));
        cuda_exec(cudaMalloc(&dev_in2, inSize));
        cuda_exec(cudaMalloc(&dev_out1, outSize));
        cuda_exec(cudaMalloc(&dev_out2, outSize));

        morph_second_order_launcher<T, false>(dev_in1, dev_out1, in, width, height, pWidth, pHeight, padding, sharedSize, streams[0]);
        morph_second_order_launcher<T, true>(dev_in2, dev_out2, in, width, height, pWidth, pHeight, padding, sharedSize, streams[1]);

        cuda_exec(cudaStreamSynchronize(streams[0]));
        cuda_exec(cudaStreamSynchronize(streams[1]));

        cuda_exec(cudaFree(dev_in2));
    } else {
        // code executed on two GPUs, one second-order pass per device
        cuda_exec(cudaSetDevice(0));
        cuda_exec(cudaMalloc(&dev_in1, inSize));
        cuda_exec(cudaMalloc(&dev_out1, outSize));
        cuda_exec(cudaMalloc(&dev_out2, outSize));

        cuda_exec(cudaSetDevice(1));
        cuda_exec(cudaMalloc(&dev_in2, inSize));
        cuda_exec(cudaMalloc(&dev_out_temp, outSize));
        // device 1 needs its own copy of the mask in constant memory
        // (morph_handle only initializes the symbol on device 0)
        cuda_exec(cudaMemcpyToSymbol(maskMemory, mask, (2 * padding + 1) * (2 * padding + 1) * sizeof(int), cudaMemcpyHostToDevice));

        cuda_exec(cudaSetDevice(0));
        morph_second_order_launcher<T, false>(dev_in1, dev_out1, in, width, height, pWidth, pHeight, padding, sharedSize);
        cuda_exec(cudaSetDevice(1));
        morph_second_order_launcher<T, true>(dev_in2, dev_out_temp, in, width, height, pWidth, pHeight, padding, sharedSize);

        cudaSetDevice(0);
        cuda_exec(cudaDeviceSynchronize());
        cudaSetDevice(1);
        cuda_exec(cudaDeviceSynchronize());

        cudaSetDevice(0);
        // NOTE(review): return value unchecked — presumably deliberate, since
        // cudaDeviceEnablePeerAccess errors if access is already enabled by a
        // previous call; confirm this is intended
        cudaDeviceEnablePeerAccess(1, 0);
        // bring device 1's result over to device 0
        cuda_exec(cudaMemcpyPeer(dev_out2, 0, dev_out_temp, 1, outSize));

        cudaSetDevice(1);
        cuda_exec(cudaFree(dev_in2));
        cuda_exec(cudaFree(dev_out_temp));
        cudaSetDevice(0);
    }

    // combine the two partial results and download into out
    morph_einstein_copy_launcher<T, NUM_STREAMS>(dev_out1, dev_out2, out, width, height, streams);

    #pragma unroll
    for (int i = 0; i < NUM_STREAMS; i++) {
        cuda_exec(cudaStreamSynchronize(streams[i]));
    }

    cuda_exec(cudaFree(dev_in1));
    cuda_exec(cudaFree(dev_out1));
    cuda_exec(cudaFree(dev_out2));
}
template<typename T>
void LoewnerMorphology::Morph::morph_beucher(T *in, T *out, int width, int height, int padding, int *mask) {
    // Beucher gradient (morphType 7 in morph_handle): runs both basic
    // morphological passes (template flags false and true) on the input —
    // on two streams of one GPU, or on two GPUs when a second device is
    // available — then combines the two results via the multi-stream einstein
    // launcher, which downloads the final image into out.
    T *dev_in1 = NULL;          // device vector holding image matrix (padding)
    T *dev_in2 = NULL;          // device vector holding image matrix (padding)
    T *dev_out1 = NULL;         // device vector holding first output image
    T *dev_out2 = NULL;         // device vector holding second output image
    T *dev_out_temp = NULL;     // device vector holding output image on second device (optional, if it is available)

    int pWidth = ((width + THREADS_X - 1) / THREADS_X) * THREADS_X;     // width of the image (rounded up to a multiple of THREADS_X)
    int pHeight = ((height + THREADS_Y - 1) / THREADS_Y) * THREADS_Y;   // height of the image (rounded up to a multiple of THREADS_Y)
    size_t inSize = (pWidth + 2 * padding) * (pHeight + 2 * padding) * sizeof(T);   // size of the padded input device vector
    size_t outSize = width * height * sizeof(T);                                    // size of each output device vector
    int sharedWidth = THREADS_X + 2 * padding + 1;  // shared memory width (+1 column avoids bank conflicts)
    int sharedHeight = THREADS_Y + 2 * padding;     // shared memory height
    size_t sharedSize = sharedWidth * sharedHeight * sizeof(Circle);    // size of the shared memory in bytes

    int count = 0;
    cuda_exec(cudaGetDeviceCount(&count));

    if (count < 2) {
        // code executed on one GPU: the two passes overlap on separate streams
        cuda_exec(cudaMalloc(&dev_in1, inSize));
        cuda_exec(cudaMalloc(&dev_in2, inSize));
        cuda_exec(cudaMalloc(&dev_out1, outSize));
        cuda_exec(cudaMalloc(&dev_out2, outSize));

        morph_basic_launcher<T, false>(dev_in1, dev_out1, in, width, height, pWidth, pHeight, padding, sharedSize, streams[0]);
        morph_basic_launcher<T, true>(dev_in2, dev_out2, in, width, height, pWidth, pHeight, padding, sharedSize, streams[1]);

        cuda_exec(cudaStreamSynchronize(streams[0]));
        cuda_exec(cudaStreamSynchronize(streams[1]));

        cuda_exec(cudaFree(dev_in2));
    } else {
        // code executed on two GPUs, one basic pass per device
        cuda_exec(cudaSetDevice(0));
        cuda_exec(cudaMalloc(&dev_in1, inSize));
        cuda_exec(cudaMalloc(&dev_out1, outSize));
        cuda_exec(cudaMalloc(&dev_out2, outSize));

        cuda_exec(cudaSetDevice(1));
        cuda_exec(cudaMalloc(&dev_in2, inSize));
        cuda_exec(cudaMalloc(&dev_out_temp, outSize));
        // device 1 needs its own copy of the mask in constant memory
        // (morph_handle only initializes the symbol on device 0)
        cuda_exec(cudaMemcpyToSymbol(maskMemory, mask, (2 * padding + 1) * (2 * padding + 1) * sizeof(int), cudaMemcpyHostToDevice));

        cuda_exec(cudaSetDevice(0));
        morph_basic_launcher<T, false>(dev_in1, dev_out1, in, width, height, pWidth, pHeight, padding, sharedSize);
        cuda_exec(cudaSetDevice(1));
        morph_basic_launcher<T, true>(dev_in2, dev_out_temp, in, width, height, pWidth, pHeight, padding, sharedSize);

        cudaSetDevice(0);
        cuda_exec(cudaDeviceSynchronize());
        cudaSetDevice(1);
        cuda_exec(cudaDeviceSynchronize());

        cudaSetDevice(0);
        // NOTE(review): return value unchecked — presumably deliberate, since
        // cudaDeviceEnablePeerAccess errors if access is already enabled;
        // confirm this is intended
        cudaDeviceEnablePeerAccess(1, 0);
        // bring device 1's result over to device 0
        cuda_exec(cudaMemcpyPeer(dev_out2, 0, dev_out_temp, 1, outSize));

        cudaSetDevice(1);
        cuda_exec(cudaFree(dev_in2));
        cuda_exec(cudaFree(dev_out_temp));
        cudaSetDevice(0);
    }

    // combine the two partial results and download into out
    morph_einstein_copy_launcher<T, NUM_STREAMS>(dev_out1, dev_out2, out, width, height, streams);

    #pragma unroll
    for (int i = 0; i < NUM_STREAMS; i++) {
        cuda_exec(cudaStreamSynchronize(streams[i]));
    }

    cuda_exec(cudaFree(dev_in1));
    cuda_exec(cudaFree(dev_out1));
    cuda_exec(cudaFree(dev_out2));
}
template<typename T, bool type>
void LoewnerMorphology::Morph::morph_gradients(T *in, T *out, int width, int height, int padding) {
    // Morphological gradient (per morph_handle: type == false -> external
    // gradient, type == true -> internal gradient). One basic morphological
    // pass produces dev_out; the multi-stream einstein launcher then combines
    // it with the original host image and downloads the result into out.
    T *dev_in = NULL;   // device vector holding image matrix (padding)
    T *dev_out = NULL;  // device vector holding first output image

    int pWidth = ((width + THREADS_X - 1) / THREADS_X) * THREADS_X;     // width of the image (rounded up to a multiple of THREADS_X)
    int pHeight = ((height + THREADS_Y - 1) / THREADS_Y) * THREADS_Y;   // height of the image (rounded up to a multiple of THREADS_Y)
    size_t inSize = (pWidth + 2 * padding) * (pHeight + 2 * padding) * sizeof(T);   // size of the padded input device vector
    size_t outSize = width * height * sizeof(T);                                    // size of each output device vector
    int sharedWidth = THREADS_X + 2 * padding + 1;  // shared memory width (+1 column avoids bank conflicts)
    int sharedHeight = THREADS_Y + 2 * padding;     // shared memory height
    size_t sharedSize = sharedWidth * sharedHeight * sizeof(Circle);    // size of the shared memory in bytes

    cuda_exec(cudaMalloc(&dev_in, inSize));
    cuda_exec(cudaMalloc(&dev_out, outSize));

    morph_basic_launcher<T, type>(dev_in, dev_out, in, width, height, pWidth, pHeight, padding, sharedSize);
    cuda_exec(cudaDeviceSynchronize());

    // dev_in (the padded buffer, inSize >= outSize) is reused here as the
    // launcher's second device buffer, avoiding an extra allocation
    morph_einstein_copy_original_launcher<T, type, NUM_STREAMS>(dev_out, dev_in, in, out, width, height, streams);

    #pragma unroll
    for (int i = 0; i < NUM_STREAMS; i++) {
        cuda_exec(cudaStreamSynchronize(streams[i]));
    }

    cuda_exec(cudaFree(dev_in));
    cuda_exec(cudaFree(dev_out));
}
template<typename T>
void LoewnerMorphology::Morph::morph_laplacian(T *in, T *out, int width, int height, int padding, int *mask) {
    // Morphological Laplacian (morphType 10 in morph_handle): runs both basic
    // morphological passes, combines each with the original image via the
    // einstein kernel, then merges the two partial results and downloads the
    // final image into out. Uses two GPUs when a second device is available.
    //
    // Fixed: in the two-GPU branch the final einstein/copy launcher was given
    // dev_in2 — a device-1 pointer — although its contents had just been
    // copied to dev_out2 on device 0 with cudaMemcpyPeer (making that copy
    // dead and the device-0 kernels read remote memory through peer access).
    // The launcher now consumes dev_out2, the local copy.
    T *dev_in1 = NULL;          // device vector holding image matrix (padding)
    T *dev_in2 = NULL;          // device vector holding image matrix (padding)
    T *dev_out1 = NULL;         // device vector holding first output image
    T *dev_out2 = NULL;         // device vector holding second output image
    T *dev_out_temp = NULL;     // device vector holding output image on second device (optional, if it is available)

    int pWidth = ((width + THREADS_X - 1) / THREADS_X) * THREADS_X;     // width of the image (rounded up to a multiple of THREADS_X)
    int pHeight = ((height + THREADS_Y - 1) / THREADS_Y) * THREADS_Y;   // height of the image (rounded up to a multiple of THREADS_Y)
    size_t inSize = (pWidth + 2 * padding) * (pHeight + 2 * padding) * sizeof(T);   // size of the padded input device vector
    size_t outSize = width * height * sizeof(T);                                    // size of each output device vector
    int sharedWidth = THREADS_X + 2 * padding + 1;  // shared memory width (+1 column avoids bank conflicts)
    int sharedHeight = THREADS_Y + 2 * padding;     // shared memory height
    size_t sharedSize = sharedWidth * sharedHeight * sizeof(Circle);    // size of the shared memory in bytes

    int count = 0;
    cuda_exec(cudaGetDeviceCount(&count));

    if (count < 2) {
        // code executed on one GPU, the two passes overlapping on two streams
        cuda_exec(cudaMalloc(&dev_in1, inSize));
        cuda_exec(cudaMalloc(&dev_in2, inSize));
        cuda_exec(cudaMalloc(&dev_out1, outSize));
        cuda_exec(cudaMalloc(&dev_out2, outSize));

        morph_basic_launcher<T, true>(dev_in1, dev_out1, in, width, height, pWidth, pHeight, padding, sharedSize, streams[0]);
        morph_basic_launcher<T, false>(dev_in2, dev_out2, in, width, height, pWidth, pHeight, padding, sharedSize, streams[1]);

        // combine each pass with the original image; dev_in1/dev_in2 are large
        // enough (inSize >= outSize) to hold the unpadded original
        cuda_exec(cudaMemcpyAsync(dev_in1, in, outSize, cudaMemcpyHostToDevice, streams[0]));
        morph_einstein_async(dev_out1, dev_in1, width, height, width, width, streams[0]);
        cuda_exec(cudaMemcpyAsync(dev_in2, in, outSize, cudaMemcpyHostToDevice, streams[1]));
        morph_einstein_async(dev_in2, dev_out2, width, height, width, width, streams[1]);

        cuda_exec(cudaStreamSynchronize(streams[0]));
        cuda_exec(cudaStreamSynchronize(streams[1]));

        // merge the two partial results and download into out
        morph_einstein_copy_launcher<T, NUM_STREAMS>(dev_out1, dev_in2, out, width, height, streams);

        #pragma unroll
        for (int i = 0; i < NUM_STREAMS; i++) {
            cuda_exec(cudaStreamSynchronize(streams[i]));
        }

        cuda_exec(cudaFree(dev_in2));
    } else {
        // code executed on two GPUs, one basic pass per device
        cuda_exec(cudaSetDevice(0));
        cuda_exec(cudaMalloc(&dev_in1, inSize));
        cuda_exec(cudaMalloc(&dev_out1, outSize));
        cuda_exec(cudaMalloc(&dev_out2, outSize));

        cuda_exec(cudaSetDevice(1));
        cuda_exec(cudaMalloc(&dev_in2, inSize));
        cuda_exec(cudaMalloc(&dev_out_temp, outSize));
        // device 1 needs its own copy of the mask in constant memory
        // (morph_handle only initializes the symbol on device 0)
        cuda_exec(cudaMemcpyToSymbol(maskMemory, mask, (2 * padding + 1) * (2 * padding + 1) * sizeof(int), cudaMemcpyHostToDevice));

        cuda_exec(cudaSetDevice(0));
        morph_basic_launcher<T, true>(dev_in1, dev_out1, in, width, height, pWidth, pHeight, padding, sharedSize);
        cuda_exec(cudaMemcpyAsync(dev_in1, in, outSize, cudaMemcpyHostToDevice));
        morph_einstein_async(dev_out1, dev_in1, width, height, width, width);

        cuda_exec(cudaSetDevice(1));
        morph_basic_launcher<T, false>(dev_in2, dev_out_temp, in, width, height, pWidth, pHeight, padding, sharedSize);
        cuda_exec(cudaMemcpyAsync(dev_in2, in, outSize, cudaMemcpyHostToDevice));
        morph_einstein_async(dev_in2, dev_out_temp, width, height, width, width);

        cudaSetDevice(0);
        cuda_exec(cudaDeviceSynchronize());
        cudaSetDevice(1);
        cuda_exec(cudaDeviceSynchronize());

        cudaSetDevice(0);
        // NOTE(review): return value unchecked — presumably deliberate, since
        // cudaDeviceEnablePeerAccess errors if access is already enabled
        cudaDeviceEnablePeerAccess(1, 0);
        // bring device 1's partial result over to device 0
        cuda_exec(cudaMemcpyPeer(dev_out2, 0, dev_in2, 1, outSize));

        // BUGFIX: merge with the local copy dev_out2 (was dev_in2, device 1)
        morph_einstein_copy_launcher<T, NUM_STREAMS>(dev_out1, dev_out2, out, width, height, streams);

        #pragma unroll
        for (int i = 0; i < NUM_STREAMS; i++) {
            cuda_exec(cudaStreamSynchronize(streams[i]));
        }

        cudaSetDevice(1);
        cuda_exec(cudaFree(dev_in2));
        cuda_exec(cudaFree(dev_out_temp));
        cudaSetDevice(0);
    }

    cuda_exec(cudaFree(dev_in1));
    cuda_exec(cudaFree(dev_out1));
    cuda_exec(cudaFree(dev_out2));
}
template<typename T>
void LoewnerMorphology::Morph::morph_shock(T *in, T *out, int width, int height, int padding, int *mask) {
    // Shock filter (morphType 11 in morph_handle): first computes the
    // morphological Laplacian into dev_out1 (single- or dual-GPU path, same
    // structure as morph_laplacian), then runs the shock kernel, which takes
    // the Laplacian image as an argument, and downloads the result into out.
    //
    // Fixed: in the two-GPU branch the final einstein combination consumed
    // dev_in2 — a device-1 pointer — although its contents had just been
    // copied into dev_out2 on device 0 via cudaMemcpyPeer (leaving that copy
    // dead and the device-0 kernel reading remote memory). It now uses
    // dev_out2, the local copy.
    //
    // NOTE(review): the two-GPU branch launches morph_basic_launcher with
    // template flags <false> on device 0 and <true> on device 1 — the
    // opposite order of this function's single-GPU branch (and of
    // morph_laplacian). This may flip the sign of the computed Laplacian;
    // confirm which order is intended before relying on multi-GPU results.
    T *dev_in1 = NULL;          // device vector holding image matrix (padding)
    T *dev_in2 = NULL;          // device vector holding image matrix (padding)
    T *dev_out1 = NULL;         // device vector holding first output image
    T *dev_out2 = NULL;         // device vector holding second output image
    T *dev_out_temp = NULL;     // device vector holding output image on second device (optional, if it is available)

    int pWidth = ((width + THREADS_X - 1) / THREADS_X) * THREADS_X;     // width of the image (rounded up to a multiple of THREADS_X)
    int pHeight = ((height + THREADS_Y - 1) / THREADS_Y) * THREADS_Y;   // height of the image (rounded up to a multiple of THREADS_Y)
    size_t inSize = (pWidth + 2 * padding) * (pHeight + 2 * padding) * sizeof(T);   // size of the padded input device vector
    size_t outSize = width * height * sizeof(T);                                    // size of each output device vector
    int sharedWidth = THREADS_X + 2 * padding + 1;  // shared memory width (+1 column avoids bank conflicts)
    int sharedHeight = THREADS_Y + 2 * padding;     // shared memory height
    size_t sharedSize = sharedWidth * sharedHeight * sizeof(Circle);    // size of the shared memory in bytes

    int count = 0;
    cuda_exec(cudaGetDeviceCount(&count));

    if (count < 2) {
        // code executed on one GPU, the two passes overlapping on two streams
        cuda_exec(cudaMalloc(&dev_in1, inSize));
        cuda_exec(cudaMalloc(&dev_in2, inSize));
        cuda_exec(cudaMalloc(&dev_out1, outSize));
        cuda_exec(cudaMalloc(&dev_out2, outSize));

        morph_basic_launcher<T, true>(dev_in1, dev_out1, in, width, height, pWidth, pHeight, padding, sharedSize, streams[0]);
        morph_basic_launcher<T, false>(dev_in2, dev_out2, in, width, height, pWidth, pHeight, padding, sharedSize, streams[1]);

        // combine each pass with the original image; dev_in1/dev_in2 are large
        // enough (inSize >= outSize) to hold the unpadded original
        cuda_exec(cudaMemcpyAsync(dev_in1, in, outSize, cudaMemcpyHostToDevice, streams[0]));
        morph_einstein_async(dev_out1, dev_in1, width, height, width, width, streams[0]));
        cuda_exec(cudaMemcpyAsync(dev_in2, in, outSize, cudaMemcpyHostToDevice, streams[1]));
        morph_einstein_async(dev_in2, dev_out2, width, height, width, width, streams[1]);

        cuda_exec(cudaStreamSynchronize(streams[0]));
        cuda_exec(cudaStreamSynchronize(streams[1]));

        // merge the two partial results; the Laplacian ends up in dev_out1
        morph_einstein_async(dev_out1, dev_in2, width, height, width, width);
        cuda_exec(cudaDeviceSynchronize());

        cuda_exec(cudaFree(dev_in2));
    } else {
        // code executed on two GPUs, one basic pass per device
        cuda_exec(cudaSetDevice(0));
        cuda_exec(cudaMalloc(&dev_in1, inSize));
        cuda_exec(cudaMalloc(&dev_out1, outSize));
        cuda_exec(cudaMalloc(&dev_out2, outSize));

        cuda_exec(cudaSetDevice(1));
        cuda_exec(cudaMalloc(&dev_in2, inSize));
        cuda_exec(cudaMalloc(&dev_out_temp, outSize));
        // device 1 needs its own copy of the mask in constant memory
        // (morph_handle only initializes the symbol on device 0)
        cuda_exec(cudaMemcpyToSymbol(maskMemory, mask, (2 * padding + 1) * (2 * padding + 1) * sizeof(int), cudaMemcpyHostToDevice));

        cuda_exec(cudaSetDevice(0));
        morph_basic_launcher<T, false>(dev_in1, dev_out1, in, width, height, pWidth, pHeight, padding, sharedSize);
        cuda_exec(cudaMemcpyAsync(dev_in1, in, outSize, cudaMemcpyHostToDevice));
        morph_einstein_async(dev_out1, dev_in1, width, height, width, width);

        cuda_exec(cudaSetDevice(1));
        morph_basic_launcher<T, true>(dev_in2, dev_out_temp, in, width, height, pWidth, pHeight, padding, sharedSize);
        cuda_exec(cudaMemcpyAsync(dev_in2, in, outSize, cudaMemcpyHostToDevice));
        morph_einstein_async(dev_in2, dev_out_temp, width, height, width, width);

        cudaSetDevice(0);
        cuda_exec(cudaDeviceSynchronize());
        cudaSetDevice(1);
        cuda_exec(cudaDeviceSynchronize());

        cudaSetDevice(0);
        // NOTE(review): return value unchecked — presumably deliberate, since
        // cudaDeviceEnablePeerAccess errors if access is already enabled
        cudaDeviceEnablePeerAccess(1, 0);
        // bring device 1's partial result over to device 0
        cuda_exec(cudaMemcpyPeer(dev_out2, 0, dev_in2, 1, outSize));

        // BUGFIX: merge with the local copy dev_out2 (was dev_in2, device 1);
        // the Laplacian ends up in dev_out1
        morph_einstein_async(dev_out1, dev_out2, width, height, width, width);
        cuda_exec(cudaDeviceSynchronize());

        cudaSetDevice(1);
        cuda_exec(cudaFree(dev_in2));
        cuda_exec(cudaFree(dev_out_temp));
        cudaSetDevice(0);
    }

    // the morphological Laplacian now resides in dev_out1; run the shock pass
    cuda_exec(cudaMalloc(&dev_in2, inSize));
    morph_shock_launcher(dev_in1, dev_in2, dev_out2, dev_out1, in, width, height, pWidth, pHeight, padding, sharedSize);
    cuda_exec(cudaDeviceSynchronize());

    cuda_exec(cudaMemcpy(out, dev_out2, outSize, cudaMemcpyDeviceToHost));

    cuda_exec(cudaFree(dev_in1));
    cuda_exec(cudaFree(dev_in2));
    cuda_exec(cudaFree(dev_out1));
    cuda_exec(cudaFree(dev_out2));
}
/*
 * Dispatches one of the 12 Loewner morphology operations on the GPU.
 *
 * in        - input image data (one element per pixel)
 * out       - output buffer; after the first iteration it is reused as both
 *             input and output so iterations compose in place
 * width/height/padding - image geometry; padding = maskDim / 2
 * mask      - structuring-element mask, copied into device constant memory
 * morphType - operation selector 0..11 (see the switch below; the boolean
 *             template argument selects between the dual variants of each op)
 * iters     - number of times the operation is applied; must be >= 1
 *
 * Terminates the process on iters < 1. CUDA streams are created before and
 * destroyed after the dispatch.
 */
template<typename T>
void LoewnerMorphology::Morph::morph_handle(T *in, T *out, int width, int height, int padding, int *mask, int morphType, int iters) {
if (iters < 1) {
printf("Operation cannot be executed. Number of iterations must be greater than 0. You provided %d.\n", iters);
exit(EXIT_FAILURE);
}
cuda_exec(cudaSetDevice(0));
// copy the structuring-element mask to device constant memory
int maskSize = (2 * padding + 1) * (2 * padding + 1) * sizeof(int);
cuda_exec(cudaMemcpyToSymbol(maskMemory, mask, maskSize, cudaMemcpyHostToDevice));
// NOTE(review): #pragma unroll on a host-side loop is most likely ignored by
// the host compiler — confirm whether it was intended for device code
#pragma unroll
for (int i = 0; i < NUM_STREAMS; i++) {
cuda_exec(cudaStreamCreate(&streams[i]));
}
// Each case runs the selected operation iters times; iteration 0 reads from
// `in`, subsequent iterations operate on `out` in place.
switch (morphType) {
// 0/1: basic morphology, the two dual template variants
case 0:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_basic<T, false>(in, out, width, height, padding);
} else {
morph_basic<T, false>(out, out, width, height, padding);
}
}
break;
case 1:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_basic<T, true>(in, out, width, height, padding);
} else {
morph_basic<T, true>(out, out, width, height, padding);
}
}
break;
// 2/3: second-order operations (dual variants)
case 2:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_second_order<T, false>(in, out, width, height, padding);
} else {
morph_second_order<T, false>(out, out, width, height, padding);
}
}
break;
case 3:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_second_order<T, true>(in, out, width, height, padding);
} else {
morph_second_order<T, true>(out, out, width, height, padding);
}
}
break;
// 4/5: top-hat transforms (dual variants)
case 4:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_hats<T, false>(in, out, width, height, padding);
} else {
morph_hats<T, false>(out, out, width, height, padding);
}
}
break;
case 5:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_hats<T, true>(in, out, width, height, padding);
} else {
morph_hats<T, true>(out, out, width, height, padding);
}
}
break;
// 6: self-dual top hat
case 6:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_sdth<T>(in, out, width, height, padding, mask);
} else {
morph_sdth<T>(out, out, width, height, padding, mask);
}
}
break;
// 7: Beucher gradient
case 7:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_beucher<T>(in, out, width, height, padding, mask);
} else {
morph_beucher<T>(out, out, width, height, padding, mask);
}
}
break;
// 8/9: external/internal gradients (dual variants)
case 8:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_gradients<T, false>(in, out, width, height, padding);
} else {
morph_gradients<T, false>(out, out, width, height, padding);
}
}
break;
case 9:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_gradients<T, true>(in, out, width, height, padding);
} else {
morph_gradients<T, true>(out, out, width, height, padding);
}
}
break;
// 10: Laplacian
case 10:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_laplacian<T>(in, out, width, height, padding, mask);
} else {
morph_laplacian<T>(out, out, width, height, padding, mask);
}
}
break;
// 11: shock filter
case 11:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_shock<T>(in, out, width, height, padding, mask);
} else {
morph_shock<T>(out, out, width, height, padding, mask);
}
}
break;
}
#pragma unroll
for (int i = 0; i < NUM_STREAMS; i++) {
cuda_exec(cudaStreamDestroy(streams[i]));
}
}
/*
 * Constructs a Morph operator: loads the structuring-element mask and the
 * input image, converts the image RGB -> M-HCL -> Loewner matrix form.
 *
 * imageFile - path to the input image
 * maskFile  - path to the mask file (maskDim x maskDim integers)
 * maskDim   - mask dimension; must be odd and maskDim^2 must not exceed 1024
 *             (one CUDA thread block)
 *
 * Terminates the process on invalid mask dimensions or allocation failure.
 */
LoewnerMorphology::Morph::Morph(const char *imageFile, const char *maskFile, int maskDim) {
double *image = NULL;
double *data = NULL;
if (maskDim % 2 == 0 || maskDim * maskDim > 1024) {
printf("Mask dimension should be odd and its square should not exceed 1024.\n");
exit(EXIT_FAILURE);
}
mask = (int *)malloc(maskDim * maskDim * sizeof(int));
if (mask == NULL) {
printf("Allocation of the mask buffer failed.\n");
exit(EXIT_FAILURE);
}
readMaskFromFile(mask, maskDim, maskFile);
padding = maskDim / 2;
omp_set_num_threads(OMP_THREAD_NUM);
inputImage = new CImgFloatWrapper(imageFile);
outputImage = nullptr;
width = inputImage->width();
height = inputImage->height();
spectrum = inputImage->spectrum();
size = width * height;
// temporary per-channel buffers for the colour-space conversions below
image = (double *)malloc(size * spectrum * sizeof(double));
data = (double *)malloc(size * spectrum * sizeof(double));
if (image == NULL || data == NULL) {
printf("Allocation of the conversion buffers failed.\n");
exit(EXIT_FAILURE);
}
// pinned host memory so later device transfers can be asynchronous
cuda_exec(cudaMallocHost(&matrices, size * sizeof(LoewnerMorphology::MorphColorMatrix)));
cuda_exec(cudaMallocHost(&result, size * sizeof(LoewnerMorphology::MorphColorMatrix)));
Conversions::type2double(inputImage->data(), image, size * spectrum);
Conversions::rgb2mhcl(image, image + size, image + 2 * size, data, data + size, data + 2 * size, size);
Conversions::mhcl2matrix(data, data + size, data + 2 * size, matrices, size);
free(image);
free(data);
}
// Releases all resources owned by the operator: the host mask buffer, the
// pinned (cudaMallocHost) matrix buffers, and both image wrappers.
// outputImage may be nullptr; deleting a null pointer is a no-op.
LoewnerMorphology::Morph::~Morph() {
free(mask);
cuda_exec(cudaFreeHost(matrices));
cuda_exec(cudaFreeHost(result));
delete inputImage;
delete outputImage;
}
// Rebuilds outputImage from the current `result` matrices:
// Loewner matrices -> M-HCL channels -> RGB channels -> float pixels.
// Any previously created output image is released first.
void LoewnerMorphology::Morph::createOutputImage() {
    double *rgbChannels = (double *)malloc(size * spectrum * sizeof(double));
    double *mhclChannels = (double *)malloc(size * spectrum * sizeof(double));
    float *pixels = (float *)malloc(size * spectrum * sizeof(float));

    Conversions::matrix2mhcl(result, mhclChannels, mhclChannels + size, mhclChannels + 2 * size, size);
    Conversions::mhcl2rgb(mhclChannels, mhclChannels + size, mhclChannels + 2 * size, rgbChannels, rgbChannels + size, rgbChannels + 2 * size, size);
    Conversions::double2type(rgbChannels, pixels, size * spectrum);

    delete outputImage;  // deleting nullptr is a no-op
    outputImage = new CImgFloatWrapper(pixels, width, height, spectrum);

    free(rgbChannels);
    free(mhclChannels);
    free(pixels);
}
// Applies dilation (morphType 0) iter times and rebuilds the output image.
void LoewnerMorphology::Morph::dilation(int iter) {
morph_handle(matrices, result, width, height, padding, mask, 0, iter);
createOutputImage();
}
// Applies erosion (morphType 1) iter times and rebuilds the output image.
void LoewnerMorphology::Morph::erosion(int iter) {
morph_handle(matrices, result, width, height, padding, mask, 1, iter);
createOutputImage();
}
// Applies closing (morphType 2) iter times and rebuilds the output image.
void LoewnerMorphology::Morph::closing(int iter) {
morph_handle(matrices, result, width, height, padding, mask, 2, iter);
createOutputImage();
}
// Applies opening (morphType 3) iter times and rebuilds the output image.
void LoewnerMorphology::Morph::opening(int iter) {
morph_handle(matrices, result, width, height, padding, mask, 3, iter );
createOutputImage();
}
// Applies the black top-hat transform (morphType 4) iter times and rebuilds the output image.
void LoewnerMorphology::Morph::blackTopHat(int iter) {
morph_handle(matrices, result, width, height, padding, mask, 4, iter);
createOutputImage();
}
// Applies the white top-hat transform (morphType 5) iter times and rebuilds the output image.
void LoewnerMorphology::Morph::whiteTopHat(int iter) {
morph_handle(matrices, result, width, height, padding, mask, 5, iter);
createOutputImage();
}
// Applies the self-dual top-hat transform (morphType 6) iter times and rebuilds the output image.
void LoewnerMorphology::Morph::selfDualTopHat(int iter) {
morph_handle(matrices, result, width, height, padding, mask, 6, iter);
createOutputImage();
}
// Applies the Beucher gradient (morphType 7) iter times and rebuilds the output image.
void LoewnerMorphology::Morph::beucherGradient(int iter) {
morph_handle(matrices, result, width, height, padding, mask, 7, iter);
createOutputImage();
}
// Applies the external gradient (morphType 8) iter times and rebuilds the output image.
void LoewnerMorphology::Morph::externalGradient(int iter) {
morph_handle(matrices, result, width, height, padding, mask, 8, iter);
createOutputImage();
}
// Applies the internal gradient (morphType 9) iter times and rebuilds the output image.
void LoewnerMorphology::Morph::internalGradient(int iter) {
morph_handle(matrices, result, width, height, padding, mask, 9, iter);
createOutputImage();
}
// Applies the morphological Laplacian (morphType 10) iter times and rebuilds the output image.
void LoewnerMorphology::Morph::laplacian(int iter) {
morph_handle(matrices, result, width, height, padding, mask, 10, iter);
createOutputImage();
}
// Applies the shock filter (morphType 11) iter times and rebuilds the output image.
void LoewnerMorphology::Morph::shockFilter(int iter) {
morph_handle(matrices, result, width, height, padding, mask, 11, iter);
createOutputImage();
}
// Displays the unmodified input image.
void LoewnerMorphology::Morph::displayOriginalImage() {
inputImage->display();
}
// Displays the result image, if one has been produced by a morphology
// operation; otherwise prints a notice and returns.
void LoewnerMorphology::Morph::displayResultImage() {
if (outputImage == nullptr) {
printf("There is no result to display.\n");
return;
}
outputImage->display();
}
// Element-wise parallel copy of `size` items from `in` to `out`.
// The buffers must not overlap.
template<typename T>
void LoewnerMorphology::Morph::copy(T *in, T *out, int size) {
#pragma omp parallel for
    for (int idx = 0; idx < size; idx++) {
        out[idx] = in[idx];
    }
}
float *LoewnerMorphology::Morph::returnResult() {
if (outputImage == nullptr) {
printf("There is no result to return.\n");
return nullptr;
}
float *out = (float *)malloc(size * spectrum * sizeof(float));
copy(outputImage->data(), out, size * spectrum);
return out;
}
// Saves the result image to fileName, if one has been produced; otherwise
// prints a notice and returns.
void LoewnerMorphology::Morph::saveResult(const char *fileName) {
if (outputImage == nullptr) {
printf("There is no result to save.\n");
return;
}
outputImage->save(fileName);
}
|
Shuffle.h | // --------------------------------------------------------------------------
// Binary Brain -- binary neural net framework
//
// Copyright (C) 2018 by Ryuji Fuchikami
// https://github.com/ryuz
// ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------
#pragma once
#include "bb/Manager.h"
#include "bb/Model.h"
namespace bb {
// Shuffle
// Shuffle layer: permutes nodes within a frame by interleaving groups of
// m_shuffle_unit (a transpose of the node index viewed as a
// (x_unit, shuffle_unit) grid). Forward and Backward are exact inverses.
class Shuffle : public Model
{
using _super = Model;
public:
static inline std::string ModelName(void) { return "Shuffle"; }
static inline std::string ObjectName(void){ return ModelName(); }
std::string GetModelName(void) const override { return ModelName(); }
std::string GetObjectName(void) const override { return ObjectName(); }
protected:
bool m_host_only = false;
indices_t m_input_shape;
indices_t m_output_shape;
index_t m_shuffle_unit = 0;
public:
// creation parameters
struct create_t
{
index_t shuffle_unit = 0;
indices_t output_shape;
};
protected:
Shuffle() {}
Shuffle(create_t const &create)
{
m_output_shape = create.output_shape;
m_shuffle_unit = create.shuffle_unit;
}
/**
* @brief command processing
* @detail handles runtime commands sent to the model
* @param args command tokens
*/
void CommandProc(std::vector<std::string> args)
{
// "host_only <bool>": force the generic CPU path even when CUDA is available
if (args.size() == 2 && args[0] == "host_only")
{
m_host_only = EvalBool(args[1]);
}
}
void PrintInfoText(std::ostream& os, std::string indent, int columns, int nest, int depth) const override
{
_super::PrintInfoText(os, indent, columns, nest, depth);
// os << indent << " input shape : " << GetInputShape();
// os << indent << " output shape : " << GetOutputShape();
os << indent << " shuffle_unit : " << m_shuffle_unit << std::endl;
}
public:
~Shuffle() {}
static std::shared_ptr<Shuffle> Create(create_t const &create)
{
return std::shared_ptr<Shuffle>(new Shuffle(create));
}
static std::shared_ptr<Shuffle> Create(index_t shuffle_unit, indices_t output_shape=indices_t())
{
create_t create;
create.shuffle_unit = shuffle_unit;
create.output_shape = output_shape;
return Create(create);
}
static std::shared_ptr<Shuffle> Create(index_t shuffle_unit, index_t output_node_size)
{
return Create(shuffle_unit, indices_t({output_node_size}));
}
static std::shared_ptr<Shuffle> Create(void)
{
return Create(create_t());
}
#ifdef BB_PYBIND11
static std::shared_ptr<Shuffle> CreatePy(index_t shuffle_unit, indices_t output_shape=indices_t())
{
create_t create;
create.shuffle_unit = shuffle_unit;
create.output_shape = output_shape;
return Create(create);
}
#endif
/**
* @brief set the input shape
* @detail Sets the input shape and (re)initializes internal state; after
* this call GetOutputShape() returns a valid value. Calling it with
* the same shape still reinitializes the internal variables.
* If the configured output shape does not match the input size,
* the output shape falls back to the input shape.
* @param shape shape of the nodes making up one frame
* @return the resulting output shape
*/
indices_t SetInputShape(indices_t shape) override
{
m_input_shape = shape;
if ( m_output_shape.empty() || CalcShapeSize(shape) != CalcShapeSize(m_output_shape) ) {
m_output_shape = m_input_shape;
}
// NOTE(review): m_shuffle_unit == 0 would make this modulo undefined —
// confirm callers always configure a positive shuffle_unit
BB_ASSERT(CalcShapeSize(m_output_shape) % m_shuffle_unit == 0);
return m_output_shape;
}
/**
* @brief get the input shape
* @return the input shape
*/
indices_t GetInputShape(void) const override
{
return m_input_shape;
}
/**
* @brief get the output shape
* @return the output shape
*/
indices_t GetOutputShape(void) const override
{
return m_output_shape;
}
/**
* @brief forward pass
* @param x_buf input data
* @param train true during training (unused here; the shuffle is stateless)
* @return shuffled output buffer
*/
inline FrameBuffer Forward(FrameBuffer x_buf, bool train = true) override
{
// allocate the result buffer
FrameBuffer y_buf( x_buf.GetFrameSize(), m_output_shape, x_buf.GetType());
#ifdef BB_WITH_CUDA
if ( !m_host_only && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
// CUDA path
auto ptr_x = x_buf.LockDeviceMemoryConst();
auto ptr_y = y_buf.LockDeviceMemory(true);
bbcu_Shuffle_Forward<int>(
(int const *)ptr_x.GetAddr(),
(int *)ptr_y.GetAddr(),
(unsigned int )m_shuffle_unit,
(unsigned int )x_buf.GetNodeSize(),
(unsigned int )x_buf.GetFrameSize(),
(unsigned int )(x_buf.GetFrameStride() / sizeof(int))
);
return y_buf;
}
#endif
{
// generic (CPU) path: copy whole node rows in transposed order
index_t frame_size = x_buf.GetFrameSize();
index_t node_size = x_buf.GetNodeSize();
index_t stride_size = x_buf.GetFrameStride();
index_t y_unit_size = m_shuffle_unit;
index_t x_unit_size = node_size / y_unit_size;
auto x_ptr = (std::uint8_t *)x_buf.LockMemoryConst().GetAddr();
auto y_ptr = (std::uint8_t *)y_buf.LockMemory().GetAddr();
#pragma omp parallel for
for ( index_t i = 0; i < x_unit_size; ++i ) {
for ( index_t j = 0; j < y_unit_size; ++j ) {
memcpy(&y_ptr[(i*y_unit_size+j)*stride_size], &x_ptr[(j*x_unit_size+i)*stride_size], stride_size);
}
}
return y_buf;
}
}
/**
* @brief backward pass
* @detail applies the inverse permutation to the incoming gradient
*
* @return gradient with respect to the input
*/
inline FrameBuffer Backward(FrameBuffer dy_buf) override
{
if (dy_buf.Empty()) {
return FrameBuffer();
}
// allocate the result buffer
FrameBuffer dx_buf(dy_buf.GetFrameSize(), m_input_shape, dy_buf.GetType());
#ifdef BB_WITH_CUDA
if ( !m_host_only && dy_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
// CUDA path
auto ptr_dy = dy_buf.LockDeviceMemoryConst();
auto ptr_dx = dx_buf.LockDeviceMemory(true);
bbcu_Shuffle_Backward<int>(
(int const *)ptr_dy.GetAddr(),
(int *)ptr_dx.GetAddr(),
(unsigned int )m_shuffle_unit,
(unsigned int )dy_buf.GetNodeSize(),
(unsigned int )dy_buf.GetFrameSize(),
(unsigned int )(dy_buf.GetFrameStride() / sizeof(int))
);
return dx_buf;
}
#endif
{
// generic (CPU) path: inverse transpose of the Forward copy
index_t frame_size = dy_buf.GetFrameSize();
index_t node_size = dy_buf.GetNodeSize();
index_t stride_size = dy_buf.GetFrameStride();
index_t y_unit_size = m_shuffle_unit;
index_t x_unit_size = node_size / y_unit_size;
auto dy_ptr = (std::uint8_t *)dy_buf.LockMemoryConst().GetAddr();
auto dx_ptr = (std::uint8_t *)dx_buf.LockMemory().GetAddr();
#pragma omp parallel for
for ( index_t i = 0; i < y_unit_size; ++i ) {
for ( index_t j = 0; j < x_unit_size; ++j ) {
memcpy(&dx_ptr[(i*x_unit_size+j)*stride_size], &dy_ptr[(j*y_unit_size+i)*stride_size], stride_size);
}
}
return dx_buf;
}
}
// serialization
protected:
void DumpObjectData(std::ostream &os) const override
{
// version
std::int64_t ver = 1;
bb::SaveValue(os, ver);
// parent class
_super::DumpObjectData(os);
// members
bb::SaveValue(os, m_host_only);
bb::SaveValue(os, m_input_shape);
bb::SaveValue(os, m_output_shape);
bb::SaveValue(os, m_shuffle_unit);
}
void LoadObjectData(std::istream &is) override
{
// version
std::int64_t ver;
bb::LoadValue(is, ver);
BB_ASSERT(ver == 1);
// parent class
_super::LoadObjectData(is);
// members
bb::LoadValue(is, m_host_only);
bb::LoadValue(is, m_input_shape);
bb::LoadValue(is, m_output_shape);
bb::LoadValue(is, m_shuffle_unit);
// nothing to rebuild
}
};
}
// end of file
|
keyval.h | /*
* The MIT License (MIT)
*
 * Copyright (c) 2016 Eduard López
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* MODIFIED: Muhammad Haseeb, 2020
*
*/
#pragma once
#include <omp.h>
#include "common.hpp"
#ifdef USE_OMP
/*
* FUNCTION: KeyVal_Parallel_Internal
*
* DESCRIPTION: Internal Parallel Key Value Sort
*
* INPUT:
* @key : Key array
* @val : Value array
* @left : Start index of arrays
* @right : End index of arrays
* @cutoff: Sort in serial if right-left < cutoff
*
* OUTPUT: none
*/
/*
 * Internal parallel quicksort over a key array with a parallel value array.
 * Partitions [left, right] around the middle element, then recurses:
 * serially when the range is below `cutoff`, otherwise as OpenMP tasks.
 */
template<class K, class V>
VOID KeyVal_Parallel_Internal(K *key, V *val, int_t left, int_t right, int_t cutoff)
{
    int_t i = left, j = right;
    K tmp;
    V tmp2;
    K pivotkey = key[(left + right) / 2];

    {
        /* PARTITION PART */
        while (i <= j)
        {
            while (key[i] < pivotkey)
                i++;
            while (key[j] > pivotkey)
                j--;
            if (i <= j)
            {
                /* swap key and value together so the arrays stay aligned */
                tmp = key[i];
                tmp2 = val[i];
                key[i] = key[j];
                val[i] = val[j];
                key[j] = tmp;
                val[j] = tmp2;
                i++;
                j--;
            }
        }
    }

    if (((right - left) < cutoff))
    {
        /* small range: recurse serially */
        if (left < j)
        {
            KeyVal_Parallel_Internal<K, V>(key, val, left, j, cutoff);
        }
        if (i < right)
        {
            KeyVal_Parallel_Internal<K, V>(key, val, i, right, cutoff);
        }
    }
    else
    {
        /* Guard the task branch the same way as the serial branch so no
         * task is ever spawned for an empty or single-element range. */
        if (left < j)
        {
#pragma omp task
            {
                KeyVal_Parallel_Internal<K, V>(key, val, left, j, cutoff);
            }
        }
        if (i < right)
        {
#pragma omp task
            {
                KeyVal_Parallel_Internal<K, V>(key, val, i, right, cutoff);
            }
        }
    }
}
/*
* FUNCTION: KeyVal_Parallel
*
 * DESCRIPTION: Parallel Key Value Sort (public entry point)
*
* INPUT:
* @key : Key array
* @val : Value array
* @lenArray : Length of arrays
* @numThreads: Number of parallel threads
*
* OUTPUT: none
*/
/*
 * Parallel key/value quicksort entry point.
 * Sorts `key` ascending, permuting `val` identically, using `numThreads`
 * OpenMP threads. Ranges smaller than the cutoff are sorted serially.
 */
template<class K, class V>
VOID KeyVal_Parallel(K* key, V *val, uint_t lenArray, uint_t numThreads)
{
    int_t cutoff = 1000;

    /* Fewer than 2 elements are already sorted. This also prevents
     * lenArray - 1 from wrapping around when lenArray == 0, since
     * uint_t is unsigned. */
    if (lenArray < 2)
        return;

#pragma omp parallel num_threads(numThreads)
    {
#pragma omp single nowait
        {
            KeyVal_Parallel_Internal<K, V>(key, val, 0, lenArray - 1, cutoff);
        }
    }
}
#endif /* USE_OMP */
/*
* FUNCTION: KeyVal_Serial_Internal
*
* DESCRIPTION: Internal Serial Key Value Sort
*
* INPUT:
* @key : Key array
* @val : Value array
* @left : Start index of arrays
* @right: End index of arrays
*
* OUTPUT: none
*/
/*
 * Serial key/value quicksort over [low, high]: Hoare-style partition around
 * the middle key, swapping key and value entries in lockstep, then recursion
 * into both halves.
 */
template<class K, class V>
VOID KeyVal_Serial_Internal(K *key, V *val, int_t low, int_t high)
{
    int_t left = low;
    int_t right = high;
    K pivot = key[(left + right) / 2];
    K swapKey;
    V swapVal;

    /* partition: everything < pivot to the left, > pivot to the right */
    while (left <= right)
    {
        while (key[left] < pivot)
            left++;
        while (key[right] > pivot)
            right--;
        if (left <= right)
        {
            swapKey = key[left];
            swapVal = val[left];
            key[left] = key[right];
            val[left] = val[right];
            key[right] = swapKey;
            val[right] = swapVal;
            left++;
            right--;
        }
    }

    /* recurse into any non-trivial halves */
    if (right > low)
        KeyVal_Serial_Internal(key, val, low, right);
    if (left < high)
        KeyVal_Serial_Internal(key, val, left, high);
}
/*
* FUNCTION: KeyVal_Serial
*
 * DESCRIPTION: Serial Key Value Sort (public entry point)
*
* INPUT:
* @key : Key array
* @val : Value array
* @lenArray: Length of arrays
*
* OUTPUT: none
*/
/*
 * Serial key/value quicksort entry point. Sorts `key` ascending, permuting
 * `val` identically. The guard keeps lenArray - 1 from wrapping when
 * lenArray == 0 (uint_t is unsigned); 0- and 1-element arrays are no-ops.
 */
template<class K, class V>
VOID KeyVal_Serial(K *key, V *val, uint_t lenArray) { if (lenArray > 1) KeyVal_Serial_Internal<K, V>(key, val, 0, lenArray-1); }
|
oracle_fmt_plug.c | /*
* This software is Copyright (c) 2004 bartavelle, <simon at banquise.net>, and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
*
* UTF-8 support: Copyright magnum 2012 and hereby released to the general
* public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, is permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_oracle;
#elif FMT_REGISTERS_H
john_register_one(&fmt_oracle);
#else
#include <string.h>
#include <openssl/des.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "unicode.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 512
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "oracle"
#define FORMAT_NAME "Oracle 10"
#define FORMAT_TAG "O$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "DES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 120 // worst case UTF-8 is 40 characters of Unicode, that'll do
#define BINARY_SIZE 8
#define BINARY_ALIGN 4
#define MAX_USERNAME_LEN 30
#define SALT_SIZE (MAX_USERNAME_LEN*2 + 4) // also contain the NULL
#define SALT_ALIGN 2
#define CIPHERTEXT_LENGTH 16
#define MAX_INPUT_LEN (CIPHERTEXT_LENGTH + 3 + MAX_USERNAME_LEN * (options.input_enc == UTF_8 ? 3 : 1))
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
//#define DEBUG_ORACLE
// Known-answer test vectors: {ciphertext, plaintext[, {username}]}.
// The first group embeds the username via the "O$user#hash" form; the second
// group supplies the username separately as a field.
static struct fmt_tests tests[] = {
{"O$SYSTEM#9EEDFA0AD26C6D52", "THALES" },
{"O$SIMON#4F8BC1809CB2AF77", "A"},
{"O$SIMON#183D72325548EF11", "THALES2" },
{"O$SIMON#C4EB3152E17F24A4", "TST" },
{"O$BOB#b02c8e79ed2e7f46", "LAPIN" },
{"O$BOB#6bb4e95898c88011", "LAPINE" },
{"O$BOB#cdc6b483874b875b", "GLOUGLOU" },
{"O$BOB#ef1f9139db2d5279", "GLOUGLOUTER" },
{"O$BOB#c0ee5107c9a080c1", "AZERTYUIOP" },
{"O$BOB#99e8b231d33772f9", "CANARDWC" },
{"O$BOB#da3224126a67c8ed", "COUCOU_COUCOU" },
{"O$bob#ec8147abb3373d53", "LONG_MOT_DE_PASSE_OUI" },
{"9EEDFA0AD26C6D52", "THALES", {"SYSTEM"} },
{"4F8BC1809CB2AF77", "A", {"SIMON"} },
{"183D72325548EF11", "THALES2", {"SIMON"} },
{"C4EB3152E17F24A4", "TST", {"SIMON"} },
{"b02c8e79ed2e7f46", "LAPIN", {"BOB"} },
{"6bb4e95898c88011", "LAPINE", {"BOB"} },
{"cdc6b483874b875b", "GLOUGLOU", {"bob"} }, // put some low case in there, to make SURE the up case conversion works.
{"ef1f9139db2d5279", "GLOUGLOUTER", {"bob"} }, // also these 2 make sure lower cased passwords 'match' the 'get_key' method in the format tests.
{"c0ee5107c9a080c1", "AZERTYUIOP", {"BOB"} },
{"99e8b231d33772f9", "CANARDWC", {"BOB"} },
{"da3224126a67c8ed", "COUCOU_COUCOU", {"BOB"} },
{"ec8147abb3373d53", "LONG_MOT_DE_PASSE_OUI", {"BOB"} },
{NULL}
};
#if ARCH_LITTLE_ENDIAN
#define ENDIAN_SHIFT_L << 8
#define ENDIAN_SHIFT_R >> 8
#else
#define ENDIAN_SHIFT_L
#define ENDIAN_SHIFT_R
#endif
static UTF16 cur_salt[SALT_SIZE / 2 + PLAINTEXT_LENGTH];
static UTF16 (*cur_key)[PLAINTEXT_LENGTH + 1];
static char (*plain_key)[PLAINTEXT_LENGTH + 1];
static int (*key_length);
static uint32_t (*crypt_key)[2];
static DES_key_schedule desschedule_static;
static int salt_length;
// Validates a candidate hash line. Accepts either the tagged form
// "O$<user>#<16 hex>" or a bare 16-hex-digit ciphertext. Returns 1 if valid,
// 0 otherwise. For the tagged form, the username must convert to at most
// MAX_USERNAME_LEN UTF-16 units.
static int valid(char *ciphertext, struct fmt_main *self)
{
int i;
int l;
/*
 * 2 cases
 * 1 - it comes from the disk, and does not have O$ + salt
 * 2 - it comes from memory, and has got O$ + salt + # + blah
 */
if (strnlen(ciphertext, MAX_INPUT_LEN + 1) > MAX_INPUT_LEN)
return 0;
if (!memcmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
{
int len;
char name[MAX_USERNAME_LEN + 1];
UTF16 name16[MAX_USERNAME_LEN + 1 + 1];
ciphertext += FORMAT_TAG_LEN;
// l is the offset of the hex digest; everything before it is "user#"
l = strlen(ciphertext) - CIPHERTEXT_LENGTH;
if (l <= 0)
return 0;
if (ciphertext[l-1] != '#')
return 0;
strnzcpy(name, ciphertext, sizeof(name));
// reject names that are not valid in the configured input encoding
len = enc_to_utf16(name16, MAX_USERNAME_LEN + 1,
(UTF8*)name, strlen(name));
if (len < 0) {
static int error_shown = 0;
#ifdef HAVE_FUZZ
if (options.flags & (FLG_FUZZ_CHK | FLG_FUZZ_DUMP_CHK))
return 0;
#endif
// warn only once per run
if (!error_shown)
fprintf(stderr, "%s: Input file is not UTF-8. Please use --input-enc to specify a codepage.\n", self->params.label);
error_shown = 1;
return 0;
}
if (len > MAX_USERNAME_LEN)
return 0;
}
else
{
// bare form: must be exactly the hex digest
if (strlen(ciphertext)!=CIPHERTEXT_LENGTH)
return 0;
l = 0;
}
// every digest character must be a hex digit
for (i = l; i < l + CIPHERTEXT_LENGTH; i++){
if (!( (('0' <= ciphertext[i])&&(ciphertext[i] <= '9')) ||
(('a' <= ciphertext[i])&&(ciphertext[i] <= 'f'))
|| (('A' <= ciphertext[i])&&(ciphertext[i] <= 'F'))))
return 0;
}
return 1;
}
// Combines a login field (split_fields[0]) and a bare ciphertext
// (split_fields[1]) into the canonical "O$USER#hash" form, upper-casing the
// username in an encoding-aware way. Returns split_fields[1] unchanged when
// it is already tagged or cannot be combined.
static char *prepare(char *split_fields[10], struct fmt_main *self)
{
char *cp;
if (!split_fields[0])
return split_fields[1];
if (!strncmp(split_fields[1], FORMAT_TAG, FORMAT_TAG_LEN))
return split_fields[1];
if (strnlen(split_fields[1], CIPHERTEXT_LENGTH + 1) == CIPHERTEXT_LENGTH) {
// build a trial tagged string just to run valid() on it
cp = mem_alloc(strlen(split_fields[0]) + strlen(split_fields[1]) + 4);
sprintf (cp, "%s%s#%s", FORMAT_TAG, split_fields[0], split_fields[1]);
if (valid(cp, self)) {
UTF8 tmp8[MAX_USERNAME_LEN * 3 + 1];
int utf8len;
// we no longer need this. It was just used for valid(). We will recompute
// all lengths, after we do an upcase, since upcase can change the length of the
// utf8 string.
MEM_FREE(cp);
// Upcase user name, --encoding aware
utf8len = enc_uc(tmp8, sizeof(tmp8), (unsigned char*)split_fields[0], strlen(split_fields[0]));
cp = mem_alloc_tiny(utf8len + strlen(split_fields[1]) + 4, MEM_ALIGN_NONE);
sprintf (cp, "%s%s#%s", FORMAT_TAG, tmp8, split_fields[1]);
#ifdef DEBUG_ORACLE
printf ("tmp8 : %s\n", tmp8);
#endif
return cp;
}
MEM_FREE(cp);
}
return split_fields[1];
}
// Canonicalizes a tagged ciphertext: upper-cases the username part
// (encoding-aware) and lower-cases the hex digest after the last '#',
// so equivalent hashes compare equal.
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[FORMAT_TAG_LEN + sizeof(cur_salt) + 1 + CIPHERTEXT_LENGTH];
char *cp;
strnzcpy(out, ciphertext, sizeof(out));
enc_strupper(&out[FORMAT_TAG_LEN]);
cp = strrchr(out, '#');
if (cp)
strlwr(cp);
return out;
}
// Format initialization: installs the fixed first-pass DES key
// 0123456789abcdef, scales keys-per-crypt for OpenMP, and allocates the
// per-candidate working arrays.
static void init(struct fmt_main *self)
{
DES_set_key((DES_cblock *)"\x01\x23\x45\x67\x89\xab\xcd\xef", &desschedule_static);
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
cur_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*cur_key));
plain_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*plain_key));
crypt_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_key));
key_length = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*key_length));
}
// Releases everything allocated by init(), in reverse order.
static void done(void)
{
MEM_FREE(key_length);
MEM_FREE(crypt_key);
MEM_FREE(plain_key);
MEM_FREE(cur_key);
}
// Installs the current salt. Layout (see get_salt): element [0] is the
// byte length of the username, followed by the UTF-16BE username.
static void set_salt(void *salt) {
salt_length = ((unsigned short *)salt)[0];
memcpy(cur_salt, &((unsigned short *)salt)[1], salt_length);
}
// Stores a candidate password: keeps the raw key in plain_key[], converts it
// to UTF-16, upper-cases it, then byte-swaps to UTF-16BE in cur_key[].
// key_length[] ends up as the byte length of the UTF-16BE key.
static void oracle_set_key(char *key, int index) {
UTF16 cur_key_mixedcase[PLAINTEXT_LENGTH+1];
UTF16 *c;
strcpy(plain_key[index], key);
// Can't use enc_to_utf16_be() because we need to do utf16_uc later
key_length[index] = enc_to_utf16((UTF16 *)cur_key_mixedcase, PLAINTEXT_LENGTH, (unsigned char*)key, strlen(key));
if (key_length[index] < 0)
key_length[index] = strlen16(cur_key_mixedcase);
// We convert and uppercase in one shot
key_length[index] = utf16_uc((UTF16 *)cur_key[index], PLAINTEXT_LENGTH, cur_key_mixedcase, key_length[index]);
// we have no way to 'undo' here, since the expansion is due to single-2-multi expansion in the upcase,
// and we can not 'fix' our password. We simply have to 'not' properly decrypt this one, but protect ourselves.
if (key_length[index] < 0)
key_length[index] *= -1;
// Now byte-swap to UTF16-BE
c = cur_key[index];
while((*c = *c << 8 | *c >> 8))
c++;
// convert the UTF-16 unit count into a byte count
key_length[index] *= sizeof(UTF16);
#ifdef DEBUG_ORACLE
dump_stuff_msg("cur_key ", (unsigned char*)&cur_key[index][0], key_length[index]);
#endif
}
// Returns the stored candidate, upper-cased in an encoding-aware way,
// mirroring what oracle_set_key() hashed.
static char *get_key(int index) {
static UTF8 UC_Key[PLAINTEXT_LENGTH*3*3+1];
// Calling this will ONLY upcase characters 'valid' in the code page. There are MANY
// code pages which mssql WILL upcase the letter (in UCS-2), but there is no upper case value
// in the code page. Thus we MUST keep the lower cased letter in this case.
enc_uc(UC_Key, sizeof(UC_Key), (UTF8*)plain_key[index], strlen(plain_key[index]));
return (char*)UC_Key;
}
// Computes the Oracle hash for each candidate: DES-CBC encrypt the
// concatenation (UTF-16BE username || UTF-16BE upper-cased password) with
// the fixed key to derive a key, then re-encrypt the same data with that
// derived key; the final CBC residue is the hash. Parallelized over
// candidates with OpenMP.
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int idx = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (idx = 0; idx < count; idx++)
#endif
{
unsigned char buf[sizeof(cur_salt)];
unsigned char buf2[SALT_SIZE + PLAINTEXT_LENGTH*2];
DES_key_schedule sched_local;
unsigned int l;
// build salt || key in a per-thread buffer
l = salt_length + key_length[idx];
memcpy(buf2, cur_salt, salt_length);
memcpy(buf2 + salt_length, cur_key[idx], key_length[idx]);
#ifdef DEBUG_ORACLE
dump_stuff_msg("cur_salt ", buf2, salt_length+key_length[idx]);
#endif
// pass 1: zero IV, fixed key -> derived key in crypt_key[idx]
crypt_key[idx][0] = 0;
crypt_key[idx][1] = 0;
DES_ncbc_encrypt(buf2, buf, l, &desschedule_static, (DES_cblock *) crypt_key[idx], DES_ENCRYPT);
DES_set_key((DES_cblock *)crypt_key[idx], &sched_local);
// pass 2: zero IV, derived key -> final hash in crypt_key[idx]
crypt_key[idx][0] = 0;
crypt_key[idx][1] = 0;
DES_ncbc_encrypt(buf2, buf, l, &sched_local, (DES_cblock *) crypt_key[idx], DES_ENCRYPT);
#ifdef DEBUG_ORACLE
dump_stuff_msg("  crypt_key ", (unsigned char*)&crypt_key[idx][0], 8);
#endif
}
return count;
}
// Decodes the trailing CIPHERTEXT_LENGTH hex digits of the canonical
// ciphertext into BINARY_SIZE raw bytes (static buffer, reused).
static void * get_binary(char *ciphertext)
{
    static unsigned char *bin;
    int ofs, k;

    if (!bin)
        bin = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);

    /* the hex digest is the last CIPHERTEXT_LENGTH characters */
    ofs = strlen(ciphertext) - CIPHERTEXT_LENGTH;
    for (k = 0; k < BINARY_SIZE; k++)
    {
        bin[k] = (atoi16[ARCH_INDEX(ciphertext[ofs + k * 2])] << 4)
               +  atoi16[ARCH_INDEX(ciphertext[ofs + k * 2 + 1])];
    }
    return bin;
}
// Extracts the username between the "O$" tag and '#', upper-cases it
// (encoding-aware), and converts it to UTF-16BE. Returns a static buffer
// whose element [0] holds the byte length of the converted name.
static void * get_salt(char * ciphertext)
{
static UTF16 *out;
UTF8 salt[SALT_SIZE + 1];
int l;
if (!out) out = mem_alloc_tiny(SALT_SIZE+2, MEM_ALIGN_WORD);
memset(out, 0, SALT_SIZE+2);
ciphertext += FORMAT_TAG_LEN;
l = 0;
// copy characters up to '#', bounded by the salt buffer
while( ciphertext[l] && (ciphertext[l]!='#') )
{
salt[l] = ciphertext[l];
l++;
if (l >= SALT_SIZE-2) break;
}
salt[l] = 0;
// Encoding-aware shift to upper-case
enc_strupper((char*)salt);
l = enc_to_utf16_be(&out[1], MAX_USERNAME_LEN, (UTF8 *)salt, l);
// store the byte length (UTF-16 units * 2) in the leading slot
out[0] = (l<<1);
return out;
}
// Public domain hash function by DJ Bernstein (salt is a username)
static int salt_hash(void *salt)
{
    unsigned int h = 5381;
    UTF16 *p;

    /* skip the leading length word, then mix each UTF-16 unit of the name */
    for (p = ((UTF16 *)salt) + 1; *p; p++)
        h = ((h << 5) + h) ^ *p;

    return h & (SALT_HASH_SIZE - 1);
}
// Partial-hash accessors: mask progressively more bits of the first 32 bits
// of the computed hash, used by the cracker's hash-table lookups.
static int get_hash_0(int idx) { return crypt_key[idx][0] & PH_MASK_0; }
static int get_hash_1(int idx) { return crypt_key[idx][0] & PH_MASK_1; }
static int get_hash_2(int idx) { return crypt_key[idx][0] & PH_MASK_2; }
static int get_hash_3(int idx) { return crypt_key[idx][0] & PH_MASK_3; }
static int get_hash_4(int idx) { return crypt_key[idx][0] & PH_MASK_4; }
static int get_hash_5(int idx) { return crypt_key[idx][0] & PH_MASK_5; }
static int get_hash_6(int idx) { return crypt_key[idx][0] & PH_MASK_6; }
// Fast scan: does any computed hash match the first 32 bits of `binary`?
static int cmp_all(void *binary, int count)
{
    uint32_t target = *(uint32_t *)binary;
    int k = 0;

    while (k < count)
    {
        if (target == *((uint32_t *)(crypt_key[k])))
            return 1;
        ++k;
    }
    return 0;
}
// Full 8-byte comparison for a single candidate.
static int cmp_one(void *binary, int idx)
{
return !memcmp(binary, crypt_key[idx], sizeof(crypt_key[idx]));
}
// The full hash is already compared in cmp_one(); nothing further to check.
static int cmp_exact(char *source, int index)
{
return 1;
}
// Format descriptor registered with John the Ripper: static parameters
// followed by the method table wiring up the functions above.
struct fmt_main fmt_oracle = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
{ NULL },
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
prepare,
valid,
split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
oracle_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
stinger-physmap.c | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include "stinger-physmap.h"
#include "stinger-atomics.h"
#define MARKERINT INT64_MAX
int64_t MARKER_;
void * MARKER = (void *)&MARKER_;
#define CHILDREN_COUNT 256
/* struct defs */
// One node of the 256-way key trie: one child per possible byte value.
// isEndpoint marks nodes where a complete key terminates; vertexID is the
// mapped vertex for endpoint nodes.
typedef struct tree_node {
struct tree_node * children[CHILDREN_COUNT];
struct tree_node * parent;
uint8_t isEndpoint;
int64_t vertexID;
int64_t depth;
char value;
} tree_node_t;
// Physical mapper state: a fixed pool of trie nodes (keyTree[0] is the
// root) plus a reverse-lookup table from vertex ID to its endpoint node.
// The *Top counters are advanced with atomic fetch-and-add.
struct stinger_physmap {
tree_node_t keyTree[MAX_NODES];
int64_t keyTreeTop;
tree_node_t * vtxStack[MAX_VTXID];
int64_t vtxStackTop;
};
/* internal functions protos */
tree_node_t *
allocateTreeNode (stinger_physmap_t * map, tree_node_t * parent, int64_t depth, char value);
int
insertIntoTree(stinger_physmap_t * map, tree_node_t ** node, char * string, int64_t length);
/* function defs */
/** @brief Allocate and initialize a new physical mapper.
*
* The user is responsible for freeing via stinger_physmap_delete().
*
* @return A new physical mapper.
*/
stinger_physmap_t *
stinger_physmap_create() {
    /* Allocate the mapper and seed the trie with its root node.
     * Returns NULL when allocation fails; caller frees via
     * stinger_physmap_delete(). */
    stinger_physmap_t * map = malloc(sizeof(stinger_physmap_t));

    if (NULL == map)
        return NULL;

    map->vtxStackTop = 0;
    map->keyTreeTop = 0;
    allocateTreeNode(map, NULL, 0, '\0');
    return map;
}
/** @brief Free a physical mapper.
*
* @param map The physical mapper to be freed.
*/
// Frees the mapper; all trie nodes live inside the single allocation.
void
stinger_physmap_delete(stinger_physmap_t * map) {
free(map);
}
/* Claims the next free slot in the node pool with an atomic fetch-and-add,
 * zero-initializes it, and links it to its parent. Returns NULL when the
 * pool is exhausted. */
tree_node_t *
allocateTreeNode (stinger_physmap_t * map, tree_node_t * parent, int64_t depth, char value) {
    int64_t myNode = stinger_int64_fetch_add(&(map->keyTreeTop),1);
    /* Test the slot index we atomically claimed, not the shared counter:
     * re-reading map->keyTreeTop races with concurrent allocations and would
     * also wrongly reject the last valid slot (index MAX_NODES - 1). */
    if(myNode >= MAX_NODES) {
        fprintf(stderr, "PHYSMAP: ERROR Out of treenodes\n");
        return NULL;
    }
    memset(map->keyTree + myNode, 0, sizeof(tree_node_t));
    map->keyTree[myNode].parent = parent;
    map->keyTree[myNode].depth = depth;
    map->keyTree[myNode].value = value;
    return map->keyTree + myNode;
}
// Walks/extends the trie for `string`, using CAS to coordinate concurrent
// inserts. On return *node points at the endpoint node.
// Returns 0 if this call created the mapping (caller assigns the vertex ID),
// 1 if the endpoint already existed, 2 if another thread is concurrently
// creating it (vertexID still MARKERINT), -1 on allocation failure.
int
insertIntoTree(stinger_physmap_t * map, tree_node_t ** node, char * string, int64_t length) {
if(length == 0) {
if((*node)->isEndpoint)
return 1;
(*node)->isEndpoint = 1;
// CAS claims the right to assign the vertex ID; the loser sees MARKERINT
if(!stinger_int64_cas(&((*node)->vertexID), 0, MARKERINT)) {
return 0;
} else {
return 2;
}
} else {
if(!(*node)->children[(int)string[0]]) {
// CAS on the child slot: the winner installs MARKER, then the real node
if(!stinger_ptr_cas((void **)&((*node)->children[(int)string[0]]), NULL, MARKER)) {
(*node)->children[(int)string[0]] = allocateTreeNode(map, *node, (*node)->depth+1, string[0]);
}
}
if(!(*node)->children[(int)string[0]]) {
return -1;
}
// spin until the winning thread replaces the MARKER with a real node
while((*node)->children[(int)string[0]] == MARKER) ;
(*node) = (*node)->children[(int)string[0]];
return insertIntoTree(map, node, ++string, --length);
}
}
/** @brief Create a new mapping from a binary data string to a vertex ID.
*
* This function will uniquely map an arbitrary binary string or character
* string to a vertex ID in the space of 0 to NV where NV is the number
* of unique strings that have been mapped thus far (in other words the vertex
* ID space is compact). It will return -1 on error or if the mapping already exists.
* It is safe to call this function in parallel with any other physical mapper function.
* To determine if a -1 result is from an error, call stinger_physmap_get_mapping()
* on the same string. If it also returns -1, then an error has occurred.
*
* @param map The physical mapper.
* @param string The binary or character data string.
* @param length The length of the string.
* @return A unique vertex ID or -1 if the mapping exists or an error occurs.
*/
// Creates a new string -> vertex-ID mapping (see the contract in the block
// comment above: returns -1 both on error and when the mapping exists).
int64_t
stinger_physmap_create_mapping (stinger_physmap_t * map, char * string, int64_t length) {
if(map->vtxStackTop == MAX_VTXID) {
fprintf(stderr, "PHYSMAP: ERROR Out of vertices\n");
return -1;
}
int64_t vertexID;
tree_node_t * node = map->keyTree;
int result = insertIntoTree(map, &node, string, length);
switch(result) {
case 0:
// we won the race: assign a fresh compact vertex ID
vertexID = stinger_int64_fetch_add(&(map->vtxStackTop), 1);
node->vertexID = vertexID;
map->vtxStack[vertexID] = node;
break;
case 2:
// another thread is creating the mapping; wait for its ID to appear
while(node->vertexID == MARKERINT) ;
vertexID = node->vertexID;
/* fallthrough: per the documented contract, an already-existing
 * mapping (and any error) reports -1 */
default:
return -1;
}
return vertexID;
}
/** @brief Lookup a mapping from a binary data string to a vertex ID.
 *
 * Returns the vertex ID previously assigned to the given string, or -1 if
 * no such mapping exists.  Safe to call concurrently with any other
 * physical mapper function.
 *
 * @param map The physical mapper.
 * @param string The binary or character data string.
 * @param length The length of the string.
 * @return A unique vertex ID or -1 if the mapping does not exist.
 */
int64_t
stinger_physmap_get_mapping (stinger_physmap_t * map, char * string, int64_t length) {
  tree_node_t * node = map->keyTree;
  int64_t remaining = length;
  /* Walk one trie level per key byte until the key or the path runs out. */
  while(node && remaining > 0) {
    node = node->children[(int)(*string)];
    ++string;
    --remaining;
  }
  /* Only a node explicitly marked as an endpoint holds a valid mapping. */
  return (node && node->isEndpoint) ? node->vertexID : -1;
}
/** @brief Lookup the string mapped to a particular vertex ID.
 *
 * Reconstructs the key for a previously created mapping into *outbuffer,
 * growing the buffer via realloc when it is too small (and updating
 * *outbufferlength accordingly).  Returns -1 if no mapping exists or if
 * reallocation fails.  Safe to call concurrently with any other physical
 * mapper function.
 *
 * @param map The physical mapper.
 * @param outbuffer A buffer to store the output string.
 * @param outbufferlength The length of the buffer.
 * @param vertexID The vertex ID to reverse lookup.
 * @return 0 on success, -1 on failure.
 */
int
stinger_physmap_get_key (stinger_physmap_t * map, char ** outbuffer, int64_t * outbufferlength, int64_t vertexID) {
  if(vertexID >= map->vtxStackTop)
    return -1;
  tree_node_t * cur = map->vtxStack[vertexID];
  if(cur == NULL || !cur->isEndpoint)
    return -1;
  /* Need room for depth key bytes plus the NUL terminator. */
  int64_t needed = cur->depth + 1;
  if(needed > (*outbufferlength)) {
    char * grown = realloc(*outbuffer, sizeof(char) * needed);
    if(grown == NULL)
      return -1;
    (*outbuffer) = grown;
    (*outbufferlength) = needed;
  }
  /* Write the key back-to-front by climbing parent links to the root. */
  (*outbuffer)[cur->depth] = '\0';
  for( ; cur->parent; cur = cur->parent)
    (*outbuffer)[cur->depth - 1] = cur->value;
  return 0;
}
/* Remove a mapping from the physical mapper.
 * Not yet implemented: always prints a TODO marker and returns -1. */
int64_t
stinger_physmap_remove_mapping (stinger_physmap_t * map, int64_t vertexID) {
  printf("***TODO***\n");
  return -1;
}
/* Independent test-driver code */
#if PHYSMAP_TEST
#include <omp.h>
#include <sys/time.h>
/* Shared wall-clock timing state for the test driver.
 * NOTE: globals, so the tic_* helpers are not thread-safe; call them from
 * a single thread (or a master/serial section) only. */
struct timeval tv;
double firsttic = 0;
double lasttic = 0;
/* Reset both timer marks to the current time. */
void tic_reset() {
  gettimeofday(&tv, NULL);
  lasttic = firsttic = (double)tv.tv_sec + 1.0e-6 * (double)tv.tv_usec;
}
/* Seconds elapsed since the last tic_reset(); also advances lasttic. */
double tic_total() {
  gettimeofday(&tv, NULL);
  lasttic = (double)tv.tv_sec + 1.0e-6 * (double)tv.tv_usec;
  return lasttic - firsttic;
}
/* Seconds elapsed since the previous tic_* call; advances lasttic. */
double tic_sincelast() {
  gettimeofday(&tv, NULL);
  double current = (double)tv.tv_sec + 1.0e-6 * (double)tv.tv_usec;
  double elapsed = current - lasttic;
  lasttic = current;
  return elapsed;
}
/*
* Parallel test driver code
*/
int main(int argc, char *argv[]) {
stinger_physmap_t * map = stinger_physmap_create();
if(!map) {
printf("ALLOC FAILED");
return 0;
}
if(argc < 2) {
return -1;
}
int threads = atoi(argv[1]);
omp_set_num_threads(threads);
int64_t lines_in_file = 0;
char ** strings;
int64_t * lengths;
float insertion, lookup, reverselookup;
#pragma omp parallel
{
#pragma omp master
{
printf("%d,", omp_get_num_threads());
FILE * fp = fopen(argv[2], "r");
char * string = malloc(100*sizeof(char));;
int64_t read = 0;
int bytes = 100;
while((read = getline(&string, &bytes, fp)) != EOF ) {
lines_in_file++;
}
free(string);
fclose(fp);
fp = fopen(argv[2], "r");
strings = malloc(lines_in_file * sizeof(char *));
lengths = malloc(lines_in_file * sizeof(int64_t));
for(int64_t i = 0; i < lines_in_file; ++i) {
string = malloc(100*sizeof(char));;
read = getline(&string, &bytes, fp);
strings[i] = string;
lengths[i] = read - 2;
}
printf("%d,",lines_in_file);
}
}
tic_reset();
#pragma omp parallel for
for(int64_t i = 0; i < lines_in_file; ++i) {
int64_t mapping = stinger_physmap_create_mapping(map, strings[i ], lengths[i ]);
}
insertion = tic_sincelast();
#pragma omp parallel for
for(int64_t i = 0; i < lines_in_file; ++i) {
int64_t mapping = stinger_physmap_get_mapping(map, strings[i ], lengths[i ]);
if(mapping == -1)
printf("lu %s %lu %lu\n", strings[i ], lengths[i ], mapping);
}
lookup = tic_sincelast();
#pragma omp parallel
{
char * string2 = malloc(sizeof(char) * 100);
tic_reset();
#pragma omp for
for(int64_t i = 0; i < map->vtxStackTop; ++i) {
int64_t slen2 = 100;
if(stinger_physmap_get_key(map, &string2, &slen2, i ))
printf("rlu %s %lu\n", string2, slen2);
}
}
reverselookup = tic_sincelast();
printf("%f,%f,%f,", insertion, lookup, reverselookup);
printf("%f,%f,%f,\n", lines_in_file/insertion, lines_in_file/lookup, lines_in_file/reverselookup);
stinger_physmap_delete(map);
}
#endif
|
normal.c | // RUN: %libomp-compile-and-run | FileCheck %s
// RUN: %libomp-compile-and-run | %sort-threads \
// RUN: | FileCheck --check-prefix=THREADS %s
// REQUIRES: ompt
#include "callback.h"
int main() {
  // Spawn a team of four threads; each prints its task/parallel data at
  // nesting levels 0 and 1 so the OMPT callback trace emitted by callback.h
  // can be verified by the FileCheck directives below.  Do not edit the
  // CHECK/THREADS lines: they are matched against the program's stdout.
#pragma omp parallel num_threads(4)
  {
    print_ids(0);
    print_ids(1);
  }
  print_fuzzy_address(1);
  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback
  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]
  // make sure initial data pointers are null
  // CHECK-NOT: 0: parallel_data initially not null
  // CHECK-NOT: 0: task_data initially not null
  // CHECK-NOT: 0: thread_data initially not null
  // Only check callback names, arguments are verified in THREADS below.
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin
  // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin
  // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end
  // Note that we cannot ensure that the worker threads have already called
  // barrier_end and implicit_task_end before parallel_end!
  // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin
  // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin
  // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin
  // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin
  // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin
  // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end
  // THREADS: 0: NULL_POINTER=[[NULL:.*$]]
  // THREADS: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_thread_begin
  // THREADS-SAME: thread_type=ompt_thread_initial=1, thread_id=[[MASTER_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin
  // THREADS-SAME: parent_task_id=[[PARENT_TASK_ID:[0-9]+]]
  // THREADS-SAME: parent_task_frame.exit=[[NULL]]
  // THREADS-SAME: parent_task_frame.reenter={{0x[0-f]+}}
  // THREADS-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4
  // THREADS-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]]
  // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[MASTER_ID]]: task level 0
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: task level 1
  // THREADS-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]]
  // THREADS-SAME: task_id=[[PARENT_TASK_ID]]
  // THREADS-NOT: ompt_event_implicit_task_end
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS-SAME: codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}}
  // THREADS: {{^}}[[MASTER_ID]]: task level 0
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS-SAME: exit_frame=[[NULL]], reenter_frame=[[NULL]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_end
  // parallel_id is 0 because the region ended in the barrier!
  // THREADS-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS-SAME: codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}}
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end
  // THREADS-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_thread_begin
  // THREADS-SAME: thread_type=ompt_thread_worker=2, thread_id=[[THREAD_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]]
  // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 0
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 1
  // THREADS-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]]
  // THREADS-SAME: task_id=[[PARENT_TASK_ID]]
  // THREADS-NOT: ompt_event_implicit_task_end
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end
  // parallel_id is 0 because the region ended in the barrier!
  // THREADS-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
  // THREADS-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_thread_begin
  // THREADS-SAME: thread_type=ompt_thread_worker=2, thread_id=[[THREAD_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]]
  // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 0
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 1
  // THREADS-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]]
  // THREADS-SAME: task_id=[[PARENT_TASK_ID]]
  // THREADS-NOT: ompt_event_implicit_task_end
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end
  // parallel_id is 0 because the region ended in the barrier!
  // THREADS-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
  // THREADS-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_thread_begin
  // THREADS-SAME: thread_type=ompt_thread_worker=2, thread_id=[[THREAD_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]]
  // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 0
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 1
  // THREADS-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]]
  // THREADS-SAME: task_id=[[PARENT_TASK_ID]]
  // THREADS-NOT: ompt_event_implicit_task_end
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end
  // parallel_id is 0 because the region ended in the barrier!
  // THREADS-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
  // THREADS-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
  return 0;
}
|
omp-nested-par.c | #include <stdio.h>
/* Minimal smoke test for nested parallelism: an outer parallel region whose
 * body opens a second parallel region.
 * NOTE(review): the inner region's team size depends on the runtime's
 * nested-parallelism settings (e.g. OMP_NESTED / max-active-levels); many
 * runtimes serialize it by default -- confirm the intended configuration. */
int main() {
#pragma omp parallel
  {
    printf("Hello World\n");
#pragma omp parallel
    {
      printf("nested parallel region\n");
    }
  }
  return 0;
}
|
OMPIRBuilder.h | //===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the OpenMPIRBuilder class and helpers used as a convenient
// way to create LLVM instructions for OpenMP directives.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Allocator.h"
#include <forward_list>
namespace llvm {
class CanonicalLoopInfo;
/// An interface to create LLVM-IR for OpenMP directives.
///
/// Each OpenMP directive has a corresponding public generator method.
class OpenMPIRBuilder {
public:
/// Create a new OpenMPIRBuilder operating on the given module \p M. This will
/// not have an effect on \p M (see initialize).
OpenMPIRBuilder(Module &M) : M(M), Builder(M.getContext()) {}
~OpenMPIRBuilder();
/// Initialize the internal state, this will put structures types and
/// potentially other helpers into the underlying module. Must be called
/// before any other method and only once!
void initialize();
/// Finalize the underlying module, e.g., by outlining regions.
/// \param Fn The function to be finalized. If not used,
/// all functions are finalized.
/// \param AllowExtractorSinking Flag to include sinking instructions,
/// emitted by CodeExtractor, in the
/// outlined region. Default is false.
void finalize(Function *Fn = nullptr, bool AllowExtractorSinking = false);
/// Add attributes known for \p FnID to \p Fn.
void addAttributes(omp::RuntimeFunction FnID, Function &Fn);
/// Type used throughout for insertion points.
using InsertPointTy = IRBuilder<>::InsertPoint;
/// Callback type for variable finalization (think destructors).
///
/// \param CodeGenIP is the insertion point at which the finalization code
/// should be placed.
///
/// A finalize callback knows about all objects that need finalization, e.g.
/// destruction, when the scope of the currently generated construct is left
/// at the time, and location, the callback is invoked.
using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>;
struct FinalizationInfo {
/// The finalization callback provided by the last in-flight invocation of
/// createXXXX for the directive of kind DK.
FinalizeCallbackTy FiniCB;
/// The directive kind of the innermost directive that has an associated
/// region which might require finalization when it is left.
omp::Directive DK;
/// Flag to indicate if the directive is cancellable.
bool IsCancellable;
};
/// Push a finalization callback on the finalization stack.
///
/// NOTE: Temporary solution until Clang CG is gone.
void pushFinalizationCB(const FinalizationInfo &FI) {
FinalizationStack.push_back(FI);
}
/// Pop the last finalization callback from the finalization stack.
///
/// NOTE: Temporary solution until Clang CG is gone.
void popFinalizationCB() { FinalizationStack.pop_back(); }
/// Callback type for body (=inner region) code generation
///
/// The callback takes code locations as arguments, each describing a
/// location at which code might need to be generated or a location that is
/// the target of control transfer.
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the body code should be
/// placed.
/// \param ContinuationBB is the basic block target to leave the body.
///
/// Note that all blocks pointed to by the arguments have terminators.
using BodyGenCallbackTy =
function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
// This is created primarily for sections construct as llvm::function_ref
// (BodyGenCallbackTy) is not storable (as described in the comments of
// function_ref class - function_ref contains non-ownable reference
// to the callable.
using StorableBodyGenCallbackTy =
std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
/// Callback type for loop body code generation.
///
/// \param CodeGenIP is the insertion point where the loop's body code must be
/// placed. This will be a dedicated BasicBlock with a
/// conditional branch from the loop condition check and
/// terminated with an unconditional branch to the loop
/// latch.
/// \param IndVar is the induction variable usable at the insertion point.
using LoopBodyGenCallbackTy =
function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>;
/// Callback type for variable privatization (think copy & default
/// constructor).
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the privatization code
/// should be placed.
/// \param Original The value being copied/created, should not be used in the
/// generated IR.
/// \param Inner The equivalent of \p Original that should be used in the
/// generated IR; this is equal to \p Original if the value is
/// a pointer and can thus be passed directly, otherwise it is
/// an equivalent but different value.
/// \param ReplVal The replacement value, thus a copy or new created version
/// of \p Inner.
///
/// \returns The new insertion point where code generation continues and
/// \p ReplVal the replacement value.
using PrivatizeCallbackTy = function_ref<InsertPointTy(
InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original,
Value &Inner, Value *&ReplVal)>;
/// Description of a LLVM-IR insertion point (IP) and a debug/source location
/// (filename, line, column, ...).
struct LocationDescription {
template <typename T, typename U>
LocationDescription(const IRBuilder<T, U> &IRB)
: IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {}
LocationDescription(const InsertPointTy &IP) : IP(IP) {}
LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
: IP(IP), DL(DL) {}
InsertPointTy IP;
DebugLoc DL;
};
/// Emitter methods for OpenMP directives.
///
///{
/// Generator for '#omp barrier'
///
/// \param Loc The location where the barrier directive was encountered.
/// \param DK The kind of directive that caused the barrier.
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive DK,
bool ForceSimpleCall = false,
bool CheckCancelFlag = true);
/// Generator for '#omp cancel'
///
/// \param Loc The location where the directive was encountered.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param CanceledDirective The kind of directive that is canceled.
///
/// \returns The insertion point after the barrier.
InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition,
omp::Directive CanceledDirective);
/// Generator for '#omp parallel'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param BodyGenCB Callback that will generate the region code.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param NumThreads The evaluated 'num_threads' clause expression, if any.
/// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind).
/// \param IsCancellable Flag to indicate a cancellable parallel region.
///
/// \returns The insertion position *after* the parallel.
IRBuilder<>::InsertPoint
createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP,
BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, Value *IfCondition,
Value *NumThreads, omp::ProcBindKind ProcBind,
bool IsCancellable);
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// This generator operates on the logical iteration space of the loop, i.e.
/// the caller only has to provide a loop trip count of the loop as defined by
/// base language semantics. The trip count is interpreted as an unsigned
/// integer. The induction variable passed to \p BodyGenCB will be of the same
/// type and run from 0 to \p TripCount - 1. It is up to the callback to
/// convert the logical iteration variable to the loop counter variable in the
/// loop body.
///
/// \param Loc The insert and source location description. The insert
/// location can be between two instructions or the end of a
/// degenerate block (e.g. a BB under construction).
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param TripCount Number of iterations the loop body is executed.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *TripCount,
const Twine &Name = "loop");
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// Instead of a logical iteration space, this allows specifying user-defined
/// loop counter values using increment, upper- and lower bounds. To
/// disambiguate the terminology when counting downwards, instead of lower
/// bounds we use \p Start for the loop counter value in the first body
/// iteration.
///
/// Consider the following limitations:
///
/// * A loop counter space over all integer values of its bit-width cannot be
/// represented. E.g using uint8_t, its loop trip count of 256 cannot be
/// stored into an 8 bit integer):
///
/// DO I = 0, 255, 1
///
/// * Unsigned wrapping is only supported when wrapping only "once"; E.g.
/// effectively counting downwards:
///
/// for (uint8_t i = 100u; i > 0; i += 127u)
///
///
/// TODO: May need to add additional parameters to represent:
///
/// * Allow representing downcounting with unsigned integers.
///
/// * Sign of the step and the comparison operator might disagree:
///
/// for (int i = 0; i < 42; i -= 1u)
///
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param Start Value of the loop counter for the first iterations.
/// \param Stop Loop counter values past this will stop the loop.
/// \param Step Loop counter increment after each iteration; negative
/// means counting down.
/// \param IsSigned Whether Start, Stop and Step are signed integers.
/// \param InclusiveStop Whether \p Stop itself is a valid value for the loop
/// counter.
/// \param ComputeIP Insertion point for instructions computing the trip
/// count. Can be used to ensure the trip count is available
/// at the outermost loop of a loop nest. If not set,
/// defaults to the preheader of the generated loop.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *Start, Value *Stop, Value *Step,
bool IsSigned, bool InclusiveStop,
InsertPointTy ComputeIP = {},
const Twine &Name = "loop");
/// Collapse a loop nest into a single loop.
///
/// Merges loops of a loop nest into a single CanonicalLoopNest representation
/// that has the same number of innermost loop iterations as the origin loop
/// nest. The induction variables of the input loops are derived from the
/// collapsed loop's induction variable. This is intended to be used to
/// implement OpenMP's collapse clause. Before applying a directive,
/// collapseLoops normalizes a loop nest to contain only a single loop and the
/// directive's implementation does not need to handle multiple loops itself.
/// This does not remove the need to handle all loop nest handling by
/// directives, such as the ordered(<n>) clause or the simd schedule-clause
/// modifier of the worksharing-loop directive.
///
/// Example:
/// \code
/// for (int i = 0; i < 7; ++i) // Canonical loop "i"
/// for (int j = 0; j < 9; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After collapsing with Loops={i,j}, the loop is changed to
/// \code
/// for (int ij = 0; ij < 63; ++ij) {
/// int i = ij / 9;
/// int j = ij % 9;
/// body(i, j);
/// }
/// \endcode
///
/// In the current implementation, the following limitations apply:
///
/// * All input loops have an induction variable of the same type.
///
/// * The collapsed loop will have the same trip count integer type as the
/// input loops. Therefore it is possible that the collapsed loop cannot
/// represent all iterations of the input loops. For instance, assuming a
/// 32 bit integer type, and two input loops both iterating 2^16 times, the
/// theoretical trip count of the collapsed loop would be 2^32 iteration,
/// which cannot be represented in an 32-bit integer. Behavior is undefined
/// in this case.
///
/// * The trip counts of every input loop must be available at \p ComputeIP.
/// Non-rectangular loops are not yet supported.
///
/// * At each nest level, code between a surrounding loop and its nested loop
/// is hoisted into the loop body, and such code will be executed more
/// often than before collapsing (or not at all if any inner loop iteration
/// has a trip count of 0). This is permitted by the OpenMP specification.
///
/// \param DL Debug location for instructions added for collapsing,
/// such as instructions to compute/derive the input loop's
/// induction variables.
/// \param Loops Loops in the loop nest to collapse. Loops are specified
/// from outermost-to-innermost and every control flow of a
/// loop's body must pass through its directly nested loop.
/// \param ComputeIP Where additional instruction that compute the collapsed
/// trip count. If not set, defaults to before the generated
/// loop.
///
/// \returns The CanonicalLoopInfo object representing the collapsed loop.
CanonicalLoopInfo *collapseLoops(DebugLoc DL,
ArrayRef<CanonicalLoopInfo *> Loops,
InsertPointTy ComputeIP);
/// Modifies the canonical loop to be a statically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// TODO: Workshare loops with static scheduling may contain up to two loops
/// that fulfill the requirements of an OpenMP canonical loop. One for
/// iterating over all iterations of a chunk and another one for iterating
/// over all chunks that are executed on the same thread. Returning
/// CanonicalLoopInfo objects representing them may eventually be useful for
/// the apply clause planned in OpenMP 6.0, but currently whether these are
/// canonical loops is irrelevant.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
bool NeedsBarrier,
Value *Chunk = nullptr);
/// Modifies the canonical loop to be a dynamically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain, and then in each iteration
/// to update the loop counter.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param SchedType Type of scheduling to be passed to the init function.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyDynamicWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
omp::OMPScheduleType SchedType,
bool NeedsBarrier,
Value *Chunk = nullptr);
/// Modifies the canonical loop to be a workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP, bool NeedsBarrier);
/// Tile a loop nest.
///
/// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in
/// \p Loops must be perfectly nested, from outermost to innermost loop
/// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value
/// of every loop and every tile sizes must be usable in the outermost
/// loop's preheader. This implies that the loop nest is rectangular.
///
/// Example:
/// \code
/// for (int i = 0; i < 15; ++i) // Canonical loop "i"
/// for (int j = 0; j < 14; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to
/// \code
/// for (int i1 = 0; i1 < 3; ++i1)
/// for (int j1 = 0; j1 < 2; ++j1)
/// for (int i2 = 0; i2 < 5; ++i2)
/// for (int j2 = 0; j2 < 7; ++j2)
/// body(i1*5+i2, j1*7+j2);
/// \endcode
///
/// The returned vector are the loops {i1,j1,i2,j2}. The loops i1 and j1 are
/// referred to the floor, and the loops i2 and j2 are the tiles. Tiling also
/// handles non-constant trip counts, non-constant tile sizes and trip counts
/// that are not multiples of the tile size. In the latter case the tile loop
/// of the last floor-loop iteration will have fewer iterations than specified
/// as its tile size.
///
///
/// @param DL Debug location for instructions added by tiling, for
/// instance the floor- and tile trip count computation.
/// @param Loops Loops to tile. The CanonicalLoopInfo objects are
/// invalidated by this method, i.e. should not used after
/// tiling.
/// @param TileSizes For each loop in \p Loops, the tile size for that
/// dimensions.
///
/// \returns A list of generated loops. Contains twice as many loops as the
/// input loop nest; the first half are the floor loops and the
/// second half are the tile loops.
std::vector<CanonicalLoopInfo *>
tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
ArrayRef<Value *> TileSizes);
/// Generator for '#omp flush'
///
/// \param Loc The location where the flush directive was encountered
void createFlush(const LocationDescription &Loc);
/// Generator for '#omp taskwait'
///
/// \param Loc The location where the taskwait directive was encountered.
void createTaskwait(const LocationDescription &Loc);
/// Generator for '#omp taskyield'
///
/// \param Loc The location where the taskyield directive was encountered.
void createTaskyield(const LocationDescription &Loc);
/// Functions used to generate reductions. Such functions take two Values
/// representing LHS and RHS of the reduction, respectively, and a reference
/// to the value that is updated to refer to the reduction result.
using ReductionGenTy =
function_ref<InsertPointTy(InsertPointTy, Value *, Value *, Value *&)>;
/// Functions used to generate atomic reductions. Such functions take two
/// Values representing pointers to LHS and RHS of the reduction. They are
/// expected to atomically update the LHS to the reduced value.
using AtomicReductionGenTy =
function_ref<InsertPointTy(InsertPointTy, Value *, Value *)>;
/// Information about an OpenMP reduction.
struct ReductionInfo {
  /// Bundles the variable, its thread-private counterpart, and the two
  /// code-generation callbacks for a single reduction clause item.
  ReductionInfo(Value *Variable, Value *PrivateVariable,
                ReductionGenTy ReductionGen,
                AtomicReductionGenTy AtomicReductionGen)
      : Variable(Variable), PrivateVariable(PrivateVariable),
        ReductionGen(ReductionGen), AtomicReductionGen(AtomicReductionGen) {}

  /// Returns the type of the element being reduced.
  // NOTE(review): relies on typed pointers via getPointerElementType();
  // will need rework once opaque pointers are the default — confirm.
  Type *getElementType() const {
    return Variable->getType()->getPointerElementType();
  }

  /// Reduction variable of pointer type.
  Value *Variable;

  /// Thread-private partial reduction variable.
  Value *PrivateVariable;

  /// Callback for generating the reduction body. The IR produced by this will
  /// be used to combine two values in a thread-safe context, e.g., under
  /// lock or within the same thread, and therefore need not be atomic.
  ReductionGenTy ReductionGen;

  /// Callback for generating the atomic reduction body, may be null. The IR
  /// produced by this will be used to atomically combine two values during
  /// reduction. If null, the implementation will use the non-atomic version
  /// along with the appropriate synchronization mechanisms.
  AtomicReductionGenTy AtomicReductionGen;
};
// TODO: provide atomic and non-atomic reduction generators for reduction
// operators defined by the OpenMP specification.
/// Generator for '#omp reduction'.
///
/// Emits the IR instructing the runtime to perform the specific kind of
/// reductions. Expects reduction variables to have been privatized and
/// initialized to reduction-neutral values separately. Emits the calls to
/// runtime functions as well as the reduction function and the basic blocks
/// performing the reduction atomically and non-atomically.
///
/// The code emitted for the following:
///
/// \code
/// type var_1;
/// type var_2;
/// #pragma omp <directive> reduction(reduction-op:var_1,var_2)
/// /* body */;
/// \endcode
///
/// corresponds to the following sketch.
///
/// \code
/// void _outlined_par() {
/// // N is the number of different reductions.
/// void *red_array[] = {privatized_var_1, privatized_var_2, ...};
/// switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array,
/// _omp_reduction_func,
/// _gomp_critical_user.reduction.var)) {
/// case 1: {
/// var_1 = var_1 <reduction-op> privatized_var_1;
/// var_2 = var_2 <reduction-op> privatized_var_2;
/// // ...
/// __kmpc_end_reduce(...);
/// break;
/// }
/// case 2: {
/// _Atomic<ReductionOp>(var_1, privatized_var_1);
/// _Atomic<ReductionOp>(var_2, privatized_var_2);
/// // ...
/// break;
/// }
/// default: break;
/// }
/// }
///
/// void _omp_reduction_func(void **lhs, void **rhs) {
/// *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0];
/// *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1];
/// // ...
/// }
/// \endcode
///
/// \param Loc The location where the reduction was
/// encountered. Must be within the associated
/// directive and after the last local access to the
/// reduction variables.
/// \param AllocaIP An insertion point suitable for allocas usable
/// in reductions.
/// \param Variables A list of variables in which the reduction
/// results will be stored (values of pointer type).
/// \param PrivateVariables A list of variables in which the partial
/// reduction results are stored (values of pointer
/// type). Coindexed with Variables. Privatization
/// must be handled separately from this call.
/// \param ReductionGen A list of generators for non-atomic reduction
/// bodies. Each takes a pair of partially reduced
/// values and sets a new one.
/// \param AtomicReductionGen A list of generators for atomic reduction
/// bodies, empty if the reduction cannot be
/// performed with atomics. Each takes a pair of
/// _pointers_ to partially reduced values and
/// atomically stores the result into the first.
/// \param IsNoWait A flag set if the reduction is marked as nowait.
InsertPointTy createReductions(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<ReductionInfo> ReductionInfos,
bool IsNoWait = false);
///}
/// Return a snapshot of the underlying IRBuilder's current insertion point.
InsertPointTy getInsertionPoint() {
  return Builder.saveIP();
}
/// Update the internal location to \p Loc.
///
/// Moves the builder's insertion point and current debug location to the
/// ones described by \p Loc.
///
/// \returns true if \p Loc carries a usable (non-null) insertion block.
bool updateToLocation(const LocationDescription &Loc) {
  bool HasBlock = Loc.IP.getBlock() != nullptr;
  Builder.restoreIP(Loc.IP);
  Builder.SetCurrentDebugLocation(Loc.DL);
  return HasBlock;
}
/// Return the function declaration for the runtime function with \p FnID.
FunctionCallee getOrCreateRuntimeFunction(Module &M,
omp::RuntimeFunction FnID);
Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID);
/// Return the (LLVM-IR) string describing the source location \p LocStr.
Constant *getOrCreateSrcLocStr(StringRef LocStr);
/// Return the (LLVM-IR) string describing the default source location.
Constant *getOrCreateDefaultSrcLocStr();
/// Return the (LLVM-IR) string describing the source location identified by
/// the arguments.
Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName,
unsigned Line, unsigned Column);
/// Return the (LLVM-IR) string describing the DebugLoc \p DL. Use \p F as
/// fallback if \p DL does not specify the function name.
Constant *getOrCreateSrcLocStr(DebugLoc DL, Function *F = nullptr);
/// Return the (LLVM-IR) string describing the source location \p Loc.
Constant *getOrCreateSrcLocStr(const LocationDescription &Loc);
/// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags.
/// TODO: Create a enum class for the Reserve2Flags
Value *getOrCreateIdent(Constant *SrcLocStr,
omp::IdentFlag Flags = omp::IdentFlag(0),
unsigned Reserve2Flags = 0);
// Get the type corresponding to __kmpc_impl_lanemask_t from the deviceRTL
Type *getLanemaskType();
/// Generate control flow and cleanup for cancellation.
///
/// \param CancelFlag Flag indicating if the cancellation is performed.
/// \param CanceledDirective The kind of directive that is canceled.
/// \param ExitCB Extra code to be generated in the exit block.
void emitCancelationCheckImpl(Value *CancelFlag,
omp::Directive CanceledDirective,
FinalizeCallbackTy ExitCB = {});
/// Generate a barrier runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
/// \param DK The directive which caused the barrier
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy emitBarrierImpl(const LocationDescription &Loc,
omp::Directive DK, bool ForceSimpleCall,
bool CheckCancelFlag);
/// Generate a flush runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitFlush(const LocationDescription &Loc);
/// The finalization stack made up of finalize callbacks currently in-flight,
/// wrapped into FinalizationInfo objects that reference also the finalization
/// target block and the kind of cancellable directive.
SmallVector<FinalizationInfo, 8> FinalizationStack;
/// Return true if the last entry in the finalization stack is of kind \p DK
/// and cancellable.
bool isLastFinalizationInfoCancellable(omp::Directive DK) {
  if (FinalizationStack.empty())
    return false;
  const FinalizationInfo &Top = FinalizationStack.back();
  return Top.IsCancellable && Top.DK == DK;
}
/// Generate a taskwait runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskwaitImpl(const LocationDescription &Loc);
/// Generate a taskyield runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskyieldImpl(const LocationDescription &Loc);
/// Return the current thread ID.
///
/// \param Ident The ident (ident_t*) describing the query origin.
Value *getOrCreateThreadID(Value *Ident);
/// The underlying LLVM-IR module
Module &M;
/// The LLVM-IR Builder used to create IR.
IRBuilder<> Builder;
/// Map to remember source location strings
StringMap<Constant *> SrcLocStrMap;
/// Map to remember existing ident_t*.
DenseMap<std::pair<Constant *, uint64_t>, Value *> IdentMap;
/// Helper that contains information about regions we need to outline
/// during finalization.
struct OutlineInfo {
  /// Callback invoked on the function created for this region after outlining.
  using PostOutlineCBTy = std::function<void(Function &)>;
  PostOutlineCBTy PostOutlineCB;

  /// Boundary blocks of the region to be outlined.
  BasicBlock *EntryBB, *ExitBB;

  /// Collect all blocks in between EntryBB and ExitBB in both the given
  /// vector and set.
  void collectBlocks(SmallPtrSetImpl<BasicBlock *> &BlockSet,
                     SmallVectorImpl<BasicBlock *> &BlockVector);

  /// Return the function that contains the region to be outlined.
  Function *getFunction() const { return EntryBB->getParent(); }
};
/// Collection of regions that need to be outlined during finalization.
SmallVector<OutlineInfo, 16> OutlineInfos;
/// Collection of owned canonical loop objects that eventually need to be
/// free'd.
std::forward_list<CanonicalLoopInfo> LoopInfos;
/// Add a new region that will be outlined later.
///
/// \param OI Description of the region to outline; consumed (moved from) by
///           this call — the parameter is an rvalue reference, so copying it
///           into the container would silently copy the PostOutlineCB
///           std::function instead of moving it.
void addOutlineInfo(OutlineInfo &&OI) {
  OutlineInfos.emplace_back(std::move(OI));
}
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
StringMap<AssertingVH<Constant>, BumpPtrAllocator> InternalVars;
/// Create the global variable holding the offload mappings information.
GlobalVariable *createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
std::string VarName);
/// Create the global variable holding the offload names information.
GlobalVariable *
createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
std::string VarName);
/// Allocas created by createMapperAllocas and consumed by emitMapperCall;
/// each holds one of the argument arrays passed to the mapper runtime call.
struct MapperAllocas {
  /// Alloca for the array of base pointers.
  AllocaInst *ArgsBase = nullptr;
  /// Alloca for the array of argument pointers.
  AllocaInst *Args = nullptr;
  /// Alloca for the array of argument sizes.
  AllocaInst *ArgSizes = nullptr;
};
/// Create the allocas instruction used in call to mapper functions.
void createMapperAllocas(const LocationDescription &Loc,
InsertPointTy AllocaIP, unsigned NumOperands,
struct MapperAllocas &MapperAllocas);
/// Create the call for the target mapper function.
/// \param Loc The source location description.
/// \param MapperFunc Function to be called.
/// \param SrcLocInfo Source location information global.
/// \param MaptypesArg The map types argument for the call.
/// \param MapnamesArg The map names argument for the call.
/// \param MapperAllocas The AllocaInst used for the call.
/// \param DeviceID Device ID for the call.
/// \param NumOperands Number of operands in the call.
void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc,
Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg,
struct MapperAllocas &MapperAllocas, int64_t DeviceID,
unsigned NumOperands);
public:
/// Generator for __kmpc_copyprivate
///
/// \param Loc The source location description.
/// \param BufSize Number of elements in the buffer.
/// \param CpyBuf List of pointers to data to be copied.
/// \param CpyFn function to call for copying data.
/// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise.
///
/// \return The insertion position *after* the CopyPrivate call.
InsertPointTy createCopyPrivate(const LocationDescription &Loc,
llvm::Value *BufSize, llvm::Value *CpyBuf,
llvm::Value *CpyFn, llvm::Value *DidIt);
/// Generator for '#omp single'
///
/// \param Loc The source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param DidIt Local variable used as a flag to indicate 'single' thread
///
/// \returns The insertion position *after* the single call.
InsertPointTy createSingle(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, llvm::Value *DidIt);
/// Generator for '#omp master'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the master.
InsertPointTy createMaster(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generator for '#omp masked'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the master.
InsertPointTy createMasked(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, Value *Filter);
/// Generator for '#omp critical'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \param CriticalName name of the lock used by the critical directive
/// \param HintInst Hint Instruction for hint clause associated with critical
///
/// \returns The insertion position *after* the master.
InsertPointTy createCritical(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB,
StringRef CriticalName, Value *HintInst);
/// Generator for '#omp sections'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param SectionCBs Callbacks that will generate body of each section.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IsCancellable Flag to indicate a cancellable parallel region.
/// \param IsNowait If true, barrier - to ensure all sections are executed
/// before moving forward will not be generated.
/// \returns The insertion position *after* the sections.
InsertPointTy createSections(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<StorableBodyGenCallbackTy> SectionCBs,
PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, bool IsCancellable,
bool IsNowait);
/// Generator for '#omp section'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \returns The insertion position *after* the section.
InsertPointTy createSection(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generate conditional branch and relevant BasicBlocks through which private
/// threads copy the 'copyin' variables from Master copy to threadprivate
/// copies.
///
/// \param IP insertion block for copyin conditional
/// \param MasterVarPtr a pointer to the master variable
/// \param PrivateVarPtr a pointer to the threadprivate variable
/// \param IntPtrTy Pointer size type
/// \param BranchtoEnd Create a branch between the copyin.not.master blocks
/// and copy.in.end block
///
/// \returns The insertion point where copying operation to be emitted.
InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr,
Value *PrivateAddr,
llvm::IntegerType *IntPtrTy,
bool BranchtoEnd = true);
/// Create a runtime call for kmpc_Alloc
///
/// \param Loc The insert and source location description.
/// \param Size Size of allocated memory space
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_alloc
///
/// \returns CallInst to the OMP_Alloc call
CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size,
Value *Allocator, std::string Name = "");
/// Create a runtime call for kmpc_free
///
/// \param Loc The insert and source location description.
/// \param Addr Address of memory space to be freed
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_Free
///
/// \returns CallInst to the OMP_Free call
CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr,
Value *Allocator, std::string Name = "");
/// Create a runtime call for kmpc_threadprivate_cached
///
/// \param Loc The insert and source location description.
/// \param Pointer pointer to data to be cached
/// \param Size size of data to be cached
/// \param Name Name of call Instruction for callinst
///
/// \returns CallInst to the thread private cache call.
CallInst *createCachedThreadPrivate(const LocationDescription &Loc,
llvm::Value *Pointer,
llvm::ConstantInt *Size,
const llvm::Twine &Name = Twine(""));
/// The `omp target` interface
///
/// For more information about the usage of this interface,
/// \see openmp/libomptarget/deviceRTLs/common/include/target.h
///
///{
/// Create a runtime call for kmpc_target_init
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD, bool RequiresFullRuntime);
/// Create a runtime call for kmpc_target_deinit
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
void createTargetDeinit(const LocationDescription &Loc, bool IsSPMD, bool RequiresFullRuntime);
///}
/// Declarations for LLVM-IR types (simple, array, function and structure) are
/// generated below. Their names are defined and used in OpenMPKinds.def. Here
/// we provide the declarations, the initializeTypes function will provide the
/// values.
///
///{
#define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr;
#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
ArrayType *VarName##Ty = nullptr; \
PointerType *VarName##PtrTy = nullptr;
#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
FunctionType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#define OMP_STRUCT_TYPE(VarName, StrName, ...) \
StructType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
///}
private:
/// Create all simple and struct types exposed by the runtime and remember
/// the llvm::PointerTypes of them for easy access later.
void initializeTypes(Module &M);
/// Common interface for generating entry calls for OMP Directives.
/// if the directive has a region/body, It will set the insertion
/// point to the body
///
/// \param OMPD Directive to generate entry blocks for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitBB block where the region ends.
/// \param Conditional indicate if the entry call result will be used
/// to evaluate a conditional of whether a thread will execute
/// body code or not.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall,
BasicBlock *ExitBB,
bool Conditional = false);
/// Common interface to finalize the region
///
/// \param OMPD Directive to generate exiting code for
/// \param FinIP Insertion point for emitting Finalization code and exit call
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD,
InsertPointTy FinIP,
Instruction *ExitCall,
bool HasFinalize = true);
/// Common Interface to generate OMP inlined regions
///
/// \param OMPD Directive to generate inlined region for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param BodyGenCB Body code generation callback.
/// \param FiniCB Finalization Callback. Will be called when finalizing region
/// \param Conditional indicate if the entry call result will be used
/// to evaluate a conditional of whether a thread will execute
/// body code or not.
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
/// \param IsCancellable if HasFinalize is set to true, indicate if the
/// the directive should be cancellable.
/// \return The insertion point after the region
InsertPointTy
EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall,
Instruction *ExitCall, BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, bool Conditional = false,
bool HasFinalize = true, bool IsCancellable = false);
/// Get the platform-specific name separator.
/// \param Parts different parts of the final name that needs separation
/// \param FirstSeparator First separator used between the initial two
/// parts of the name.
/// \param Separator separator used between all of the rest consecutive
/// parts of the name
static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
StringRef FirstSeparator,
StringRef Separator);
/// Gets (if variable with the given name already exist) or creates
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by null value.
/// \param Ty Type of the global variable. If it already exists, the type
/// must be the same.
/// \param Name Name of the variable.
Constant *getOrCreateOMPInternalVariable(Type *Ty, const Twine &Name,
unsigned AddressSpace = 0);
/// Returns corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created, otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
Value *getOMPCriticalRegionLock(StringRef CriticalName);
/// Callback type for Atomic Expression update
/// ex:
/// \code{.cpp}
/// unsigned x = 0;
/// #pragma omp atomic update
/// x = Expr(x_old); //Expr() is any legal operation
/// \endcode
///
/// \param XOld the value of the atomic memory address to use for update
/// \param IRB reference to the IRBuilder to use
///
/// \returns Value to update X to.
using AtomicUpdateCallbackTy =
const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>;
private:
enum AtomicKind { Read, Write, Update, Capture };
/// Determine whether to emit flush or not
///
/// \param Loc The insert and source location description.
/// \param AO The required atomic ordering
/// \param AK The OpenMP atomic operation kind used.
///
/// \returns whether a flush was emitted or not
bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc,
AtomicOrdering AO, AtomicKind AK);
/// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
/// \param RMWOp The binary operation used for update. If
/// operation is not supported by atomicRMW,
/// or belong to {FADD, FSUB, BAD_BINOP}.
/// Then a `cmpExch` based atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param VolatileX true if \a X is volatile.
/// \param IsXLHSInRHSPart true if \a X is Left H.S. in Right H.S. part of
/// the update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \returns A pair of the old value of X before the update, and the value
/// used for the update.
std::pair<Value *, Value *> emitAtomicUpdate(Instruction *AllocIP, Value *X,
Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
bool VolatileX,
bool IsXLHSInRHSPart);
/// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 .
///
/// \Return The instruction
Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2,
AtomicRMWInst::BinOp RMWOp);
public:
/// A struct to pack relevant information while generating atomic Ops.
struct AtomicOpValue {
  /// Pointer to the memory the atomic operation acts on.
  Value *Var = nullptr;
  /// Whether the underlying value is treated as signed.
  bool IsSigned = false;
  /// Whether accesses to \c Var must be volatile.
  bool IsVolatile = false;
};
/// Emit atomic Read for : V = X --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically read
/// \param V Memory address where to store atomically read
/// value
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic read IR.
InsertPointTy createAtomicRead(const LocationDescription &Loc,
AtomicOpValue &X, AtomicOpValue &V,
AtomicOrdering AO);
/// Emit atomic write for : X = Expr --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically written to
/// \param Expr The value to store.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic Write IR.
InsertPointTy createAtomicWrite(const LocationDescription &Loc,
AtomicOpValue &X, Value *Expr,
AtomicOrdering AO);
/// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions.
/// \param RMWOp The binary operation used for update. If operation
/// is not supported by atomicRMW, or belong to
/// {FADD, FSUB, BAD_BINOP}. Then a `cmpExch` based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param IsXLHSInRHSPart true if \a X is Left H.S. in Right H.S. part of
/// the update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \return Insertion point after generated atomic update IR.
InsertPointTy createAtomicUpdate(const LocationDescription &Loc,
Instruction *AllocIP, AtomicOpValue &X,
Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
bool IsXLHSInRHSPart);
/// Emit atomic update for constructs: --- Only Scalar data types
/// V = X; X = X BinOp Expr ,
/// X = X BinOp Expr; V = X,
/// V = X; X = Expr BinOp X,
/// X = Expr BinOp X; V = X,
/// V = X; X = UpdateOp(X),
/// X = UpdateOp(X); V = X,
///
/// \param Loc The insert and source location description.
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param V Memory address where to store captured value
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions
/// \param RMWOp The binary operation used for update. If
/// operation is not supported by atomicRMW, or belong to
/// {FADD, FSUB, BAD_BINOP}. Then a cmpExch based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param UpdateExpr true if X is an in place update of the form
/// X = X BinOp Expr or X = Expr BinOp X
/// \param IsXLHSInRHSPart true if X is Left H.S. in Right H.S. part of the
/// update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
/// \param IsPostfixUpdate true if original value of 'x' must be stored in
/// 'v', not an updated one.
///
/// \return Insertion point after generated atomic capture IR.
InsertPointTy
createAtomicCapture(const LocationDescription &Loc, Instruction *AllocIP,
AtomicOpValue &X, AtomicOpValue &V, Value *Expr,
AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr,
bool IsPostfixUpdate, bool IsXLHSInRHSPart);
/// Create the control flow structure of a canonical OpenMP loop.
///
/// The emitted loop will be disconnected, i.e. no edge to the loop's
/// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's
/// IRBuilder location is not preserved.
///
/// \param DL DebugLoc used for the instructions in the skeleton.
/// \param TripCount Value to be used for the trip count.
/// \param F Function in which to insert the BasicBlocks.
/// \param PreInsertBefore Where to insert BBs that execute before the body,
/// typically the body itself.
/// \param PostInsertBefore Where to insert BBs that execute after the body.
/// \param Name Base name used to derive BB
/// and instruction names.
///
/// \returns The CanonicalLoopInfo that represents the emitted loop.
CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount,
Function *F,
BasicBlock *PreInsertBefore,
BasicBlock *PostInsertBefore,
const Twine &Name = {});
};
/// Class to represented the control flow structure of an OpenMP canonical loop.
///
/// The control-flow structure is standardized for easy consumption by
/// directives associated with loops. For instance, the worksharing-loop
/// construct may change this control flow such that each loop iteration is
/// executed on only one thread. The constraints of a canonical loop in brief
/// are:
///
/// * The number of loop iterations must have been computed before entering the
/// loop.
///
/// * Has an (unsigned) logical induction variable that starts at zero and
/// increments by one.
///
/// * The loop's CFG itself has no side-effects. The OpenMP specification
/// itself allows side-effects, but the order in which they happen, including
/// how often or whether at all, is unspecified. We expect that the frontend
/// will emit those side-effect instructions somewhere (e.g. before the loop)
/// such that the CanonicalLoopInfo itself can be side-effect free.
///
/// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated
/// execution of a loop body that satisfies these constraints. It does NOT
/// represent arbitrary SESE regions that happen to contain a loop. Do not use
/// CanonicalLoopInfo for such purposes.
///
/// The control flow can be described as follows:
///
/// Preheader
/// |
/// /-> Header
/// | |
/// | Cond---\
/// | | |
/// | Body |
/// | | | |
/// | <...> |
/// | | | |
/// \--Latch |
/// |
/// Exit
/// |
/// After
///
/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
/// including) and end at AfterIP (at the After's first instruction, excluding).
/// That is, instructions in the Preheader and After blocks (except the
/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
/// side-effects. Typically, the Preheader is used to compute the loop's trip
/// count. The instructions from BodyIP (at the Body block's first instruction,
/// excluding) until the Latch are also considered outside CanonicalLoopInfo's
/// control and thus can have side-effects. The body block is the single entry
/// point into the loop body, which may contain arbitrary control flow as long
/// as all control paths eventually branch to the Latch block.
///
/// TODO: Consider adding another standardized BasicBlock between Body CFG and
/// Latch to guarantee that there is only a single edge to the latch. It would
/// make loop transformations easier by not needing to consider multiple
/// predecessors of the latch (See redirectAllPredecessorsTo) and would give us
/// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
/// executes after each body iteration.
///
/// There must be no loop-carried dependencies through llvm::Values. This is
/// equivalent to saying that the Latch has no PHINode and the Header's only
/// PHINode is
/// for the induction variable.
///
/// All code in Header, Cond, Latch and Exit (plus the terminator of the
/// Preheader) are CanonicalLoopInfo's responsibility and their build-up checked
/// by assertOK(). They are expected to not be modified unless explicitly
/// modifying the CanonicalLoopInfo through a method that applies an OpenMP
/// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
/// anymore as its underlying control flow may not exist anymore.
/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
/// may also return a new CanonicalLoopInfo that can be passed to other
/// loop-associated construct implementing methods. These loop-transforming
/// methods may either create a new CanonicalLoopInfo usually using
/// createLoopSkeleton and invalidate the input CanonicalLoopInfo, or reuse and
/// modify one of the input CanonicalLoopInfo and return it as representing the
/// modified loop. What is done is an implementation detail of
/// transformation-implementing method and callers should always assume that the
/// CanonicalLoopInfo passed to it is invalidated and a new object is returned.
/// Returned CanonicalLoopInfo have the same structure and guarantees as the one
/// created by createCanonicalLoop, such that transforming methods do not have
/// to special case where the CanonicalLoopInfo originated from.
///
/// Generally, methods consuming CanonicalLoopInfo do not need an
/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
/// any InsertPoint that is outside CanonicalLoopInfo's control. Specifically,
/// any InsertPoint in the Preheader or After block can still be used after
/// calling such a method.
///
/// TODO: Provide mechanisms for exception handling and cancellation points.
///
/// Defined outside OpenMPIRBuilder because nested classes cannot be
/// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h.
class CanonicalLoopInfo {
friend class OpenMPIRBuilder;
private:
BasicBlock *Preheader = nullptr;
BasicBlock *Header = nullptr;
BasicBlock *Cond = nullptr;
BasicBlock *Body = nullptr;
BasicBlock *Latch = nullptr;
BasicBlock *Exit = nullptr;
BasicBlock *After = nullptr;
/// Add the control blocks of this loop to \p BBs.
///
/// This does not include any block from the body, including the one returned
/// by getBody().
///
/// FIXME: This currently includes the Preheader and After blocks even though
/// their content is (mostly) not under CanonicalLoopInfo's control.
/// Re-evaluate whether this makes sense.
void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);
public:
/// Returns whether this object currently represents the IR of a loop. If
/// returning false, it may have been consumed by a loop transformation or not
/// been initialized. Do not use in this case.
bool isValid() const { return Header; }
/// The preheader ensures that there is only a single edge entering the loop.
/// Code that must be executed before any loop iteration can be emitted here,
/// such as computing the loop trip count and begin lifetime markers. Code in
/// the preheader is not considered part of the canonical loop.
BasicBlock *getPreheader() const {
assert(isValid() && "Requires a valid canonical loop");
return Preheader;
}
/// The header is the entry for each iteration. In the canonical control flow,
/// it only contains the PHINode for the induction variable.
BasicBlock *getHeader() const {
assert(isValid() && "Requires a valid canonical loop");
return Header;
}
/// The condition block computes whether there is another loop iteration. If
/// yes, branches to the body; otherwise to the exit block.
BasicBlock *getCond() const {
assert(isValid() && "Requires a valid canonical loop");
return Cond;
}
/// The body block is the single entry for a loop iteration and not controlled
/// by CanonicalLoopInfo. It can contain arbitrary control flow but must
/// eventually branch to the \p Latch block.
BasicBlock *getBody() const {
assert(isValid() && "Requires a valid canonical loop");
return Body;
}
/// Reaching the latch indicates the end of the loop body code. In the
/// canonical control flow, it only contains the increment of the induction
/// variable.
BasicBlock *getLatch() const {
assert(isValid() && "Requires a valid canonical loop");
return Latch;
}
/// Reaching the exit indicates no more iterations are being executed.
BasicBlock *getExit() const {
assert(isValid() && "Requires a valid canonical loop");
return Exit;
}
/// The after block is intended for clean-up code such as lifetime end
/// markers. It is separate from the exit block to ensure, analogous to the
/// preheader, that it has just a single entry edge and is free from PHI
/// nodes should there be multiple loop exits (such as from break
/// statements/cancellations).
BasicBlock *getAfter() const {
assert(isValid() && "Requires a valid canonical loop");
return After;
}
/// Returns the llvm::Value containing the number of loop iterations. It must
/// be valid in the preheader and always interpreted as an unsigned integer of
/// any bit-width.
Value *getTripCount() const {
assert(isValid() && "Requires a valid canonical loop");
// The trip count is the second operand of the comparison in the Cond block.
Instruction *CmpI = &Cond->front();
assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
return CmpI->getOperand(1);
}
/// Returns the instruction representing the current logical induction
/// variable. Always unsigned, always starting at 0 with an increment of one.
Instruction *getIndVar() const {
assert(isValid() && "Requires a valid canonical loop");
// By construction, the IV PHI is the first instruction of the header.
Instruction *IndVarPHI = &Header->front();
assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI");
return IndVarPHI;
}
/// Return the type of the induction variable (and the trip count).
Type *getIndVarType() const {
assert(isValid() && "Requires a valid canonical loop");
return getIndVar()->getType();
}
/// Return the insertion point for user code before the loop.
OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
assert(isValid() && "Requires a valid canonical loop");
return {Preheader, std::prev(Preheader->end())};
};
/// Return the insertion point for user code in the body.
OpenMPIRBuilder::InsertPointTy getBodyIP() const {
assert(isValid() && "Requires a valid canonical loop");
return {Body, Body->begin()};
};
/// Return the insertion point for user code after the loop.
OpenMPIRBuilder::InsertPointTy getAfterIP() const {
assert(isValid() && "Requires a valid canonical loop");
return {After, After->begin()};
};
/// Return the function that contains this canonical loop.
Function *getFunction() const {
assert(isValid() && "Requires a valid canonical loop");
return Header->getParent();
}
/// Consistency self-check.
void assertOK() const;
/// Invalidate this loop. That is, the underlying IR does not fulfill the
/// requirements of an OpenMP canonical loop anymore.
void invalidate();
};
} // end namespace llvm
#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
|
test.c | #include <stdio.h>
#pragma omp requires unified_shared_memory
#include "../utilities/check.h"
#define N 100
int main()
{
    check_offloading();
    int a[N], aa[N];
    int i, error = 0;
    // initialize device array and host reference to a sentinel
    for (i = 0; i < N; i++)
        aa[i] = a[i] = -1;
    // offload: fill a[] with the iteration index on the device
    // (use N rather than a hard-coded 100 so the map length tracks the array)
    #pragma omp target map(tofrom: a[0:N])
    {
        int k;
        #pragma omp simd
        for (k = 0; k < N; k++)
            a[k] = k;
    }
    // host reference computation
    for (i = 0; i < N; i++)
        aa[i] = i;
    // check device results against the host reference
    for (i = 0; i < N; i++) {
        if (a[i] != aa[i])
            printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
        if (error > 10) {
            printf("abort\n");
            // Fixed: previously returned 0 here, reporting success even
            // though errors had been detected. Propagate the failure.
            return error;
        }
    }
    // report
    printf("done with %d errors\n", error);
    return error;
}
|
GB_unaryop__identity_fp64_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_fp64_int16
// op(A') function: GB_tran__identity_fp64_int16
// C type: double
// A type: int16_t
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_fp64_int16
(
double *restrict Cx,
const int16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [p] = (double) Ax [p] for each of the anz entries (GB_CAST_OP expands
// to the cast followed by the identity operator), split statically across
// nthreads threads.
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_fp64_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
// The transpose loop body is supplied by the shared template below, which
// uses the GB_* macros defined above for this type combination.
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
vector.c | #include "lib.h"
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
/* Fill all dim elements of dst with the scalar value c. */
void DENSEARRAY_SYMBOL(set_scalar_f32)(
    float *dst,
    size_t dim,
    float c)
{
#pragma omp parallel for
    for (size_t i = 0; i < dim; ++i) {
        dst[i] = c;
    }
}
/* Fill all dim elements of dst with the scalar value c. */
void DENSEARRAY_SYMBOL(set_scalar_i32)(
    int32_t *dst,
    size_t dim,
    int32_t c)
{
#pragma omp parallel for
    for (size_t i = 0; i < dim; ++i) {
        dst[i] = c;
    }
}
/* Copy dim elements from src into dst (regions must not overlap). */
void DENSEARRAY_SYMBOL(copy_f32)(
    float *dst,
    size_t dim,
    const float *src)
{
#pragma omp parallel for
    for (size_t i = 0; i < dim; ++i) {
        dst[i] = src[i];
    }
}
/* Widen dim unsigned bytes from src into floats in dst. */
void DENSEARRAY_SYMBOL(cast_u8_to_f32)(
    float *dst,
    size_t dim,
    const uint8_t *src)
{
#pragma omp parallel for
    for (size_t i = 0; i < dim; ++i) {
        dst[i] = (float)src[i];
    }
}
/* Replace each of the dim elements of dst with its square. */
void DENSEARRAY_SYMBOL(square_f32)(
    float *dst,
    size_t dim)
{
#pragma omp parallel for
    for (size_t i = 0; i < dim; ++i) {
        dst[i] *= dst[i];
    }
}
/* Replace each of the dim elements of dst with its cube. */
void DENSEARRAY_SYMBOL(cube_f32)(
    float *dst,
    size_t dim)
{
#pragma omp parallel for
    for (size_t i = 0; i < dim; ++i) {
        const float v = dst[i];
        dst[i] = v * v * v;
    }
}
void DENSEARRAY_SYMBOL(sqrt_f32)(
float *dst,
size_t dim)
{
#pragma omp parallel for
for (size_t idx = 0; idx < dim; idx++) {
float y = dst[idx];
dst[idx] = sqrtf(y);
}
}
/* Replace each of the dim elements of dst with its reciprocal.
 * Zero elements produce IEEE infinity, matching plain division. */
void DENSEARRAY_SYMBOL(reciprocal_f32)(
    float *dst,
    size_t dim)
{
#pragma omp parallel for
    for (size_t i = 0; i < dim; ++i) {
        dst[i] = 1.0f / dst[i];
    }
}
/* Add the scalar c to each of the dim elements of dst. */
void DENSEARRAY_SYMBOL(add_scalar_f32)(
    float *dst,
    size_t dim,
    float c)
{
#pragma omp parallel for
    for (size_t i = 0; i < dim; ++i) {
        dst[i] = dst[i] + c;
    }
}
/* Multiply each of the dim elements of dst by the scalar c. */
void DENSEARRAY_SYMBOL(scale_f32)(
    float *dst,
    size_t dim,
    float c)
{
#pragma omp parallel for
    for (size_t i = 0; i < dim; ++i) {
        dst[i] = dst[i] * c;
    }
}
/* Divide each of the dim elements of dst by the scalar c. */
void DENSEARRAY_SYMBOL(div_scalar_f32)(
    float *dst,
    size_t dim,
    float c)
{
#pragma omp parallel for
    for (size_t i = 0; i < dim; ++i) {
        dst[i] = dst[i] / c;
    }
}
/* Element-wise product: dst[i] = dst[i] * xs[i] for dim elements. */
void DENSEARRAY_SYMBOL(elem_mult_f32)(
    float *dst,
    size_t dim,
    const float *xs)
{
#pragma omp parallel for
    for (size_t i = 0; i < dim; ++i) {
        dst[i] = dst[i] * xs[i];
    }
}
/* Element-wise division: dst[i] = dst[i] / xs[i] for dim elements. */
void DENSEARRAY_SYMBOL(elem_div_f32)(
    float *dst,
    size_t dim,
    const float *xs)
{
#pragma omp parallel for
    for (size_t i = 0; i < dim; ++i) {
        dst[i] = dst[i] / xs[i];
    }
}
/* Element-wise left-division: dst[i] = xs[i] / dst[i] for dim elements. */
void DENSEARRAY_SYMBOL(elem_ldiv_f32)(
    float *dst,
    size_t dim,
    const float *xs)
{
#pragma omp parallel for
    for (size_t i = 0; i < dim; ++i) {
        dst[i] = xs[i] / dst[i];
    }
}
/* Scaled accumulate (axpy): dst[i] += c * xs[i] for dim elements. */
void DENSEARRAY_SYMBOL(vector_add_f32)(
    float *dst,
    size_t dim,
    const float *xs,
    float c)
{
#pragma omp parallel for
    for (size_t i = 0; i < dim; ++i) {
        dst[i] = dst[i] + c * xs[i];
    }
}
/* Exponential blend toward xs: dst[i] += c * (xs[i] - dst[i]).
 * With c in [0,1] this moves dst a fraction c of the way to xs. */
void DENSEARRAY_SYMBOL(vector_average_f32)(
    float *dst,
    size_t dim,
    const float *xs,
    float c)
{
#pragma omp parallel for
    for (size_t i = 0; i < dim; ++i) {
        dst[i] = dst[i] + c * (xs[i] - dst[i]);
    }
}
|
Fig_4.9_piparCpadSum.c | #include <stdio.h>
#include <omp.h>
#define NTHREADS 4
#define CBLK 8
static long num_steps = 100000000;
double step;
int main ()
{
    int i, actual_nthreads;
    double pi, start_time, run_time;
    // Each thread accumulates into its own row; the CBLK-wide rows pad the
    // accumulators apart in memory (only column 0 is used) to reduce false
    // sharing between threads.
    double sum[NTHREADS][CBLK] = {0.0};

    step = 1.0 / (double) num_steps;
    omp_set_num_threads(NTHREADS);
    start_time = omp_get_wtime();
    #pragma omp parallel
    {
        int i;
        int id = omp_get_thread_num();
        int numthreads = omp_get_num_threads();
        double x;
        if (id == 0) actual_nthreads = numthreads;  // record actual team size
        // Cyclic distribution of the iterations across the team.
        for (i = id; i < num_steps; i += numthreads) {
            x = (i + 0.5) * step;
            sum[id][0] += 4.0 / (1.0 + x * x);
        }
    } // end of parallel region
    pi = 0.0;
    for (i = 0; i < actual_nthreads; i++)
        pi += sum[i][0];
    pi = step * pi;
    run_time = omp_get_wtime() - start_time;
    // Fixed: "\%f" is an invalid escape sequence; plain %f / %d are correct.
    printf("\n pi is %f in %f seconds %d thrds \n",
           pi, run_time, actual_nthreads);
    return 0;
}
|
convolution_winograd_transform.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_winograd63_transform_output_sse(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
const int outw = top_blob.w;
const int outh = top_blob.h;
const int outch = top_blob.c;
const int w_tiles = outw / 6;
const int h_tiles = outh / 6;
const int tiles = w_tiles * h_tiles;
const float* biasptr = bias;
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob.channel(p);
const float bias0 = biasptr ? biasptr[p] : 0.f;
float tmp[6][8];
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
// The 8 rows of this tile are stored tiles-elements apart in out0_tm.
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 1;
const float* output0_tm_1 = output0_tm_0 + tiles * 1;
const float* output0_tm_2 = output0_tm_0 + tiles * 2;
const float* output0_tm_3 = output0_tm_0 + tiles * 3;
const float* output0_tm_4 = output0_tm_0 + tiles * 4;
const float* output0_tm_5 = output0_tm_0 + tiles * 5;
const float* output0_tm_6 = output0_tm_0 + tiles * 6;
const float* output0_tm_7 = output0_tm_0 + tiles * 7;
// TODO sse optimize
// first pass: apply the otm transform down the columns into tmp[6][8]
for (int m = 0; m < 8; m++)
{
float tmp024a = output0_tm_1[0] + output0_tm_2[0];
float tmp135a = output0_tm_1[0] - output0_tm_2[0];
float tmp024b = output0_tm_3[0] + output0_tm_4[0];
float tmp135b = output0_tm_3[0] - output0_tm_4[0];
float tmp024c = output0_tm_5[0] + output0_tm_6[0];
float tmp135c = output0_tm_5[0] - output0_tm_6[0];
tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32;
tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
tmp[5][m] = output0_tm_7[0] + tmp135a + tmp135b * 32 + tmp135c;
output0_tm_0 += tiles * 8;
output0_tm_1 += tiles * 8;
output0_tm_2 += tiles * 8;
output0_tm_3 += tiles * 8;
output0_tm_4 += tiles * 8;
output0_tm_5 += tiles * 8;
output0_tm_6 += tiles * 8;
output0_tm_7 += tiles * 8;
}
float* output0 = out0.row(i * 6) + j * 6;
// second pass: apply the same transform along the rows, add bias,
// and write the 6x6 output tile
for (int m = 0; m < 6; m++)
{
const float* tmp0 = tmp[m];
float tmp024a = tmp0[1] + tmp0[2];
float tmp135a = tmp0[1] - tmp0[2];
float tmp024b = tmp0[3] + tmp0[4];
float tmp135b = tmp0[3] - tmp0[4];
float tmp024c = tmp0[5] + tmp0[6];
float tmp135c = tmp0[5] - tmp0[6];
output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw;
}
}
}
}
}
static void conv3x3s1_winograd43_transform_input_sse(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
const int w = bottom_blob.w;
const int h = bottom_blob.h;
const int inch = bottom_blob.c;
const int w_tiles = (w - 2) / 4;
const int h_tiles = (h - 2) / 4;
const int tiles = w_tiles * h_tiles;
// const float itm[6][6] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r04 + r03
// 2 = 4 * (r01 - r02) + r04 - r03
// 3 = -2 * (r01 - r03) + r04 - r02
// 4 = 2 * (r01 - r03) + r04 - r02
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[6][6];
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
const float* r0 = img0.row(i * 4) + (j * 4);
// first pass: transform the 6x6 input tile column-wise into tmp
for (int m = 0; m < 6; m++)
{
float r00 = r0[0];
float r01 = r0[1];
float r02 = r0[2];
float r03 = r0[3];
float r04 = r0[4];
float r05 = r0[5];
float tmp0m = 4 * r00 - 5 * r02 + r04;
float tmp1m = -4 * (r01 + r02) + r04 + r03;
float tmp2m = 4 * (r01 - r02) + r04 - r03;
float tmp3m = -2 * (r01 - r03) + r04 - r02;
float tmp4m = 2 * (r01 - r03) + r04 - r02;
float tmp5m = 4 * r01 - 5 * r03 + r05;
tmp[0][m] = tmp0m;
tmp[1][m] = tmp1m;
tmp[2][m] = tmp2m;
tmp[3][m] = tmp3m;
tmp[4][m] = tmp4m;
tmp[5][m] = tmp5m;
r0 += w;
}
// the 6 transformed rows of this tile are stored tiles apart
float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j);
float* r0_tm_1 = r0_tm_0 + tiles;
float* r0_tm_2 = r0_tm_0 + tiles * 2;
float* r0_tm_3 = r0_tm_0 + tiles * 3;
float* r0_tm_4 = r0_tm_0 + tiles * 4;
float* r0_tm_5 = r0_tm_0 + tiles * 5;
// second pass: apply the same transform row-wise and scatter out
for (int m = 0; m < 6; m++)
{
float tmp00 = tmp[m][0];
float tmp01 = tmp[m][1];
float tmp02 = tmp[m][2];
float tmp03 = tmp[m][3];
float tmp04 = tmp[m][4];
float tmp05 = tmp[m][5];
float r0tm0 = 4 * tmp00 - 5 * tmp02 + tmp04;
float r0tm1 = -4 * (tmp01 + tmp02) + tmp04 + tmp03;
float r0tm2 = 4 * (tmp01 - tmp02) + tmp04 - tmp03;
float r0tm3 = -2 * (tmp01 - tmp03) + tmp04 - tmp02;
float r0tm4 = 2 * (tmp01 - tmp03) + tmp04 - tmp02;
float r0tm5 = 4 * tmp01 - 5 * tmp03 + tmp05;
r0_tm_0[0] = r0tm0;
r0_tm_1[0] = r0tm1;
r0_tm_2[0] = r0tm2;
r0_tm_3[0] = r0tm3;
r0_tm_4[0] = r0tm4;
r0_tm_5[0] = r0tm5;
r0_tm_0 += tiles * 6;
r0_tm_1 += tiles * 6;
r0_tm_2 += tiles * 6;
r0_tm_3 += tiles * 6;
r0_tm_4 += tiles * 6;
r0_tm_5 += tiles * 6;
}
}
}
}
}
static void conv3x3s1_winograd43_transform_output_sse(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
const int outw = top_blob.w;
const int outh = top_blob.h;
const int outch = top_blob.c;
const int w_tiles = outw / 4;
const int h_tiles = outh / 4;
const int tiles = w_tiles * h_tiles;
const float* biasptr = bias;
// const float otm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + (r01 + r02) + (r03 + r04)
// 1 = (r01 - r02) + (r03 - r04) * 2
// 2 = (r01 + r02) + (r03 + r04) * 4
// 3 = r05 + (r01 - r02) + (r03 - r04) * 8
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob.channel(p);
float bias0 = biasptr ? biasptr[p] : 0.f;
float tmp[4][6];
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
// the 6 rows of this tile are stored tiles apart in out0_tm
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j);
const float* output0_tm_1 = output0_tm_0 + tiles;
const float* output0_tm_2 = output0_tm_0 + tiles * 2;
const float* output0_tm_3 = output0_tm_0 + tiles * 3;
const float* output0_tm_4 = output0_tm_0 + tiles * 4;
const float* output0_tm_5 = output0_tm_0 + tiles * 5;
float* output0 = out0.row(i * 4) + (j * 4);
// first pass: apply the otm transform down the columns into tmp[4][6]
for (int m = 0; m < 6; m++)
{
float out0tm0 = output0_tm_0[0];
float out0tm1 = output0_tm_1[0];
float out0tm2 = output0_tm_2[0];
float out0tm3 = output0_tm_3[0];
float out0tm4 = output0_tm_4[0];
float out0tm5 = output0_tm_5[0];
float tmp02a = out0tm1 + out0tm2;
float tmp13a = out0tm1 - out0tm2;
float tmp02b = out0tm3 + out0tm4;
float tmp13b = out0tm3 - out0tm4;
float tmp0m = out0tm0 + tmp02a + tmp02b;
float tmp1m = tmp13a + tmp13b * 2;
float tmp2m = tmp02a + tmp02b * 4;
float tmp3m = out0tm5 + tmp13a + tmp13b * 8;
tmp[0][m] = tmp0m;
tmp[1][m] = tmp1m;
tmp[2][m] = tmp2m;
tmp[3][m] = tmp3m;
output0_tm_0 += tiles * 6;
output0_tm_1 += tiles * 6;
output0_tm_2 += tiles * 6;
output0_tm_3 += tiles * 6;
output0_tm_4 += tiles * 6;
output0_tm_5 += tiles * 6;
}
// second pass: transform along rows, add bias, write the 4x4 tile
for (int m = 0; m < 4; m++)
{
float tmp00 = tmp[m][0];
float tmp01 = tmp[m][1];
float tmp02 = tmp[m][2];
float tmp03 = tmp[m][3];
float tmp04 = tmp[m][4];
float tmp05 = tmp[m][5];
float tmp02a = tmp01 + tmp02;
float tmp13a = tmp01 - tmp02;
float tmp02b = tmp03 + tmp04;
float tmp13b = tmp03 - tmp04;
float out00 = bias0 + tmp00 + tmp02a + tmp02b;
float out01 = bias0 + tmp13a + tmp13b * 2;
float out02 = bias0 + tmp02a + tmp02b * 4;
float out03 = bias0 + tmp05 + tmp13a + tmp13b * 8;
output0[0] = out00;
output0[1] = out01;
output0[2] = out02;
output0[3] = out03;
output0 += outw;
}
}
}
}
}
static void conv3x3s1_winograd23_transform_input_sse(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
const int w = bottom_blob.w;
const int h = bottom_blob.h;
const int inch = bottom_blob.c;
const int w_tiles = (w - 2) / 2;
const int h_tiles = (h - 2) / 2;
const int tiles = w_tiles * h_tiles;
// const float itm[4][4] = {
// {1.0f, 0.0f, -1.0f, 0.0f},
// {0.0f, 1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 0.00f, 1.0f}
// };
// 0 = r00 - r02
// 1 = r01 + r02
// 2 = r02 - r01
// 3 = r03 - r01
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[4][4];
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
const float* r0 = img0.row(i * 2) + (j * 2);
// first pass: transform the 4x4 input tile column-wise into tmp
for (int m = 0; m < 4; m++)
{
float r00 = r0[0];
float r01 = r0[1];
float r02 = r0[2];
float r03 = r0[3];
float tmp0m = r00 - r02;
float tmp1m = r01 + r02;
float tmp2m = r02 - r01;
float tmp3m = r03 - r01;
tmp[0][m] = tmp0m;
tmp[1][m] = tmp1m;
tmp[2][m] = tmp2m;
tmp[3][m] = tmp3m;
r0 += w;
}
// the 4 transformed rows of this tile are stored tiles apart
float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j);
float* r0_tm_1 = r0_tm_0 + tiles;
float* r0_tm_2 = r0_tm_0 + tiles * 2;
float* r0_tm_3 = r0_tm_0 + tiles * 3;
// second pass: apply the same transform row-wise and scatter out
for (int m = 0; m < 4; m++)
{
float tmp00 = tmp[m][0];
float tmp01 = tmp[m][1];
float tmp02 = tmp[m][2];
float tmp03 = tmp[m][3];
float r0tm0 = tmp00 - tmp02;
float r0tm1 = tmp01 + tmp02;
float r0tm2 = tmp02 - tmp01;
float r0tm3 = tmp03 - tmp01;
r0_tm_0[0] = r0tm0;
r0_tm_1[0] = r0tm1;
r0_tm_2[0] = r0tm2;
r0_tm_3[0] = r0tm3;
r0_tm_0 += tiles * 4;
r0_tm_1 += tiles * 4;
r0_tm_2 += tiles * 4;
r0_tm_3 += tiles * 4;
}
}
}
}
}
static void conv3x3s1_winograd23_transform_output_sse(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
const int outw = top_blob.w;
const int outh = top_blob.h;
const int outch = top_blob.c;
const int w_tiles = outw / 2;
const int h_tiles = outh / 2;
const int tiles = w_tiles * h_tiles;
const float* biasptr = bias;
// const float otm[2][4] = {
// {1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r00 + r01 + r02
// 1 = r01 - r02 + r03
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob.channel(p);
float bias0 = biasptr ? biasptr[p] : 0.f;
float tmp[2][4];
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
// the 4 rows of this tile are stored tiles apart in out0_tm
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j);
const float* output0_tm_1 = output0_tm_0 + tiles;
const float* output0_tm_2 = output0_tm_0 + tiles * 2;
const float* output0_tm_3 = output0_tm_0 + tiles * 3;
float* output0 = out0.row(i * 2) + (j * 2);
// first pass: apply the otm transform down the columns into tmp[2][4]
for (int m = 0; m < 4; m++)
{
float out0tm0 = output0_tm_0[0];
float out0tm1 = output0_tm_1[0];
float out0tm2 = output0_tm_2[0];
float out0tm3 = output0_tm_3[0];
float tmp0m = out0tm0 + out0tm1 + out0tm2;
float tmp1m = out0tm1 - out0tm2 + out0tm3;
tmp[0][m] = tmp0m;
tmp[1][m] = tmp1m;
output0_tm_0 += tiles * 4;
output0_tm_1 += tiles * 4;
output0_tm_2 += tiles * 4;
output0_tm_3 += tiles * 4;
}
// second pass: transform along rows, add bias, write the 2x2 tile
for (int m = 0; m < 2; m++)
{
float tmp00 = tmp[m][0];
float tmp01 = tmp[m][1];
float tmp02 = tmp[m][2];
float tmp03 = tmp[m][3];
float out00 = bias0 + tmp00 + tmp01 + tmp02;
float out01 = bias0 + tmp01 - tmp02 + tmp03;
output0[0] = out00;
output0[1] = out01;
output0 += outw;
}
}
}
}
}
|
GB_unaryop__one_uint32_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__one_uint32_uint32
// op(A') function: GB_tran__one_uint32_uint32
// C type: uint32_t
// A type: uint32_t
// cast: ;
// unaryop: cij = 1
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1 ;
// casting
#define GB_CASTING(z, x) \
; ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__one_uint32_uint32
(
uint32_t *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [p] = 1 for each of the anz entries (the ONE operator ignores Ax),
// split statically across nthreads threads.
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__one_uint32_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
// The transpose loop body is supplied by the shared template below, which
// uses the GB_* macros defined above for this type combination.
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__isgt_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isgt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__isgt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__isgt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__isgt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_uint8)
// A*D function (colscale): GB (_AxD__isgt_uint8)
// D*A function (rowscale): GB (_DxB__isgt_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__isgt_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__isgt_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_uint8)
// C=scalar+B GB (_bind1st__isgt_uint8)
// C=scalar+B' GB (_bind1st_tran__isgt_uint8)
// C=A+scalar GB (_bind2nd__isgt_uint8)
// C=A'+scalar GB (_bind2nd_tran__isgt_uint8)
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_UINT8 || GxB_NO_ISGT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Dead code: no dense ewise3-accum kernel is generated for the ISGT
// operator, so this entire definition is excluded by the preprocessor.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense: computes cij = (aij > bij)
// elementwise via the dense ewise3 template, with no accumulator.
void GB (_Cdense_ewise3_noaccum__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C with the
// ISGT operator, using the sliced tasks in B_ek_slicing.
GrB_Info GB (_Cdense_accumB__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// kernel disabled at compile time; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (of type uint8_t, passed via p_bwork)
// into the dense matrix C with the ISGT operator.
// Fix: the original had a second, unreachable return (GrB_SUCCESS) after
// the scoped return; restructured to a single exit matching the sibling
// _Cdense_accumB kernel.
GrB_Info GB (_Cdense_accumb__isgt_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// kernel disabled at compile time; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale, where D is a diagonal matrix; each entry of A is
// combined with the diagonal entry of its column via cij = (aij > djj).
GrB_Info GB (_AxD__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes the result values directly into C->x
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale, where D is a diagonal matrix; each entry of B is
// combined with the diagonal entry of its row via cij = (dii > bij).
GrB_Info GB (_DxB__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes the result values directly into C->x
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B with the ISGT operator.
// When is_eWiseUnion is true, alpha/beta scalars substitute for entries
// missing from A or B respectively (GxB_eWiseUnion semantics).
GrB_Info GB (_AaddB__isgt_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix workspaces for slicing; freed by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
if (is_eWiseUnion)
{
// only eWiseUnion uses the alpha/beta "fill" scalars
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where the
// result C is sparse or hypersparse; work is split by the given TaskList.
GrB_Info GB (_AemultB_08__isgt_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hypersparse and B is
// bitmap/full; flipxy requests z = f(y,x) instead of z = f(x,y).
GrB_Info GB (_AemultB_02__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
// (For ISGT, GB_BINOP_FLIP is 0, so only this branch is compiled.)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hypersparse and both
// A and B are bitmap/full; iterates over the entries of the mask M.
GrB_Info GB (_AemultB_04__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where the result C is
// held in bitmap form.
GrB_Info GB (_AemultB_bitmap__isgt_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x > Bx [p]) for every entry present in B (per the bitmap Bb),
// with the scalar x bound to the first argument of the ISGT operator.
GrB_Info GB (_bind1st__isgt_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Bx = (uint8_t *) Bx_input ;
const uint8_t x = (*((uint8_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// only positions present in the B bitmap produce an output
if (GBB (Bb, p))
{
const uint8_t bval = GBX (Bx, p, false) ;
Cx [p] = (x > bval) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] > y) for every entry present in A (per the bitmap Ab),
// with the scalar y bound to the second argument of the ISGT operator.
GrB_Info GB (_bind2nd__isgt_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
const uint8_t y = (*((uint8_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only positions present in the A bitmap produce an output
if (GBB (Ab, p))
{
const uint8_t aval = GBX (Ax, p, false) ;
Cx [p] = (aval > y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x > aij), using the
// GB_CAST_OP macro defined immediately above this function.
GrB_Info GB (_bind1st_tran__isgt_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// redefine GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij > y), using the
// GB_CAST_OP macro defined immediately above this function.
GrB_Info GB (_bind2nd_tran__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
residualbased_elimination_quasiincompresible_builder_and_solver.h | /* *********************************************************
*
* Last Modified by: $Author: anonymous $
* Date: $Date: 2009-01-15 14:50:24 $
* Revision: $Revision: 1.12 $
*
* ***********************************************************/
#if !defined(KRATOS_RESIDUAL_BASED_ELIMINATION_QUASI_INCOMPRESSIBLE_BUILDER_AND_SOLVER )
#define KRATOS_RESIDUAL_BASED_ELIMINATION_QUASI_INCOMPRESSIBLE_BUILDER_AND_SOLVER
/* System includes */
#include <set>
#ifdef _OPENMP
#include <omp.h>
#endif
/* External includes */
// #include "boost/smart_ptr.hpp"
#include <pybind11/pybind11.h>
#include "includes/define.h"
#include "includes/define_python.h"
/* Project includes */
#include "includes/define.h"
#include "ULF_application.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "utilities/geometry_utilities.h"
#include "boost/smart_ptr.hpp"
#include "utilities/timer.h"
namespace Kratos
{
/**@name Kratos Globals */
/*@{ */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
/**@name Enum's */
/*@{ */
/*@} */
/**@name Functions */
/*@{ */
/*@} */
/**@name Kratos Classes */
/*@{ */
/** Short class definition.
Detail class definition.
\URL[Example of use html]{ extended_documentation/no_ex_of_use.html}
\URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf}
\URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc}
\URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps}
\URL[Extended documentation html]{ extended_documentation/no_ext_doc.html}
\URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf}
\URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc}
\URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps}
*/
template
<
class TSparseSpace,
class TDenseSpace ,
class TLinearSolver,
int TDim
>
class ResidualBasedEliminationQuasiIncompressibleBuilderAndSolver
: public BuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver >
{
public:
/**@name Type Definitions */
/*@{ */
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedEliminationQuasiIncompressibleBuilderAndSolver );
typedef BuilderAndSolver<TSparseSpace,TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef typename BaseType::NodesArrayType NodesContainerType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
/*@} */
/**@name Life Cycle
*/
/*@{ */
/** Constructor: forwards the linear solver to the base BuilderAndSolver;
no additional state is initialized here.
*/
ResidualBasedEliminationQuasiIncompressibleBuilderAndSolver(
typename TLinearSolver::Pointer pNewLinearSystemSolver)
: BuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver >(pNewLinearSystemSolver)
{
}
/** Destructor: trivial; members release their own resources.
*/
~ResidualBasedEliminationQuasiIncompressibleBuilderAndSolver() override {}
/*@} */
/**@name Operators
*/
/*@{ */
//**************************************************************************
//**************************************************************************
// Not supported by this builder-and-solver: the quasi-incompressible
// strategy drives the build/solve sequence itself, so calling this
// combined entry point is always an error.
void BuildAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b)
{
KRATOS_TRY
KRATOS_THROW_ERROR(std::runtime_error, "For the quasi incompressible builder and solver this fct doesnt exist!", "");
KRATOS_CATCH("")
}
// Intentionally empty: this solver needs no per-step initialization,
// but the hook must exist to satisfy the BuilderAndSolver interface.
void InitializeSolutionStep(
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b)
{
KRATOS_TRY
//KRATOS_WATCH("Initialize Solution Step::: EMPTY FUNCTION FOR THIS SOLVER")
KRATOS_CATCH("")
}
// Intentionally empty: this solver needs no per-step finalization,
// but the hook must exist to satisfy the BuilderAndSolver interface.
void FinalizeSolutionStep(
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b)
{
KRATOS_TRY
//KRATOS_WATCH("Finalize Solution Step:::EMPTY FUNCTION FOR THIS SOLVER")
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
//this is done in a purely nodal way taking advantage of the neighbour relationships
//which are assumed to be calculated separately
//HERE we store the displacements variables in a list
// Builds the DOF set in a purely nodal way: a node is "active" when it has
// at least one neighbour node, and only DISPLACEMENT DOFs are collected
// (PRESSURE is handled separately by this solver). Sets
// mEquationSystemSize and marks the DOF set as initialized.
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part
)
{
KRATOS_TRY
//KRATOS_WATCH("ENTERED SETUP DOFSET OF BUILDER AND SOLVER OF ULF")
// collect the active nodes (those with a non-empty neighbour list)
mActiveNodes.clear();
mActiveNodes.reserve(r_model_part.Nodes().size() );
for (typename NodesArrayType::iterator it=r_model_part.NodesBegin(); it!=r_model_part.NodesEnd(); ++it)
{
if( (it->GetValue(NEIGHBOUR_NODES)).size() != 0 )
{
mActiveNodes.push_back(*(it.base() ));
}
}
//getting the dof position
//unsigned int dof_position = (mActiveNodes.begin())->GetDofPosition(PRESSURE);
//fills the DofList and give a unique progressive tag to each node
// TDim displacement DOFs per active node (X, Y, and Z when TDim==3)
BaseType::mDofSet.clear();
BaseType::mDofSet.reserve(mActiveNodes.size()*TDim );
for(GlobalPointersVector< Node<3> >::iterator iii = mActiveNodes.begin(); iii!=mActiveNodes.end(); iii++)
{
BaseType::mDofSet.push_back( iii->pGetDof(DISPLACEMENT_X).get());
BaseType::mDofSet.push_back( iii->pGetDof(DISPLACEMENT_Y).get());
//BaseType::mDofSet.push_back( iii->pGetDof(DISPLACEMENT_Y));
if (TDim==3)
BaseType::mDofSet.push_back( iii->pGetDof(DISPLACEMENT_Z).get());
}
this->mEquationSystemSize = BaseType::mDofSet.size();
// an empty DOF set means nothing can be solved — fail loudly
if (BaseType::mDofSet.size()==0)
KRATOS_THROW_ERROR(std::logic_error, "No degrees of freedom!", "");
BaseType::mDofSetIsInitialized = true;
//KRATOS_WATCH("FINISHED SETUP DOFSET OF BUILDER AND SOLVER OF ULF")
//BELOW IS THE OLD VERSION
/*
//count dofs
mnumber_of_active_nodes = 0;
for (typename NodesArrayType::iterator it=r_model_part.NodesBegin(); it!=r_model_part.NodesEnd(); ++it)
{
if( (it->GetValue(NEIGHBOUR_NODES)).size() != 0 )
{
mnumber_of_active_nodes += 1;
}
}
//getting the dof position
//unsigned int dof_position = r_model_part.NodesBegin()->GetDofPosition(PRESSURE);
//fills the DofList
BaseType::mDofSet.clear();
BaseType::mDofSet.reserve( mnumber_of_active_nodes * TDim );
int FractionalStepNumber = r_model_part.GetProcessInfo()[FRACTIONAL_STEP];
KRATOS_WATCH(FractionalStepNumber);
if(TDim == 2)
{
for (typename NodesArrayType::iterator it=r_model_part.NodesBegin(); it!=r_model_part.NodesEnd(); ++it)
{
if( (it->GetValue(NEIGHBOUR_NODES)).size() != 0 )
{
BaseType::mDofSet.push_back( it->pGetDof(DISPLACEMENT_X) );
BaseType::mDofSet.push_back( it->pGetDof(DISPLACEMENT_Y) );
}
}
}
else if(TDim == 3)
{
for (typename NodesArrayType::iterator it=r_model_part.NodesBegin(); it!=r_model_part.NodesEnd(); ++it)
{
if( (it->GetValue(NEIGHBOUR_NODES)).size() != 0 )
{
BaseType::mDofSet.push_back( it->pGetDof(DISPLACEMENT_X) );
BaseType::mDofSet.push_back( it->pGetDof(DISPLACEMENT_Y) );
BaseType::mDofSet.push_back( it->pGetDof(DISPLACEMENT_Z) );
}
}
}
//before it was like that:
//this->mEquationSystemSize = rDofSet.size();
this->mEquationSystemSize = BaseType::mDofSet.size();
//throws an execption if there are no Degrees of freedom involved in the analysis
if (BaseType::mDofSet.size()==0)
KRATOS_THROW_ERROR(std::logic_error, "No degrees of freedom!", "");
BaseType::mDofSetIsInitialized = true;
*/
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
//this function numbers the DOFS - from 1 onwards... (note, that only DISPLACEMENT DOFs are stored, PRESSURE is not!!!!)
// Numbers the DOFs consecutively from 0 upward. Note that only the
// DISPLACEMENT DOFs collected by SetUpDofSet are numbered here
// (PRESSURE is not part of this system).
void SetUpSystem(
ModelPart& r_model_part
)
{
KRATOS_TRY
// assign a consecutive equation id to every DOF in the set
unsigned int eq_index = 0;
for (typename DofsArrayType::iterator dof_iter = BaseType::mDofSet.begin(); dof_iter != BaseType::mDofSet.end(); ++dof_iter)
{
dof_iter->SetEquationId(eq_index++);
}
KRATOS_CATCH("");
}
//**************************************************************************
//**************************************************************************
//
// Resizes the system matrix A and vectors Dx, b to the equation system
// size, plus the solver-specific auxiliaries: the divergence matrix mD
// (pressure x displacement), the consistent mass matrix mMconsistent and
// the inverted lumped mass vector mMdiagInv (both of reduced, per-node
// dimension = system size / TDim).
void ResizeAndInitializeVectors( typename TSchemeType::Pointer pScheme,
TSystemMatrixType& A,
TSystemMatrixType& mD,
TSystemVectorType& Dx,
TSystemVectorType& b,
TSystemMatrixType& mMconsistent,
TSystemVectorType& mMdiagInv,
ModelPart& rModelPart
)
{
KRATOS_TRY
//resizing the system vectors and matrix
if (A.size1() == 0 || this->GetReshapeMatrixFlag() == true) //if the matrix is not initialized
{
A.resize(this->mEquationSystemSize,this->mEquationSystemSize,false);
//ConstructMatrixStructure(A);
}
if(Dx.size() != this->mEquationSystemSize)
Dx.resize(this->mEquationSystemSize,false);
if(b.size() != this->mEquationSystemSize)
b.resize(this->mEquationSystemSize,false);
if(BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0) );
BaseType::mpReactionsVector.swap(pNewReactionsVector);
}
//resize auxiliaries
// one row per node: the displacement system has TDim DOFs per node
unsigned int reduced_dim = this->mEquationSystemSize / TDim;
if(mD.size1() != reduced_dim)
mD.resize(reduced_dim,this->mEquationSystemSize,false);
if(mMconsistent.size1() != reduced_dim)
mMconsistent.resize(reduced_dim,reduced_dim,false);
if(mMdiagInv.size() != reduced_dim )
mMdiagInv.resize(reduced_dim,false);
KRATOS_CATCH("")
}
// Assembles the global LHS matrix A and RHS vector b from all element and
// condition contributions. Serial path when compiled without OpenMP;
// otherwise a parallel assembly guarded by one omp lock per matrix row.
void Build(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& b)
{
KRATOS_TRY
if(!pScheme)
KRATOS_THROW_ERROR(std::runtime_error, "No scheme provided!", "");
//getting the elements from the model
ElementsArrayType& pElements = r_model_part.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) );
#ifndef _OPENMP
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
// assemble all elements
for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it)
{
//calculate elemental contribution
pScheme->CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo);
//assemble the elemental contribution
AssembleLHS(A,LHS_Contribution,EquationId);
AssembleRHS(b,RHS_Contribution,EquationId);
// clean local elemental memory
pScheme->CleanMemory(*it);
}
LHS_Contribution.resize(0,0,false);
RHS_Contribution.resize(0,false);
// assemble all conditions
for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it)
{
//calculate elemental contribution
pScheme->Condition_CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo);
//assemble the elemental contribution
AssembleLHS(A,LHS_Contribution,EquationId);
AssembleRHS(b,RHS_Contribution,EquationId);
}
#else
//creating an array of lock variables of the size of the system matrix
// one lock per global row serializes concurrent writes to A and b
std::vector< omp_lock_t > lock_array(A.size1());
int A_size = A.size1();
for (int i = 0; i < A_size; i++)
omp_init_lock(&lock_array[i]);
//create a partition of the element array
int number_of_threads = omp_get_max_threads();
vector<unsigned int> element_partition;
CreatePartition(number_of_threads, pElements.size(), element_partition);
//KRATOS_WATCH(number_of_threads);
//KRATOS_WATCH(element_partition);
double start_prod = omp_get_wtime();
#pragma omp parallel for
for (int k = 0; k < number_of_threads; k++)
{
//contributions to the system
// thread-local scratch: each thread owns its own local matrices/ids
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ElementsArrayType::ptr_iterator it_begin = pElements.ptr_begin() + element_partition[k];
typename ElementsArrayType::ptr_iterator it_end = pElements.ptr_begin() + element_partition[k + 1];
// assemble all elements
for (typename ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it)
{
//calculate elemental contribution
pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, lock_array);
/*
double aaaa = TSparseSpace::TwoNorm(b);
if (TSparseSpace::TwoNorm(b) == aaaa + 1000000000000000000.0)
{
KRATOS_WATCH((*it)->Id())
KRATOS_THROW_ERROR(std::logic_error, "Something is wrong: fluid element cannot have all 3 nodes at the FSI boundary " , "");
}
*/
// clean local elemental memory
pScheme->CleanMemory(*it);
// #pragma omp critical
// {
// //assemble the elemental contribution
// AssembleLHS(A,LHS_Contribution,EquationId);
// AssembleRHS(b,RHS_Contribution,EquationId);
//
// // clean local elemental memory
// pScheme->CleanMemory(*it);
// }
}
}
//KRATOS_WATCH("Finished assembling of builder and solver")
vector<unsigned int> condition_partition;
CreatePartition(number_of_threads, ConditionsArray.size(), condition_partition);
#pragma omp parallel for
for (int k = 0; k < number_of_threads; k++)
{
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
Condition::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ConditionsArrayType::ptr_iterator it_begin = ConditionsArray.ptr_begin() + condition_partition[k];
typename ConditionsArrayType::ptr_iterator it_end = ConditionsArray.ptr_begin() + condition_partition[k + 1];
// assemble all elements
for (typename ConditionsArrayType::ptr_iterator it = it_begin; it != it_end; ++it)
{
//calculate elemental contribution
pScheme->Condition_CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, lock_array);
// #pragma omp critical
// {
// //assemble the elemental contribution
// AssembleLHS(A,LHS_Contribution,EquationId);
// AssembleRHS(b,RHS_Contribution,EquationId);
// }
}
}
double stop_prod = omp_get_wtime();
std::cout << "time: " << stop_prod - start_prod << std::endl;
// release the per-row locks created above
for (int i = 0; i < A_size; i++)
omp_destroy_lock(&lock_array[i]);
//KRATOS_WATCH("finished parallel building");
// //ensure that all the threads are syncronized here
// #pragma omp barrier
#endif
KRATOS_CATCH("")
}
/*@} */
/**@name Operations */
/*@{ */
/*@} */
/**@name Access */
/*@{ */
/*@} */
/**@name Inquiry */
/*@{ */
/*@} */
/**@name Friends */
/*@{ */
/*@} */
protected:
/**@name Protected static Member Variables */
/*@{ */
/*@} */
/**@name Protected member Variables */
/*@{ */
/*@} */
/**@name Protected Operators*/
/*@{ */
/*@} */
/**@name Protected Operations*/
/*@{ */
/*@} */
/**@name Protected Access */
/*@{ */
/*@} */
/**@name Protected Inquiry */
/*@{ */
/*@} */
/**@name Protected LifeCycle */
/*@{ */
/*@} */
public:
/**@name Static Member Variables */
/*@{ */
/*@} */
/**@name Member Variables */
/*@{ */
TSystemMatrixType mD;
TSystemMatrixType mMconsistent;
TSystemVectorType mMdiagInv;
TSystemVectorType mpreconditioner;
unsigned int mnumber_of_active_nodes;
GlobalPointersVector<Node<3> > mActiveNodes;
//private:
/**@name Static Member Variables */
/*@{ */
/*@} */
/**@name Member Variables */
/*@{ */
// GlobalPointersVector<Node<3> > mActiveNodes;
/*@} */
/**@name Private Operations*/
/*@{ */
//**************************************************************************
// Splits [0, number_of_rows) into number_of_threads contiguous chunks of
// equal integer size; the final boundary is pinned to number_of_rows so
// the last chunk absorbs the remainder of the division.
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, vector<unsigned int>& partitions)
{
partitions.resize(number_of_threads + 1);
const int chunk_size = number_of_rows / number_of_threads;
unsigned int boundary = 0;
for (unsigned int i = 0; i < number_of_threads; i++)
{
partitions[i] = boundary;
boundary += chunk_size;
}
partitions[number_of_threads] = number_of_rows;
}
#ifdef _OPENMP
// Thread-safe assembly of one local LHS/RHS pair into the global A and b.
// The per-row omp lock serializes all writes touching a given global row;
// equation ids >= mEquationSystemSize (fixed DOFs) are skipped.
void Assemble(
TSystemMatrixType& A,
TSystemVectorType& b,
const LocalSystemMatrixType& LHS_Contribution,
const LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId,
std::vector< omp_lock_t >& lock_array
)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize)
{
// the row lock covers both the b entry and the whole matrix row
omp_set_lock(&lock_array[i_global]);
b[i_global] += RHS_Contribution(i_local);
for (unsigned int j_local = 0; j_local < local_size; j_local++)
{
unsigned int j_global = EquationId[j_local];
if (j_global < BaseType::mEquationSystemSize)
{
A(i_global, j_global) += LHS_Contribution(i_local, j_local);
}
}
omp_unset_lock(&lock_array[i_global]);
}
//note that computation of reactions is not performed here!
}
}
// Thread-safe assembly of one local RHS vector into the global b, using
// the same per-row lock array as Assemble; fixed DOFs are skipped.
void AssembleRHS_parallel(
TSystemVectorType& b,
const LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId,
std::vector< omp_lock_t >& lock_array
)
{
unsigned int local_size = RHS_Contribution.size();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize)
{
omp_set_lock(&lock_array[i_global]);
b[i_global] += RHS_Contribution(i_local);
omp_unset_lock(&lock_array[i_global]);
}
//note that computation of reactions is not performed here!
}
}
#endif
//**************************************************************************
// Serial scatter of the local LHS matrix into the global matrix A.
// Entries whose equation id is >= mEquationSystemSize (fixed DOFs)
// are ignored.
void AssembleLHS(
TSystemMatrixType& A,
LocalSystemMatrixType& LHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
const unsigned int local_size = LHS_Contribution.size1();
for (unsigned int row = 0; row < local_size; row++)
{
const unsigned int row_global = EquationId[row];
if (row_global >= BaseType::mEquationSystemSize)
continue; // fixed DOF: no row in the global system
for (unsigned int col = 0; col < local_size; col++)
{
const unsigned int col_global = EquationId[col];
if (col_global < BaseType::mEquationSystemSize)
{
A(row_global, col_global) += LHS_Contribution(row, col);
}
}
}
}
//**************************************************************************
// Serial scatter of the local RHS vector into the global vector b.
// Entries whose equation id is >= mEquationSystemSize (fixed DOFs)
// are ignored.
void AssembleRHS(
TSystemVectorType& b,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
const unsigned int local_size = RHS_Contribution.size();
for (unsigned int entry = 0; entry < local_size; entry++)
{
const unsigned int global_index = EquationId[entry];
if (global_index < BaseType::mEquationSystemSize)
{
b[global_index] += RHS_Contribution[entry];
}
}
}
//**************************************************************************
//**************************************************************************
// Computes reactions: zeroes the nodal REACTION values, rebuilds the RHS,
// then copies b into the reaction value of every fixed DOF.
void CalculateReactions(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b)
{
//KRATOS_WATCH(b);
TSparseSpace::SetToZero(b);
//KRATOS_WATCH("Calculating REACTIONSSSSSSSS")
//reset the reactions to zero in all the nodes
for (typename NodesArrayType::iterator node_iterator =r_model_part.NodesBegin(); node_iterator !=r_model_part.NodesEnd(); ++node_iterator)
{
node_iterator->FastGetSolutionStepValue(REACTION_X)=0.0;
node_iterator->FastGetSolutionStepValue(REACTION_Y)=0.0;
node_iterator->FastGetSolutionStepValue(REACTION_Z)=0.0;
}
//refresh RHS to have the correct reactions
BuildRHS(pScheme,r_model_part,b);
//KRATOS_WATCH(b)
/*
for (typename NodesArrayType::iterator node_iterator =r_model_part.NodesBegin(); node_iterator !=r_model_part.NodesEnd(); ++node_iterator)
{
//not adding the lonely nodes:
if( node_iterator->FastGetSolutionStepValue(IS_INTERFACE)==1.0 )
{
//we add one because we have to account for the contribution of the node itself
unsigned int eq_id=(node_iterator->GetDof(DISPLACEMENT_X)).EquationId();
node_iterator->FastGetSolutionStepValue(REACTION_X)=b[eq_id];
eq_id=(node_iterator->GetDof(DISPLACEMENT_Y)).EquationId();
node_iterator->FastGetSolutionStepValue(REACTION_Y)=b[eq_id];
}
}
*/
//array_1d<double, 3> ReactionsVec;
// store the RHS entry of every fixed DOF as its reaction value
typename DofsArrayType::ptr_iterator it2;
for (it2=BaseType::mDofSet.ptr_begin(); it2 != BaseType::mDofSet.ptr_end(); ++it2)
{
//JUST FOR ONE EXAMPLE - Turek (otherwise the below is correct)
if ( (*it2)->IsFixed() )
{
unsigned int eq_id=(*it2)->EquationId();
//KRATOS_WATCH(eq_id)
//KRATOS_WATCH(b[eq_id])
(*it2)->GetSolutionStepReactionValue() = b[eq_id];
//KRATOS_WATCH((*it2)->GetSolutionStepReactionValue())
}
//
}
}
//**************************************************************************
//**************************************************************************
/// Assembles the global right-hand-side vector b from all elements and
/// conditions of the model part.
///
/// b is ACCUMULATED into, not zeroed here - the caller is responsible for
/// clearing it first (see CalculateReactions).
/// Without OpenMP a plain serial loop is used; with OpenMP the elements and
/// conditions are partitioned over the threads (CreatePartition) and the
/// accumulation into b is protected by one lock per system row
/// (AssembleRHS_parallel).
///
/// @param pScheme      scheme providing the local RHS contributions
/// @param r_model_part model part holding the elements and conditions
/// @param b            global RHS vector (accumulated into)
void BuildRHS(
    typename TSchemeType::Pointer pScheme,
    ModelPart& r_model_part,
    TSystemVectorType& b)
{
    KRATOS_TRY
    //Getting the Elements
    ElementsArrayType& pElements = r_model_part.Elements();
    //getting the array of the conditions
    ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
#ifndef _OPENMP
    ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
    //resetting to zero the vector of reactions
    //NOTE(review): the reactions vector is cleared only in this serial
    //branch, not in the OpenMP branch below - confirm this asymmetry is
    //intentional.
    TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) );
    //contributions to the system
    LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
    LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
    //vector containing the localization in the system of the different
    //terms
    Element::EquationIdVectorType EquationId;
    // assemble all elements
    for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it)
    {
        //calculate elemental Right Hand Side Contribution
        pScheme->Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo);
        //assemble the elemental contribution
        AssembleRHS(b,RHS_Contribution,EquationId);
    }
    LHS_Contribution.resize(0,0,false);
    RHS_Contribution.resize(0,false);
    // assemble all conditions
    for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it)
    {
        //calculate elemental contribution
        pScheme->Condition_Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo);
        //assemble the elemental contribution
        AssembleRHS(b,RHS_Contribution,EquationId);
    }
#else
    //creating an array of lock variables of the size of the system matrix
    //(one lock per row of b guards the concurrent accumulation)
    std::vector< omp_lock_t > lock_array(b.size());
    int b_size = b.size();
    for (int i = 0; i < b_size; i++)
        omp_init_lock(&lock_array[i]);
    //create a partition of the element array
    int number_of_threads = omp_get_max_threads();
    vector<unsigned int> element_partition;
    CreatePartition(number_of_threads, pElements.size(), element_partition);
    double start_prod = omp_get_wtime();
    #pragma omp parallel for
    for (int k = 0; k < number_of_threads; k++)
    {
        //thread-local contributions to the system
        LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
        //vector containing the localization in the system of the different
        //terms
        Element::EquationIdVectorType EquationId;
        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        //element range [element_partition[k], element_partition[k+1]) of this thread
        typename ElementsArrayType::ptr_iterator it_begin = pElements.ptr_begin() + element_partition[k];
        typename ElementsArrayType::ptr_iterator it_end = pElements.ptr_begin() + element_partition[k + 1];
        // assemble all elements
        for (typename ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it)
        {
            //calculate elemental contribution
            pScheme->Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo);
            //assemble the elemental contribution (lock-protected per row)
            AssembleRHS_parallel(b, RHS_Contribution, EquationId, lock_array);
            // clean local elemental memory
            pScheme->CleanMemory(*it);
        }
    }
    //same partitioned scheme for the conditions
    vector<unsigned int> condition_partition;
    CreatePartition(number_of_threads, ConditionsArray.size(), condition_partition);
    #pragma omp parallel for
    for (int k = 0; k < number_of_threads; k++)
    {
        //thread-local contributions to the system
        LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
        Condition::EquationIdVectorType EquationId;
        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        typename ConditionsArrayType::ptr_iterator it_begin = ConditionsArray.ptr_begin() + condition_partition[k];
        typename ConditionsArrayType::ptr_iterator it_end = ConditionsArray.ptr_begin() + condition_partition[k + 1];
        // assemble all conditions of this thread's range
        for (typename ConditionsArrayType::ptr_iterator it = it_begin; it != it_end; ++it)
        {
            //calculate elemental contribution
            pScheme->Condition_Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo);
            //assemble the elemental contribution (lock-protected per row)
            AssembleRHS_parallel(b, RHS_Contribution, EquationId, lock_array);
        }
    }
    double stop_prod = omp_get_wtime();
    std::cout << "time: " << stop_prod - start_prod << std::endl;
    for (int i = 0; i < b_size; i++)
        omp_destroy_lock(&lock_array[i]);
#endif
    KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/// Builds the sparsity pattern of the (velocity) system matrix A from the
/// nodal neighbourhood graph: one TDim x TDim block per node/neighbour pair.
/// Nodes without neighbours ("lonely" nodes) are not assembled.
/// NOTE: assumes the equation ids of DISPLACEMENT_X/_Y/_Z are consecutive.
void ConstructMatrixStructure(
    TSystemMatrixType& A, ModelPart& r_model_part
)
{
    KRATOS_TRY
    // work array holding the column ids of one block-row
    std::vector<int> work_columns;
    work_columns.reserve(1000);

    // first pass: count the non-zero blocks (node + its neighbours)
    int nnz_blocks = 0;
    for (typename NodesArrayType::iterator node_iterator = r_model_part.NodesBegin(); node_iterator != r_model_part.NodesEnd(); ++node_iterator)
    {
        const unsigned int n_neighb = (node_iterator->GetValue(NEIGHBOUR_NODES)).size();
        if (n_neighb != 0)
            nnz_blocks += 1 + n_neighb; // +1 accounts for the node itself
    }
    // each block contributes TDim*TDim scalar entries
    A.reserve(nnz_blocks * TDim * TDim, false);

    // second pass: push the graph block-row by block-row
    unsigned int dof_position = r_model_part.NodesBegin()->GetDofPosition(DISPLACEMENT_X);
    for (typename NodesArrayType::iterator it = r_model_part.NodesBegin(); it != r_model_part.NodesEnd(); ++it)
    {
        GlobalPointersVector< Node<3> >& neighb_nodes = it->GetValue(NEIGHBOUR_NODES);
        if (neighb_nodes.size() == 0)
            continue;

        // first equation id of this node's block
        const unsigned int row_index = it->GetDof(DISPLACEMENT_X, dof_position).EquationId();

        // the node's own columns ...
        for (unsigned int kk = 0; kk < TDim; kk++)
            work_columns.push_back(row_index + kk);

        // ... followed by those of its neighbours
        for (GlobalPointersVector< Node<3> >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
        {
            const unsigned int neighb_eq_id = (i->GetDof(DISPLACEMENT_X, dof_position)).EquationId();
            for (unsigned int kk = 0; kk < TDim; kk++)
                work_columns.push_back(neighb_eq_id + kk);
        }

        // push_back requires ordered column indices
        std::sort(work_columns.begin(), work_columns.end());
        for (unsigned int kk = 0; kk < TDim; kk++)
        {
            for (unsigned int j = 0; j < work_columns.size(); j++)
                A.push_back(row_index + kk, work_columns[j], 0.00);
        }
        // reset the work array for the next node
        work_columns.clear();
    }
    KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/// Builds the sparsity pattern of the consistent (pressure) mass matrix.
/// The pressure graph is nodal: one scalar row per node, whose index is the
/// DISPLACEMENT_X equation id divided by TDim (pressure is scalar, and the
/// ids of DISPLACEMENT_X/_Y/_Z are assumed consecutive). Lonely nodes
/// (no neighbours) are skipped.
void ConstructMatrixStructure_Mconsistent(
    TSystemMatrixType& Mconsistent, ModelPart& r_model_part)
{
    KRATOS_TRY
    // work array holding the column ids of one row
    std::vector<int> work_columns;
    work_columns.reserve(1000);

    // count the non-zeros: one per node plus one per neighbour
    int total_nnz = 0;
    for (typename NodesArrayType::iterator it = r_model_part.NodesBegin(); it != r_model_part.NodesEnd(); ++it)
    {
        const unsigned int n_neighb = (it->GetValue(NEIGHBOUR_NODES)).size();
        if (n_neighb != 0)
            total_nnz += 1 + n_neighb; // +1 accounts for the node itself
    }
    Mconsistent.reserve(total_nnz, false);

    // fill the matrix row by row
    unsigned int dof_position = r_model_part.NodesBegin()->GetDofPosition(DISPLACEMENT_X);
    for (typename NodesArrayType::iterator it = r_model_part.NodesBegin(); it != r_model_part.NodesEnd(); ++it)
    {
        GlobalPointersVector< Node<3> >& neighb_nodes = it->GetValue(NEIGHBOUR_NODES);
        if (neighb_nodes.size() == 0)
            continue;

        // first displacement equation id of the node
        const unsigned int row_index = it->GetDof(DISPLACEMENT_X, dof_position).EquationId();

        // pressure column of the node itself ...
        work_columns.push_back(row_index / TDim);
        // ... followed by the pressure columns of its neighbours
        for (GlobalPointersVector< Node<3> >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
        {
            const unsigned int neighb_eq_id = (i->GetDof(DISPLACEMENT_X, dof_position)).EquationId();
            work_columns.push_back(neighb_eq_id / TDim);
        }

        // push_back requires ordered column indices
        std::sort(work_columns.begin(), work_columns.end());
        for (unsigned int j = 0; j < work_columns.size(); j++)
            Mconsistent.push_back(row_index / TDim, work_columns[j], 0.00);

        // reset the work array for the next node
        work_columns.clear();
    }
    KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/// Builds the sparsity pattern of the divergence matrix D: pressure rows
/// (DISPLACEMENT_X equation id divided by TDim - ids of _X,_Y,_Z assumed
/// consecutive) versus velocity columns (TDim columns per node/neighbour).
/// Lonely nodes (no neighbours) are skipped.
void ConstructMatrixStructure_DivergenceMatrixD(
    TSystemMatrixType& mD, ModelPart& r_model_part)
{
    KRATOS_TRY
    // work array holding the column ids of one row
    std::vector<int> work_columns;
    work_columns.reserve(1000);

    // count the non-zero blocks (node + its neighbours)
    int total_nnz = 0;
    for (typename NodesContainerType::iterator it = r_model_part.NodesBegin(); it != r_model_part.NodesEnd(); ++it)
    {
        const unsigned int n_neighb = (it->GetValue(NEIGHBOUR_NODES)).size();
        if (n_neighb != 0)
            total_nnz += 1 + n_neighb; // +1 accounts for the node itself
    }
    // each block row carries TDim velocity columns per node
    mD.reserve(total_nnz * TDim, false);

    // fill the matrix row by row
    unsigned int dof_position = r_model_part.NodesBegin()->GetDofPosition(DISPLACEMENT_X);
    for (typename NodesArrayType::iterator it = r_model_part.NodesBegin(); it != r_model_part.NodesEnd(); ++it)
    {
        GlobalPointersVector< Node<3> >& neighb_nodes = it->GetValue(NEIGHBOUR_NODES);
        if (neighb_nodes.size() == 0)
            continue;

        // first displacement equation id of the node
        const unsigned int row_index = it->GetDof(DISPLACEMENT_X, dof_position).EquationId();

        // the node's own velocity columns ...
        for (unsigned int kk = 0; kk < TDim; kk++)
            work_columns.push_back(row_index + kk);

        // ... followed by those of its neighbours
        for (GlobalPointersVector< Node<3> >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
        {
            const unsigned int neighb_eq_id = (i->GetDof(DISPLACEMENT_X, dof_position)).EquationId();
            for (unsigned int kk = 0; kk < TDim; kk++)
                work_columns.push_back(neighb_eq_id + kk);
        }

        // push_back requires ordered column indices
        std::sort(work_columns.begin(), work_columns.end());
        for (unsigned int j = 0; j < work_columns.size(); j++)
            mD.push_back(row_index / TDim, work_columns[j], 0.00);

        // reset the work array for the next node
        work_columns.clear();
    }
    KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/// Assembles, in a single element loop, the auxiliary operators of the
/// projection scheme:
///  - mD         : divergence matrix (pressure rows, velocity columns)
///  - Mconsistent: consistent pressure mass matrix
///  - mMdiagInv  : lumped mass matrix, INVERTED in place at the end
/// Elements whose nodes are all flagged IS_STRUCTURE are skipped, so the
/// corresponding entries stay zero.
/// Pressure row indices are the DISPLACEMENT_X equation ids divided by TDim
/// (ids of _X,_Y,_Z assumed consecutive).
///
/// BUGFIX (OpenMP branch): the accumulation into mMdiagInv is now performed
/// INSIDE the per-row lock. Previously it happened before omp_set_lock, so
/// threads processing elements that share a node raced on the same entry.
void BuildAuxiliaries(
    TSystemMatrixType& mD,TSystemMatrixType& Mconsistent, TSystemVectorType& mMdiagInv,
    ModelPart& r_model_part)
{
    KRATOS_TRY
#ifndef _OPENMP
    BoundedMatrix<double,TDim+1,TDim> DN_DX;
    array_1d<double,TDim+1> N;
    array_1d<unsigned int ,TDim+1> local_indices;
    double Volume;
    double temp;
    //getting the dof position
    unsigned int dof_position = (r_model_part.NodesBegin())->GetDofPosition(DISPLACEMENT_X);
    double aaa = 1.0/(TDim+1.0);
    for(ModelPart::ElementsContainerType::iterator i = r_model_part.ElementsBegin();
            i!=r_model_part.ElementsEnd(); i++)
    {
        Geometry< Node<3> >& geom = i->GetGeometry();
        //count the structure nodes of the element
        unsigned int str_nr=0;
        for (unsigned int k = 0; k<geom.size(); k++)
        {
            str_nr+=(unsigned int)(i->GetGeometry()[k].FastGetSolutionStepValue(IS_STRUCTURE));
        }
        //skip elements ALL of whose nodes are structural: their entries stay zero
        if (geom.size()!=str_nr)
        {
            GeometryUtils::CalculateGeometryData(geom, DN_DX, N, Volume);
            if (Volume<0)
                Volume*=-1.0;
            //global equation ids of DISPLACEMENT_X for each node of the element
            for(unsigned int ii = 0; ii<geom.size(); ii++)
            {
                local_indices[ii] = geom[ii].GetDof(DISPLACEMENT_X,dof_position).EquationId();
            }
            //nodal share of the element volume: V/(TDim+1)
            temp = Volume*aaa;
            for(unsigned int row = 0; row<TDim+1; row++)
            {
                //pressure row index (see class-level note on id layout)
                unsigned int row_index = local_indices[row] / (TDim);
                //lumped mass matrix contribution
                mMdiagInv[row_index] += temp;
                for(unsigned int col = 0; col<TDim+1; col++)
                {
                    for(unsigned int kkk = 0; kkk<TDim; kkk++)
                    {
                        unsigned int col_index = local_indices[col]+kkk;
                        //divergence matrix
                        mD(row_index,col_index) += temp * DN_DX(col,kkk);
                        //consistent mass matrix
                        //NOTE(review): the 3D factors below (2.0*2.5 on the
                        //diagonal, *0.0 off-diagonal) differ from the OpenMP
                        //branch and from AssembleMassMatrices - confirm this
                        //special-casing is intentional; left unchanged here.
                        if (row_index==col_index)
                        {
                            if (TDim==2)
                                Mconsistent(row_index,col_index) += 0.25*temp * 2.0;
                            else if (TDim==3)
                                Mconsistent(row_index,col_index) += 0.2*temp * 2.0*2.5;
                        }
                        else
                        {
                            if (TDim==2)
                                Mconsistent(row_index,col_index) += 0.25*temp ;
                            else if (TDim==3)
                                Mconsistent(row_index,col_index) += 0.2*temp*0.0 ;
                        }
                    }
                }
            }
        }
    }
#else
    //one lock per (pressure) row of mD to protect the concurrent accumulation
    std::vector< omp_lock_t > lock_array(mD.size1());
    int D_size = mD.size1();
    for (int i = 0; i < D_size; i++)
        omp_init_lock(&lock_array[i]);
    //create a partition of the element array
    int number_of_threads = omp_get_max_threads();
    vector<unsigned int> element_partition;
    CreatePartition(number_of_threads, r_model_part.Elements().size(), element_partition);
    double start_prod = omp_get_wtime();
    #pragma omp parallel for
    for (int k = 0; k < number_of_threads; k++)
    {
        //thread-local work variables
        BoundedMatrix<double,TDim+1,TDim> DN_DX;
        array_1d<double,TDim+1> N;
        array_1d<unsigned int ,TDim+1> local_indices;
        double Volume;
        double temp;
        //getting the dof position
        unsigned int dof_position = (r_model_part.NodesBegin())->GetDofPosition(DISPLACEMENT_X);
        double aaa = 1.0/(TDim+1.0);
        //element range of this thread
        typename ElementsArrayType::ptr_iterator it_begin = r_model_part.Elements().ptr_begin() + element_partition[k];
        typename ElementsArrayType::ptr_iterator it_end = r_model_part.Elements().ptr_begin() + element_partition[k + 1];
        for (typename ElementsArrayType::ptr_iterator i = it_begin; i != it_end; ++i)
        {
            Geometry< Node<3> >& geom = (*i)->GetGeometry();
            //count the structure nodes of the element
            unsigned int str_nr=0;
            for (unsigned int kk = 0; kk<geom.size(); kk++)
            {
                str_nr+=(unsigned int)((*i)->GetGeometry()[kk].FastGetSolutionStepValue(IS_STRUCTURE));
            }
            //skip elements ALL of whose nodes are structural: their entries stay zero
            if (geom.size()!=str_nr)
            {
                GeometryUtils::CalculateGeometryData(geom, DN_DX, N, Volume);
                if (Volume<0)
                    Volume*=-1.0;
                //global equation ids of DISPLACEMENT_X for each node of the element
                for(unsigned int ii = 0; ii<geom.size(); ii++)
                {
                    local_indices[ii] = geom[ii].GetDof(DISPLACEMENT_X,dof_position).EquationId();
                }
                //nodal share of the element volume: V/(TDim+1)
                temp = Volume*aaa;
                for(unsigned int row = 0; row<TDim+1; row++)
                {
                    unsigned int row_index = local_indices[row] / (TDim);
                    //BUGFIX: acquire the row lock BEFORE touching mMdiagInv;
                    //this update previously raced between threads whose
                    //elements share the node of this row
                    omp_set_lock(&lock_array[row_index]);
                    //lumped mass matrix contribution
                    mMdiagInv[row_index] += temp;
                    for(unsigned int col = 0; col<TDim+1; col++)
                    {
                        //consistent mass matrix (pressure column index)
                        unsigned int col_index = local_indices[col] /(TDim);
                        if (row_index==col_index)
                        {
                            if (TDim==2)
                                Mconsistent(row_index,col_index) += 0.25*temp * 2.0;
                            else if (TDim==3)
                                Mconsistent(row_index,col_index) += 0.2*temp * 2.0;
                        }
                        else
                        {
                            if (TDim==2)
                                Mconsistent(row_index,col_index) += 0.25*temp ;
                            else if (TDim==3)
                                Mconsistent(row_index,col_index) += 0.2*temp ;
                        }
                        //divergence matrix (velocity column indices)
                        for(unsigned int kkk = 0; kkk<TDim; kkk++)
                        {
                            unsigned int vel_col_index = local_indices[col]+kkk;
                            mD(row_index,vel_col_index) += temp * DN_DX(col,kkk);
                        }
                    }
                    omp_unset_lock(&lock_array[row_index]);
                }
            }
        }
    }
    double stop_prod = omp_get_wtime();
    std::cout << "time: " << stop_prod - start_prod << std::endl;
    for (int i = 0; i < D_size; i++)
        omp_destroy_lock(&lock_array[i]);
#endif
    //this will be done sequentially in any case:
    //invert the lumped mass matrix entry by entry
    for(unsigned int i = 0; i<TSparseSpace::Size(mMdiagInv); i++)
    {
        if (mMdiagInv[i]>1e-26)
            mMdiagInv[i] = 1.0/mMdiagInv[i];
        else
        {
            //zero (or tiny) volume contribution: substitute a huge value
            //instead of failing - keeps the original behavior
            mMdiagInv[i] = 1000000000000.0;
        }
    }
    KRATOS_CATCH (" ")
}
/*
void BuildAuxiliaries(
TSystemMatrixType& mD,
ModelPart& r_model_part)
{
KRATOS_TRY
//KRATOS_WATCH("BUILDING AUXILIARY MATRIX D")
boost::numeric::ublas::bounded_matrix<double,TDim+1,TDim> DN_DX;
array_1d<double,TDim+1> N;
array_1d<unsigned int ,TDim+1> local_indices;
//array_1d<double,TDim+1> rhs_contribution;
double Volume;
double temp;
//getting the dof position
unsigned int dof_position = (r_model_part.NodesBegin())->GetDofPosition(DISPLACEMENT_X);
double aaa = 1.0/(TDim+1.0);
#ifndef _OPENMP
//if the element is not having all the nodes IS_STRUCTURE, assemble it, otherwise do nothing
for(ModelPart::ElementsContainerType::iterator i = r_model_part.ElementsBegin();
i!=r_model_part.ElementsEnd(); i++)
{
Geometry< Node<3> >& geom = i->GetGeometry();
//counting the n-r of structure nodes
unsigned int str_nr=0;
//for (int k = 0;k<TDim+1;k++)
for (unsigned int k = 0;k<geom.size();k++)
{
str_nr+=(unsigned int)(i->GetGeometry()[k].FastGetSolutionStepValue(IS_STRUCTURE));
}
///////////////////////////////////////////////////////////////////////////////////////////////
//if the element is not having all the nodes IS_STRUCTURE, assemble it, otherwise do nothing
// that means, that the entries corresponding to the structural elements are zero
///////////////////////////////////////////////////////////////////////////////////////////////
if (geom.size()!=str_nr)
{
GeometryUtils::CalculateGeometryData(geom, DN_DX, N, Volume);
if (Volume<0)
Volume*=-1.0;
//finiding local indices
//for(int ii = 0; ii<TDim+1; ii++)
for(unsigned int ii = 0; ii<geom.size(); ii++)
{
local_indices[ii] = geom[ii].GetDof(DISPLACEMENT_X,dof_position).EquationId();
}
//building matrix D (transpose of the gradient integrated by parts)
temp = Volume*aaa;
for(unsigned int row = 0; row<TDim+1; row++)
{
unsigned int row_index = local_indices[row] / (TDim); //ATTENTION! here i am doing a dangerous op
//KRATOS_WATCH(row_index)
for(unsigned int col = 0; col<TDim+1; col++)
{
for(unsigned int kkk = 0; kkk<TDim; kkk++)
{
//check if the below is correct (copied it from Mass matrix)
unsigned int col_index = local_indices[col]+kkk;
//unsigned int col_index = col + kkk;
mD(row_index,col_index) += temp * DN_DX(col,kkk);
}
}
}
}
}
#else
//creating an array of lock variables of the size of the system matrix
std::vector< omp_lock_t > lock_array(mD.size1());
int D_size = mD.size1();
for (int i = 0; i < D_size; i++)
omp_init_lock(&lock_array[i]);
//create a partition of the element array
int number_of_threads = omp_get_max_threads();
vector<unsigned int> element_partition;
CreatePartition(number_of_threads, r_model_part.Elements().size(), element_partition);
KRATOS_WATCH(number_of_threads);
KRATOS_WATCH(element_partition);
double start_prod = omp_get_wtime();
//#pragma omp parallel for private (DN_DX, N, local_indices, Volume, temp, aaa, dof_position)
#pragma omp parallel for
for (int k = 0; k < number_of_threads; k++)
{
boost::numeric::ublas::bounded_matrix<double,TDim+1,TDim> DN_DX;
array_1d<double,TDim+1> N;
array_1d<unsigned int ,TDim+1> local_indices;
//array_1d<double,TDim+1> rhs_contribution;
double Volume;
double temp;
//getting the dof position
unsigned int dof_position = (r_model_part.NodesBegin())->GetDofPosition(DISPLACEMENT_X);
double aaa = 1.0/(TDim+1.0);
//Element::EquationIdVectorType EquationId;
//ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ElementsArrayType::ptr_iterator it_begin = r_model_part.Elements().ptr_begin() + element_partition[k];
typename ElementsArrayType::ptr_iterator it_end = r_model_part.Elements().ptr_begin() + element_partition[k + 1];
// assemble all elements
for (typename ElementsArrayType::ptr_iterator i = it_begin; i != it_end; ++i)
{
Geometry< Node<3> >& geom = (*i)->GetGeometry();
//counting the n-r of structure nodes
unsigned int str_nr=0;
//for (int k = 0;k<TDim+1;k++)
for (unsigned int k = 0;k<geom.size();k++)
{
str_nr+=(unsigned int)((*i)->GetGeometry()[k].FastGetSolutionStepValue(IS_STRUCTURE));
}
///////////////////////////////////////////////////////////////////////////////////////////////
//if the element is not having all the nodes IS_STRUCTURE, assemble it, otherwise do nothing
// that means, that the entries corresponding to the structural elements are zero
///////////////////////////////////////////////////////////////////////////////////////////////
if (geom.size()!=str_nr)
{
GeometryUtils::CalculateGeometryData(geom, DN_DX, N, Volume);
if (Volume<0)
Volume*=-1.0;
//finiding local indices
//for(int ii = 0; ii<TDim+1; ii++)
for(unsigned int ii = 0; ii<geom.size(); ii++)
{
local_indices[ii] = geom[ii].GetDof(DISPLACEMENT_X,dof_position).EquationId();
}
//building matrix D (transpose of the gradient integrated by parts)
temp = Volume*aaa;
for(unsigned int row = 0; row<TDim+1; row++)
{
unsigned int row_index = local_indices[row] / (TDim); //ATTENTION! here i am doing a dangerous op
omp_set_lock(&lock_array[row_index]);
//KRATOS_WATCH(row_index)
for(unsigned int col = 0; col<TDim+1; col++)
{
for(unsigned int kkk = 0; kkk<TDim; kkk++)
{
//check if the below is correct (copied it from Mass matrix)
unsigned int col_index = local_indices[col]+kkk;
//unsigned int col_index = col + kkk;
mD(row_index,col_index) += temp * DN_DX(col,kkk);
}
}
omp_unset_lock(&lock_array[row_index]);
}
}
}
}
double stop_prod = omp_get_wtime();
std::cout << "time: " << stop_prod - start_prod << std::endl;
for (int i = 0; i < D_size; i++)
omp_destroy_lock(&lock_array[i]);
#endif
//KRATOS_WATCH("FINISHED BUILDING AUXILIARY MATRIX D")
KRATOS_CATCH (" ")
}
*/
//**************************************************************************
//**************************************************************************
//
//assembles consistent and lumped mass matrices
/// Assembles the lumped (diagonal, inverted in place) and consistent pressure
/// mass matrices in two element passes.
///
/// Pass 1 accumulates the nodal volume shares into mMdiagInv and then inverts
/// every entry (entries <= 1e-26 are replaced by a huge value instead of
/// failing). Pass 2 fills Mconsistent with the standard simplex mass matrix
/// scaled to pressure rows/columns. Elements whose nodes are all flagged
/// IS_STRUCTURE are skipped in both passes.
/// Pressure indices are the DISPLACEMENT_X equation ids divided by TDim
/// (ids of _X,_Y,_Z assumed consecutive).
///
/// @param Mconsistent consistent pressure mass matrix (accumulated into)
/// @param mMdiagInv   lumped mass vector; on exit holds the INVERSE entries
/// @param r_model_part model part supplying the elements
void AssembleMassMatrices(TSystemMatrixType& Mconsistent, TSystemVectorType& mMdiagInv, ModelPart& r_model_part)
{
    //first we assemble the diagonal mass matrix
    KRATOS_TRY
    BoundedMatrix<double,TDim+1,TDim> DN_DX;
    array_1d<double,TDim+1> N;
    array_1d<unsigned int ,TDim+1> local_indices;
    double Volume;
    double temp;
    //getting the dof position
    unsigned int dof_position = (r_model_part.NodesBegin())->GetDofPosition(DISPLACEMENT_X);
    double aaa = 1.0/(TDim+1.0);
    for(ModelPart::ElementsContainerType::iterator i = r_model_part.ElementsBegin();
            i!=r_model_part.ElementsEnd(); i++)
    {
        Geometry< Node<3> >& geom = i->GetGeometry();
        //counting number of structural nodes
        unsigned int str_nr=0;
        for (unsigned int k = 0; k<geom.size(); k++)
        {
            str_nr+=int(i->GetGeometry()[k].FastGetSolutionStepValue(IS_STRUCTURE));
        }
        //we do not do anything for the elements of the structure (all nodes are IS_STR)
        if (geom.size()!=str_nr)
        {
            GeometryUtils::CalculateGeometryData(geom, DN_DX, N, Volume);
            if (Volume<0)
                Volume*=-1.0;
            //finding the global equation ids of DISPLACEMENT_X for each node
            for(unsigned int ii = 0; ii<geom.size(); ii++)
            {
                local_indices[ii] = geom[ii].GetDof(DISPLACEMENT_X,dof_position).EquationId();
            }
            //nodal share of the element volume: V/(TDim+1)
            temp = Volume*aaa;
            for(unsigned int row = 0; row<TDim+1; row++)
            {
                unsigned int row_index = local_indices[row] / (TDim);
                mMdiagInv[row_index] += temp;
            }
        }
    }
    //inverting the mass matrix entry by entry
    for(unsigned int i = 0; i<TSparseSpace::Size(mMdiagInv); i++)
    {
        if (mMdiagInv[i]>1e-26)
            mMdiagInv[i] = 1.0/mMdiagInv[i];
        else
        {
            //zero (or tiny) volume contribution: substitute a huge value
            //instead of raising an error
            mMdiagInv[i] = 1000000000000.0;
        }
    }
    //AND NOW WE BUILD THE CONSISTENT MASS MATRIX
    for(ModelPart::ElementsContainerType::iterator i = r_model_part.ElementsBegin();
            i!=r_model_part.ElementsEnd(); i++)
    {
        Geometry< Node<3> >& geom = i->GetGeometry();
        unsigned int str_nr=0;
        for (unsigned int k = 0; k<i->GetGeometry().size(); k++)
        {
            str_nr+=(unsigned int)(i->GetGeometry()[k].FastGetSolutionStepValue(IS_STRUCTURE));
        }
        if (geom.size()!=str_nr)
        {
            GeometryUtils::CalculateGeometryData(geom, DN_DX, N, Volume);
            if (Volume<0)
                Volume*=-1.0;
            //finding the global equation ids of DISPLACEMENT_X for each node
            for(unsigned int ii = 0; ii<geom.size(); ii++)
            {
                local_indices[ii] = geom[ii].GetDof(DISPLACEMENT_X,dof_position).EquationId();
            }
            temp = Volume*aaa;
            //element mass matrix has a shape:
            //          2 1 1
            // A/12.0*  1 2 1  in 2D
            //          1 1 2
            //
            // and
            //
            //          2 1 1 1
            // V/20.0*  1 2 1 1  in 3D
            //          1 1 2 1
            //          1 1 1 2
            //nothing should be added in case of membrane
            for(unsigned int row = 0; row<TDim+1; row++)
            {
                //pressure is a scalar => matrix size is TDim times smaller than for the vector problem
                unsigned int row_index = local_indices[row] / (TDim);
                for(unsigned int col = 0; col<TDim+1; col++)
                {
                    unsigned int col_index = local_indices[col] /(TDim);
                    if (row_index==col_index)
                    {
                        //diagonal entry: factor 2 of the simplex mass matrix
                        if (TDim==2)
                            Mconsistent(row_index,col_index) += 0.25*temp * 2.0;
                        else if (TDim==3)
                            Mconsistent(row_index,col_index) += 0.2*temp * 2.0;
                    }
                    else
                    {
                        //off-diagonal entry: factor 1
                        if (TDim==2)
                            Mconsistent(row_index,col_index) += 0.25*temp ;
                        else if (TDim==3)
                            Mconsistent(row_index,col_index) += 0.2*temp;
                    }
                }
            }
        }
    }
    KRATOS_CATCH("")
}
//result = componentwise product of precond and vec (applies the diagonal preconditioner)
//
/// Componentwise product result[i] = precond[i] * vec[i] (application of a
/// diagonal preconditioner). Throws if the three vectors differ in size.
void calc_prod_precond_vec( TSystemVectorType& vec,
                            TSystemVectorType& precond,
                            TSystemVectorType& result)
{
    KRATOS_TRY
    if ( precond.size()!=vec.size() )
        KRATOS_THROW_ERROR(std::logic_error,"preconditioner size is wrong","")
    if ( precond.size()!=result.size() )
        KRATOS_THROW_ERROR(std::logic_error,"preconditioner size is wrong","")
    TSparseSpace::SetToZero(result);
    const int n_entries = static_cast<int>(precond.size());
    #pragma omp parallel for
    for (int component = 0; component < n_entries; component++)
    {
        result[component] = precond[component] * vec[component];
    }
    KRATOS_CATCH("");
}
/// Applies the operator G*Minv*D (with G = trans(D)) to the vector x:
/// destination = trans(mD) * diag(Minv) * (mD * x).
/// WorkArray is a caller-provided scratch vector, overwritten here.
void calc_GMinvD_prod(TSystemMatrixType& mD,
                      TSystemVectorType& Minv,
                      TSystemVectorType& x,
                      TSystemVectorType& WorkArray,
                      TSystemVectorType& destination)
{
    KRATOS_TRY
    // WorkArray = D * x
    TSparseSpace::SetToZero(WorkArray);
    TSparseSpace::Mult(mD, x, WorkArray);
    // scale by the inverse lumped mass: WorkArray[i] *= Minv[i]
    const int n_entries = static_cast<int>(WorkArray.size());
    #pragma omp parallel for
    for (int component = 0; component < n_entries; component++)
    {
        WorkArray[component] *= Minv[component];
    }
    // destination = trans(D) * WorkArray
    TSparseSpace::TransposeMult(mD, WorkArray, destination);
    KRATOS_CATCH("");
}
//*************************************************************************************************
//*************************************************************************************************
void ReturnDx( TSystemVectorType& Dx, TSystemVectorType& xi)
{
    KRATOS_TRY
    // Copy the auxiliary solution vector xi into the solution update Dx,
    // verifying first that the two vectors are compatible in size.
    if ( Dx.size() != xi.size() )
        KRATOS_THROW_ERROR(std::logic_error,"Dx and xi sizes mismatch","")
    Dx = xi;
    KRATOS_CATCH("");
}
//*************************************************************************************************
//*************************************************************************************************
// Builds the diagonal preconditioner p[i] = 1 / ( A(i,i) + [G*Minv*D](i,i) ),
// where G = trans(D); the diagonal of G*Minv*D is accumulated directly from
// the CSR storage of D. Near-zero and negative diagonals are replaced by huge
// positive values (see notes below).
void CalculatePreconditionerDiagonalMatrix(const TSystemMatrixType& D,
const TSystemVectorType& Minv,
const TSystemMatrixType& A,
TSystemVectorType& preconditioner)
{
KRATOS_TRY
//KRATOS_WATCH("COMPUTING preconditioner")
typedef unsigned int size_type;
typedef double value_type;
TSparseSpace::SetToZero(preconditioner);
if ( preconditioner.size()!=A.size1() )
KRATOS_THROW_ERROR(std::logic_error,"preconditioner size is wrong","")
//get diagonal of matrix A
for(unsigned int i = 0; i<A.size1(); i++)
{
preconditioner[i] = A(i,i);
}
//TSparseSpace::SetToZero(preconditioner);
//calculate and add diagonal of G*Minv*D
//using that G*Minv*D(i,i) = D_k
// Walk D in CSR form: index1_data = row pointers, index2_data = column
// indices, value_data = values. Each entry D(k,col) contributes
// Minv[k]*D(k,col)^2 to diagonal entry col of G*Minv*D.
for (size_type k = 0; k < D.size1 (); ++ k)
{
size_type begin = D.index1_data () [k];
size_type end = D.index1_data () [k + 1];
for (size_type i = begin; i < end; ++ i)
{
unsigned int index_i = D.index2_data () [i];
value_type data_i = D.value_data()[i];
preconditioner[index_i] += Minv[k]*data_i*data_i;
}
}
//KRATOS_WATCH(preconditioner)
//invert the preconditioner matrix
// Entries with magnitude below 1e-26 would blow up on inversion, so they are
// replaced by a huge constant instead of being inverted.
for(unsigned int i = 0; i<A.size1(); i++)
{
if (fabs(preconditioner[i])>1e-26)
//preconditioner[i] = 1.00/preconditioner[i];
preconditioner[i] = 1.00/preconditioner[i];
else
preconditioner[i] = 1000000000000000000.0;
// NOTE(review): negative inverted entries are flipped to a huge positive
// value — presumably to keep the preconditioner positive; confirm intent.
if (preconditioner[i]<0.0)
preconditioner[i]*=-10000000000000000000.0;
//preconditioner[i]*=1000000000000000000.0;
/*
if (preconditioner[i]<0.0)
{
//preconditioner[i]=1.0;
KRATOS_THROW_ERROR(std::logic_error,"NEGATIVE PRECONDITIONER","")
}
*/
}
//KRATOS_WATCH("Finished COMPUTING preconditioner")
KRATOS_CATCH("");
}
//*************************************************************************************************
//*************************************************************************************************
// Returns true when the iterative solver has converged: either the residual
// norm is below the absolute tolerance 1e-15, or the relative ratio
// ||r||/||b|| is below `tolerance`. Throws once the iteration budget is
// exhausted.
bool ConvergenceCheck (TSystemVectorType& residual, TSystemVectorType& b, const double& tolerance, const int& iter_number, const int& max_iter_number)
{
    if (iter_number>max_iter_number)
        KRATOS_THROW_ERROR(std::logic_error,"MAX NUMBER OF ITERATIONS EXCEEDED, UR CG DIDNT CONVERGE","")
    // Compute the residual norm once (the original evaluated TwoNorm(residual)
    // twice on the non-converged path).
    const double residual_norm = TSparseSpace::TwoNorm(residual);
    if (residual_norm < 1e-15)
        return true;
    // Written in product form: residual_norm < tolerance * ||b|| is equivalent
    // to residual_norm/||b|| < tolerance but avoids dividing by a zero ||b||
    // (zero rhs with a non-trivial residual correctly yields "not converged").
    return residual_norm < tolerance * TSparseSpace::TwoNorm(b);
}
//*************************************************************************************************
//*************************************************************************************************
// Impose Dirichlet conditions by the penalty method: add a very large number
// to the diagonal of every fixed dof so that its equation is dominated by the
// penalty term. The rhs b is currently left untouched (see commented line).
void ModifyForDirichlet (TSystemMatrixType& A, TSystemVectorType& b)
{
KRATOS_TRY
double large_number = 1e20;
for(typename DofsArrayType::iterator i_dof = BaseType::mDofSet.begin() ; i_dof != BaseType::mDofSet.end() ; ++i_dof)
{
if(i_dof->IsFixed() == true)
{
unsigned int eq_id = i_dof->EquationId();
A(eq_id,eq_id) += large_number;
//b[eq_id] = 0.0001;
}
}
KRATOS_CATCH("");
}
// Computes the nodal pressure force f_p = trans(D) * p from the current nodal
// pressures and scatters its components into FORCE_X/Y/Z of every node that
// has neighbours (isolated nodes are skipped).
void CalculateNodalPressureForce (TSystemMatrixType& mD,TSystemVectorType& mMdiagInv,ModelPart& r_model_part)
{
KRATOS_TRY
int i=0;
unsigned int dof_position = (r_model_part.NodesBegin())->GetDofPosition(DISPLACEMENT_X);
// one pressure unknown per node; mMdiagInv is only used here for its size
const int size = TSparseSpace::Size(mMdiagInv);
TSystemVectorType p(size);
TSystemVectorType f_p(3*size);
// NOTE(review): p is not explicitly zeroed before the gather below, so
// entries of skipped nodes rely on the vector being zero-constructed —
// confirm TSystemVectorType semantics.
i=0;
for (typename NodesArrayType::iterator in=r_model_part.NodesBegin(); in!=r_model_part.NodesEnd(); ++in)
{
if( (in->GetValue(NEIGHBOUR_NODES)).size() != 0)// && in->FastGetSolutionStepValue(IS_FLUID)==1.0)
{
// pressure index = displacement equation id / TDim
i=in->GetDof(DISPLACEMENT_X,dof_position).EquationId()/TDim;
p[i]=in->FastGetSolutionStepValue(PRESSURE);
}
}
// f_p = trans(D) * p  (gradient operator applied to the pressure field)
TSparseSpace::TransposeMult(mD, p, f_p);
for (typename NodesArrayType::iterator in=r_model_part.NodesBegin(); in!=r_model_part.NodesEnd(); ++in)
{
if( (in->GetValue(NEIGHBOUR_NODES)).size() != 0)// && in->FastGetSolutionStepValue(IS_FLUID)==1.0)
{
in->FastGetSolutionStepValue(FORCE_X)=f_p[in->GetDof(DISPLACEMENT_X,dof_position).EquationId()];
in->FastGetSolutionStepValue(FORCE_Y)=f_p[in->GetDof(DISPLACEMENT_Y,dof_position).EquationId()];
in->FastGetSolutionStepValue(FORCE_Z)=f_p[in->GetDof(DISPLACEMENT_Z,dof_position).EquationId()];
}
}
KRATOS_CATCH("");
}
// Assigns the pressure of free-surface fluid nodes from the compressibility
// relation p = K * rho * (dA / A), where dA is the change of NODAL_AREA
// between the current and the previous step.
void ComputePressureAtFreeSurface (ModelPart& r_model_part, double bulk_modulus, double density)
{
for (typename NodesArrayType::iterator in=r_model_part.NodesBegin(); in!=r_model_part.NodesEnd(); ++in)
{
if( (in->GetValue(NEIGHBOUR_NODES)).size() != 0)// && in->FastGetSolutionStepValue(IS_FLUID)==1.0)
{
if (in->FastGetSolutionStepValue(IS_FLUID)==1.0 && in->FastGetSolutionStepValue(IS_FREE_SURFACE)==1.0)
{
//KRATOS_WATCH("Computing pressure at a free surface node")
// NOTE(review): divides by NODAL_AREA — assumes it is nonzero for
// nodes with neighbours; confirm upstream guarantees.
in->FastGetSolutionStepValue(PRESSURE)=bulk_modulus*density*(in->FastGetSolutionStepValue(NODAL_AREA) - in->FastGetSolutionStepValue(NODAL_AREA,1))/(in->FastGetSolutionStepValue(NODAL_AREA));
//=in->FastGetSolutionStepValue(PRESSURE,1)+bulk_modulus*density*(in->FastGetSolutionStepValue(NODAL_AREA) - in->FastGetSolutionStepValue(NODAL_AREA,1))/(in->FastGetSolutionStepValue(NODAL_AREA));
}
}
}
}
///////////////////////////////////////////////////////////////////////////
/*
void CalculateLupmedMass(ModelPart& model_part)
{
KRATOS_TRY
double dummy=0.0;
ProcessInfo& proc_info = model_part.GetProcessInfo();
for (typename ModelPart::ElementsContainerType::iterator im=model_part.ElementsBegin(); im!=model_part.ElementsEnd(); ++im)
{
im->Calculate(NODAL_MASS, dummy, proc_info);
}
KRATOS_CATCH("");
}
*/
void SavePressureIteration(ModelPart& model_part)
{
    KRATOS_TRY
    // Store each node's current PRESSURE into PRESSURE_OLD_IT so the next
    // nonlinear iteration can reference the previous iterate.
    for (typename ModelPart::NodesContainerType::iterator node_it = model_part.NodesBegin(); node_it != model_part.NodesEnd(); ++node_it)
    {
        node_it->FastGetSolutionStepValue(PRESSURE_OLD_IT) = node_it->FastGetSolutionStepValue(PRESSURE);
    }
    KRATOS_CATCH("");
}
///////////////// this is a function for performing the projection step of the ULF-FRAC method
// Projection step of the ULF-FRAC method (the element loop assumes 3-node
// triangles, i.e. 2D): accumulates the elemental pressure-gradient correction
// G*(PRESSURE_OLD_IT - PRESSURE) into the nodal vector VAUX, then applies the
// scaled correction to the free displacement dofs of every node with mass.
void FractionalStepProjection(ModelPart& model_part, double alpha_bossak)
{
KRATOS_TRY
//	double aaa=0.0;
double dt = model_part.GetProcessInfo()[DELTA_TIME];
BoundedMatrix<double,3,2> DN_DX;
array_1d<double,3> N;
array_1d<double,3> aux0, aux1, aux2; //this are sized to 3 even in 2D!!
//reset the auxilliary vector
for (typename ModelPart::NodesContainerType::iterator it=model_part.NodesBegin(); it!=model_part.NodesEnd(); ++it)
{
it->FastGetSolutionStepValue(VAUX)=ZeroVector(3);
}
//calculate the velocity correction and store it in VAUX
for (typename ModelPart::ElementsContainerType::iterator im=model_part.ElementsBegin(); im!=model_part.ElementsEnd(); ++im)
{
//get the list of nodes of the element
Geometry< Node<3> >& geom = im->GetGeometry();
double volume;
GeometryUtils::CalculateGeometryData(geom, DN_DX, N, volume);
// pressure increment between the stored previous iterate and the current one
array_1d<double,3> pres_inc;
//pres_inc[0] = geom[0].FastGetSolutionStepValue(PRESSURE,1)-geom[0].FastGetSolutionStepValue(PRESSURE);
//pres_inc[1] = geom[1].FastGetSolutionStepValue(PRESSURE,1)-geom[1].FastGetSolutionStepValue(PRESSURE);
//pres_inc[2] = geom[2].FastGetSolutionStepValue(PRESSURE,1)-geom[2].FastGetSolutionStepValue(PRESSURE);
pres_inc[0] = geom[0].FastGetSolutionStepValue(PRESSURE_OLD_IT)-geom[0].FastGetSolutionStepValue(PRESSURE);
pres_inc[1] = geom[1].FastGetSolutionStepValue(PRESSURE_OLD_IT)-geom[1].FastGetSolutionStepValue(PRESSURE);
pres_inc[2] = geom[2].FastGetSolutionStepValue(PRESSURE_OLD_IT)-geom[2].FastGetSolutionStepValue(PRESSURE);
//KRATOS_WATCH(pres_inc[0])
//KRATOS_WATCH(pres_inc[1])
//KRATOS_WATCH(pres_inc[2])
//Riccardo's modification: multiply the G(p_n+1-p_n) by 1/2
//pres_inc*=0.5;
//Gradient operator G:
BoundedMatrix<double,6,2> shape_func = ZeroMatrix(6, 2);
BoundedMatrix<double,6,3> G = ZeroMatrix(6,3);
// interleave the shape functions: rows 0,2,4 carry the x-dof, 1,3,5 the y-dof
for (int ii = 0; ii< 3; ii++)
{
int column = ii*2;
shape_func(column,0) = N[ii];
shape_func(column + 1, 1) = shape_func(column,0);
}
noalias(G)=prod(shape_func, trans(DN_DX));
G*=volume;
array_1d<double,6> aaa;
noalias(aaa) = prod(G,pres_inc);
// scatter the 6-entry element vector (2 dofs per node) onto the 3 nodes
array_1d<double,3> aux;
aux[0]=aaa[0];
aux[1]=aaa[1];
//z-component is zero
aux[2]=0.0;
geom[0].FastGetSolutionStepValue(VAUX) += aux;
//reusing aux for the second node
aux[0]=aaa[2];
aux[1]=aaa[3];
//z-component is zero
geom[1].FastGetSolutionStepValue(VAUX) += aux;
//reusing aux for the third node
aux[0]=aaa[4];
aux[1]=aaa[5];
geom[2].FastGetSolutionStepValue(VAUX) += aux;
}
//double beta_newm=0.25*(1.0-alpha_bossak)*(1.0-alpha_bossak);
// NOTE(review): the alpha_bossak argument is overwritten here, so the value
// passed by the caller is ignored — confirm this hard-coding is intended.
alpha_bossak=-0.3;
double coef=0.25*(1.0-alpha_bossak);
//double beta_newm=coef*(1.0-alpha_bossak);
for (typename ModelPart::NodesContainerType::iterator it=model_part.NodesBegin(); it!=model_part.NodesEnd(); ++it)
{
if( (it->GetValue(NEIGHBOUR_NODES)).size() != 0)
{
//VELOCITY = VELOCITY + dt * Minv * VAUX
if (it->FastGetSolutionStepValue(NODAL_MASS)>0.0000000001)
//KRATOS_THROW_ERROR(std::logic_error, "You have not computed the nodal mass!", "");
{
double dt_sq_Minv =coef*dt*dt / it->FastGetSolutionStepValue(NODAL_MASS);
array_1d<double,3>& temp = it->FastGetSolutionStepValue(VAUX);
if(!it->IsFixed(DISPLACEMENT_X))
{
it->FastGetSolutionStepValue(DISPLACEMENT_X)+=dt_sq_Minv*temp[0];
}
if(!it->IsFixed(DISPLACEMENT_Y))
{
it->FastGetSolutionStepValue(DISPLACEMENT_Y)+=dt_sq_Minv*temp[1];
}
}
}
}
KRATOS_CATCH("");
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
// Recompute nodal velocities and accelerations from the corrected
// displacements using the Newmark-Bossak scheme coefficients ma0..ma5
// (done nodally for efficiency, delegating to UpdateVelocity /
// UpdateAcceleration).
void UpdateAfterProjection( ModelPart& model_part, double alpha_bossak)
{
KRATOS_TRY
//updating time derivatives (nodally for efficiency)
double dt = model_part.GetProcessInfo()[DELTA_TIME];
array_1d<double,3> DeltaDisp;
double beta_newmark = 0.25*pow((1.00-alpha_bossak),2);
double gamma_newmark = 0.5-alpha_bossak;
/*
ma0 = 1.0/(mBetaNewmark*pow(DeltaTime,2));
ma1 = mGammaNewmark / (mBetaNewmark*DeltaTime);
ma2 = 1.0/(mBetaNewmark*DeltaTime);
ma3 = 1.0/(2.0*mBetaNewmark) - 1.0;
ma4 = mGammaNewmark/mBetaNewmark - 1.0;
*/
double ma0=1.0/(beta_newmark*pow(dt,2));
double ma1=gamma_newmark/(beta_newmark*dt);
double ma2=1.0/(beta_newmark*dt);
double ma3=(1.0/(2.0*beta_newmark))-1.0;
double ma4=(gamma_newmark/beta_newmark)-1.0;
double ma5=dt*0.5*((gamma_newmark/beta_newmark)-2.0);
for(ModelPart::NodeIterator i = model_part.NodesBegin() ; i != model_part.NodesEnd() ; ++i)
{
// displacement increment over the last time step
noalias(DeltaDisp) = (i)->FastGetSolutionStepValue(DISPLACEMENT) - (i)->FastGetSolutionStepValue(DISPLACEMENT,1);
array_1d<double,3>& CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY,0);
array_1d<double,3>& OldVelocity = (i)->FastGetSolutionStepValue(VELOCITY,1);
array_1d<double,3>& CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION,0);
array_1d<double,3>& OldAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION,1);
UpdateVelocity(CurrentVelocity,DeltaDisp,OldVelocity,OldAcceleration, ma1, ma4, ma5);
UpdateAcceleration(CurrentAcceleration,DeltaDisp,OldVelocity,OldAcceleration, ma0, ma2, ma3);
}
KRATOS_CATCH("");
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
// Newmark-Bossak velocity update: v_{n+1} = ma1*DeltaDisp - ma4*v_n - ma5*a_n.
inline void UpdateVelocity(array_1d<double, 3>& CurrentVelocity, const array_1d<double, 3>& DeltaDisp,
const array_1d<double, 3>& OldVelocity,
const array_1d<double, 3>& OldAcceleration, double& ma1, double& ma4, double & ma5)
{
noalias(CurrentVelocity) = ma1*DeltaDisp - ma4*OldVelocity - ma5*OldAcceleration;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
// Newmark-Bossak acceleration update: a_{n+1} = ma0*DeltaDisp - ma2*v_n - ma3*a_n.
inline void UpdateAcceleration(array_1d<double, 3>& CurrentAcceleration, const array_1d<double, 3>& DeltaDisp,
const array_1d<double, 3>& OldVelocity,
const array_1d<double, 3>& OldAcceleration, double& ma0, double& ma2, double & ma3)
{
noalias(CurrentAcceleration) = ma0*DeltaDisp - ma2*OldVelocity - ma3*OldAcceleration;
}
// Updates the nodal pressures of fluid nodes as
//   p = Mlumped^{-1} * (Mconsistent * p_n) + K*rho*(dA/A)
// i.e. the previous-step pressures filtered through the mass matrices
// (stabilization) plus the compressibility term from the change of nodal area.
void UpdatePressuresNew (TSystemMatrixType& mMconsistent, TSystemVectorType& mMdiagInv,ModelPart& r_model_part, double bulk_modulus, double density)
{
KRATOS_TRY
//getting the dof position
unsigned int dof_position = (r_model_part.NodesBegin())->GetDofPosition(DISPLACEMENT_X);
//	const double dt = r_model_part.GetProcessInfo()[DELTA_TIME];
//!!!! LATER ON - CHANGE THE WAY TO COMPUTE BULK MODULUS INSTEAD OF PASSING IT AS A PARAMETER
//resetting the pressures to zero
for (typename NodesArrayType::iterator in=r_model_part.NodesBegin(); in!=r_model_part.NodesEnd(); ++in)
{
in->FastGetSolutionStepValue(PRESSURE)=0.0;
}
//for pressure vectors
const int size = TSparseSpace::Size(mMdiagInv);
TSystemVectorType p_n(size);
//TSystemMatrixType aux(size,size);
//aux=ZeroMatrix(size,size);
TSystemVectorType temp(size);
TSystemVectorType history(size);
//TSparseSpace::SetToZero(p_n1);
TSparseSpace::SetToZero(p_n);
TSparseSpace::SetToZero(history);
//assuming that the bulk modulus is the same for all nodes in the model part
//p_n is the history, d_a - change_of_nodal_area/current_nodal_area
// gather previous-step pressures: pressure index = displacement eq. id / TDim
int i=0;
for (typename NodesArrayType::iterator in=r_model_part.NodesBegin(); in!=r_model_part.NodesEnd(); ++in)
{
if( (in->GetValue(NEIGHBOUR_NODES)).size() != 0 )// && in->FastGetSolutionStepValue(IS_FLUID)==1.0)
{
i=in->GetDof(DISPLACEMENT_X,dof_position).EquationId()/TDim;
p_n[i]=in->FastGetSolutionStepValue(PRESSURE,1);
}
}
//KRATOS_WATCH(p_n)
//history (multiplied by the consistent mass matrix) and then by the inverse lumped mass matrix
TSparseSpace::Mult(mMconsistent, p_n, history);
//KRATOS_WATCH(history)
// scatter the updated pressure back to the fluid nodes
int aa=0;
for (typename NodesArrayType::iterator in=r_model_part.NodesBegin(); in!=r_model_part.NodesEnd(); ++in)
{
if( (in->GetValue(NEIGHBOUR_NODES)).size() != 0)// && in->FastGetSolutionStepValue(IS_FLUID)==1.0)
{
aa=in->GetDof(DISPLACEMENT_X,dof_position).EquationId()/TDim;
if (in->FastGetSolutionStepValue(IS_FLUID)==1.0)
{
//+temp[aa]/density
in->FastGetSolutionStepValue(PRESSURE)=(mMdiagInv[aa]*history[aa])+bulk_modulus*density*(in->FastGetSolutionStepValue(NODAL_AREA) - in->FastGetSolutionStepValue(NODAL_AREA,1))/(in->FastGetSolutionStepValue(NODAL_AREA));
//this one is without mass matrix difference stab, just the laplacian
//in->FastGetSolutionStepValue(PRESSURE)=p_n[aa]+temp[aa]/density+bulk_modulus*density*(in->FastGetSolutionStepValue(NODAL_AREA) - in->FastGetSolutionStepValue(NODAL_AREA,1))/(in->FastGetSolutionStepValue(NODAL_AREA));
}
}
}
KRATOS_CATCH("");
}
//this function updates pressure after the Dx is obtained at every step of N-R procedure
// N-R pressure update: after a displacement increment Dx is obtained, the
// pressures are updated as
//   p_n1 = Mlumped^{-1} * ( Mconsistent * p_n + K*rho * D * (d - d_old) )
// i.e. consistent-mass filtered history plus the divergence of the
// displacement increment scaled by bulk modulus and density. The large
// commented block is a retired tau-stabilized Laplacian variant kept for
// reference.
void UpdatePressures ( TSystemMatrixType& mD,
TSystemMatrixType& mMconsistent, TSystemVectorType& mMdiagInv,ModelPart& r_model_part, double bulk_modulus, double density)
{
KRATOS_TRY
//getting the dof position
//	unsigned int dof_position = (r_model_part.NodesBegin())->GetDofPosition(DISPLACEMENT_X);
//	const double dt = r_model_part.GetProcessInfo()[DELTA_TIME];
//!!!! LATER ON - CHANGE THE WAY TO COMPUTE BULK MODULUS INSTEAD OF PASSING IT AS A PARAMETER
//for pressure vectors
const int size = TSparseSpace::Size(mMdiagInv);
//for displacement vectors
const int size_disp = TDim*TSparseSpace::Size(mMdiagInv);
TSystemVectorType p_n(size);
TSystemVectorType dp(size);
TSystemVectorType p_n1(size);
TSystemVectorType history(size);
//TSystemVectorType temp1(size);
//TSystemVectorType temp2(size);
TSparseSpace::SetToZero(p_n);
TSparseSpace::SetToZero(dp);
TSparseSpace::SetToZero(p_n1);
TSparseSpace::SetToZero(history);
//TSparseSpace::SetToZero(temp1);
//TSparseSpace::SetToZero(temp2);
TSystemMatrixType aux(size,size);
TSystemVectorType temp(size);
TSystemVectorType displ(size_disp);
/*
TSystemMatrixType GlobLapl (size,size);
TSystemMatrixType LocLapl (TDim+1,TDim+1);
for (typename ElementsArrayType::iterator im=r_model_part.ElementsBegin(); im!=r_model_part.ElementsEnd(); ++im)
{
boost::numeric::ublas::bounded_matrix<double,TDim+1,TDim> DN_DX;
array_1d<double,TDim+1> N;
array_1d<unsigned int ,TDim+1> local_indices;
Geometry< Node<3> >& geom = im->GetGeometry();
//calculating elemental values
double Volume;
GeometryUtils::CalculateGeometryData(geom, DN_DX, N, Volume);
array_1d<double,3> ms_vel_gauss = ZeroVector(3);
const array_1d<double,3>& fv0 = geom[0].FastGetSolutionStepValue(VELOCITY);
const array_1d<double,3>& fv1 = geom[1].FastGetSolutionStepValue(VELOCITY);
const array_1d<double,3>& fv2 = geom[2].FastGetSolutionStepValue(VELOCITY);
array_1d<double,3> fv3 = ZeroVector(3);
if (TDim==3)
fv3 = geom[3].FastGetSolutionStepValue(VELOCITY);
double nu = geom[0].FastGetSolutionStepValue(VISCOSITY)+
geom[1].FastGetSolutionStepValue(VISCOSITY) +
geom[2].FastGetSolutionStepValue(VISCOSITY);
double density = geom[0].FastGetSolutionStepValue(DENSITY)+
geom[1].FastGetSolutionStepValue(DENSITY) +
geom[2].FastGetSolutionStepValue(DENSITY);
ms_vel_gauss=fv0+fv1+fv2;
if (TDim==2)
{
nu*=0.33333333333;
density*=0.33333333333;
ms_vel_gauss*=0.33333333333;
}
if (TDim==3)
{
ms_vel_gauss+=fv3;
nu+=geom[3].FastGetSolutionStepValue(VISCOSITY);
density+=geom[3].FastGetSolutionStepValue(DENSITY);
ms_vel_gauss*=0.25;
nu*=0.25;
density*=0.25;
}
//finiding local indices
//for(int ii = 0; ii<TDim+1; ii++)
for(unsigned int ii = 0; ii<geom.size(); ii++)
{
local_indices[ii] = geom[ii].GetDof(DISPLACEMENT_X,dof_position).EquationId();
}
//the structural elements should not contribute to the Laplacian
int str_nr=0;
for (unsigned int k = 0;k<geom.size();k++)
{
str_nr+=(unsigned int)(geom[k].FastGetSolutionStepValue(IS_STRUCTURE));
}
int switch_var=0;
//set to zero the entries of the str. elements
if (str_nr==TDim+1)
switch_var=0;
else
switch_var =1;
//ms_vel_gauss[i] = msN[0]*(fv0[i]) + msN[1]*(fv1[i]) + msN[2]*(fv2[i]);
//but with one integration N=0.333333333
double norm_u;
double h;
if (TDim==2)
{
ms_vel_gauss[0] = 0.33333333333333*(fv0[0]+fv1[0]+fv2[0]);
ms_vel_gauss[1] = 0.33333333333333*(fv0[1]+fv1[1]+fv2[1]);
ms_vel_gauss[2] = 0.0;
//calculating parameter tau (saved internally to each element)
h = sqrt(2.00*Volume);
norm_u = ms_vel_gauss[0]*ms_vel_gauss[0] + ms_vel_gauss[1]*ms_vel_gauss[1];
norm_u = sqrt(norm_u);
}
if (TDim==3)
{
ms_vel_gauss[0] = 0.25*(fv0[0]+fv1[0]+fv2[0]+fv3[0]);
ms_vel_gauss[1] = 0.25*(fv0[1]+fv1[1]+fv2[1]+fv3[1]);
ms_vel_gauss[2] = 0.25*(fv0[2]+fv1[2]+fv2[2]+fv3[2]);
//calculating parameter tau (saved internally to each element)
h = sqrt(2.00*Volume);
norm_u = ms_vel_gauss[0]*ms_vel_gauss[0] + ms_vel_gauss[1]*ms_vel_gauss[1] + ms_vel_gauss[2]*ms_vel_gauss[2];
norm_u = sqrt(norm_u);
}
//- 4.0/(bulk_modulus*h*h)
//double tau = 1.00 / ( 4.00*nu/(h*h) - bulk_modulus*dt/h+2.00*norm_u/h);
//double tau=(bulk_modulus)*dt*h/(norm_u+nu/h);
//double tau=(bulk_modulus)*dt*dt;//h/(norm_u+*nu/h);
//my last proposal
double tau = (bulk_modulus)*dt*nu/(norm_u*norm_u+(nu/dt));
//Ric's proposal - doesnt work - checked with 2d-splash
//double tau = (bulk_modulus)*dt*1.0/((1.0/dt)+(nu/h*h));
//SWITCHED OFF THE STABILIZATION!
switch_var=0;
noalias(LocLapl)=switch_var*prod(DN_DX,trans(DN_DX));
for(unsigned int row = 0; row<TDim+1; row++)
{
unsigned int row_index = local_indices[row] / (TDim);
for(unsigned int col = 0; col<TDim+1; col++)
{
unsigned int col_index = local_indices[col] /(TDim);
GlobLapl(row_index, col_index)+=tau*Volume*LocLapl(row,col);
}
}
//end of the loop over elements
}
for (int i=0;i<mMdiagInv.size(); i++)
{
aux(i,i)=GlobLapl(i,i)*mMdiagInv(i);
}
*/
//assuming that the bulk modulus is the same for all nodes in the model part
//
//additionally here we update densities, simply by implyimg: ro_0xV_0=ro_1xV_1
// gather old pressures node-by-node (sequential index, bounded by `size`)
int i=0;
for (typename NodesArrayType::iterator in=r_model_part.NodesBegin(); in!=r_model_part.NodesEnd(); ++in)
{
if( (in->GetValue(NEIGHBOUR_NODES)).size() != 0 )
{
//in pn we save old pressures
if (i<size)
p_n[i]=in->FastGetSolutionStepValue(PRESSURE,1);
i++;
//here we update densities
//if (in->FastGetSolutionStepValue(NODAL_AREA)!=0.0)
//	in->FastGetSolutionStepValue(DENSITY)=in->FastGetSolutionStepValue(DENSITY,1)*in->FastGetSolutionStepValue(NODAL_AREA,1)/in->FastGetSolutionStepValue(NODAL_AREA);
}
}
//temp = prod(aux, p_n);
//history (multiplied by the consistent mass matrix)
TSparseSpace::Mult(mMconsistent, p_n, history);
//now we compute the pressure increment
//first we save in the p_n1 the current deltap = KDd //Dx is denoted by d
//
//we store displacements in one big vector
for(typename DofsArrayType::iterator i_dof = BaseType::mDofSet.begin() ; i_dof != BaseType::mDofSet.end() ; ++i_dof)
{
displ[i_dof->EquationId()]=i_dof->GetSolutionStepValue()-i_dof->GetSolutionStepValue(1);
}
// dp = K*rho * D * (d - d_old): divergence of the displacement increment
TSparseSpace::Mult(mD, displ, dp);
//KRATOS_WATCH(bulk_modulus)
dp*=(bulk_modulus*density);
//now we add the history (multiplied by the consistent mass matrix)
//adding: mMconsistent*p_n + KDdipsl
//p_n1=(temp+dp);
//and now we multiply the result with the inverse of the lumped mass matrix
//we reutilize the auxilliary matrix temp
for (int ii=0; ii<size; ii++)
{
//temp1[ii]=mMdiagInv[ii]*p_n1[ii];
p_n1[ii]=mMdiagInv[ii]*(history[ii]+dp[ii]);
}
//this is just to check
//for (int ii=0; ii<size;ii++)
//{
//temp2[ii]=(mMdiagInv[ii]*dp[ii])+p_n[ii];
//}
//resetting the pressures to zero
//
for (typename NodesArrayType::iterator in=r_model_part.NodesBegin(); in!=r_model_part.NodesEnd(); ++in)
{
if( (in->GetValue(NEIGHBOUR_NODES)).size() != 0 )
{
in->FastGetSolutionStepValue(PRESSURE)=0.0;
}
}
// scatter the updated pressures back in the same node order used to gather
int aa=0;
for (typename NodesArrayType::iterator in=r_model_part.NodesBegin(); in!=r_model_part.NodesEnd(); ++in)
{
if( (in->GetValue(NEIGHBOUR_NODES)).size() != 0 )
{
//not to add the "lonely" nodes , that are not part of the model (e.g. structure walls)
if (aa<size)
in->FastGetSolutionStepValue(PRESSURE)=p_n1[aa];//+temp[aa]/density;
//in->FastGetSolutionStepValue(PRESSURE)=temp2[aa];
aa++;
}
}
//KRATOS_WATCH("PRESSURE UPDATE FUNCTION INSIDE BULDER AND SOLVER")
/*
for (typename NodesArrayType::iterator in=r_model_part.NodesBegin(); in!=r_model_part.NodesEnd(); ++in)
{
KRATOS_WATCH(in->FastGetSolutionStepValue(PRESSURE));
}
*/
KRATOS_CATCH("");
}
//**************************************************************************
//**************************************************************************
/*
void SystemSolve(
const TSystemMatrixType& A,
const TSystemMatrixType& D,
const TSystemVectorType& mMass_inverse,
const TSystemVectorType& mpreconditioner,
TSystemVectorType& x,
const TSystemVectorType& b
)
{
KRATOS_TRY
const int size = TSparseSpaceType::Size(rX);
unsigned int IterationsNumber = 0;
TSystemVectorType r(size);
TSystemVectorType q(size);
PreconditionedMult(rA,rX,r);
TSparseSpaceType::ScaleAndAdd(1.00, rB, -1.00, r);
BaseType::mBNorm = TSparseSpaceType::TwoNorm(rB);
VectorType p(r);
VectorType q(size);
double roh0 = TSparseSpaceType::Dot(r, r);
double roh1 = roh0;
double beta = 0;
if(fabs(roh0) < 1.0e-30) //modification by Riccardo
// if(roh0 == 0.00)
return false;
do
{
PreconditionedMult(rA,p,q);
double pq = TSparseSpaceType::Dot(p,q);
//if(pq == 0.00)
if(fabs(pq) <= 1.0e-30)
break;
double alpha = roh0 / pq;
TSparseSpaceType::ScaleAndAdd(alpha, p, 1.00, rX);
TSparseSpaceType::ScaleAndAdd(-alpha, q, 1.00, r);
roh1 = TSparseSpaceType::Dot(r,r);
beta = (roh1 / roh0);
TSparseSpaceType::ScaleAndAdd(1.00, r, beta, p);
roh0 = roh1;
BaseType::mResidualNorm = sqrt(roh1);
BaseType::mIterationsNumber++;
} while(BaseType::IterationNeeded() && (fabs(roh0) > 1.0e-30)
KRATOS_CATCH("");
}
*/
/*@} */
/**@name Private Access */
/*@{ */
/*@} */
/**@name Private Inquiry */
/*@{ */
/*@} */
/**@name Un accessible methods */
/*@{ */
/*@} */
}; /* Class ResidualBasedEliminationDiscreteLaplacianBuilderAndSolver */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_ELIMINATION_QUASI_INCOMPRESSIBLE_BUILDER_AND_SOLVER defined */
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y.
 * Returns 1 if the difference is negative, 0 otherwise.
 * NOTE: *y is used as scratch space by the carry normalization below and is
 * therefore modified, exactly as in the classic GNU libc example this
 * follows. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so that x->tv_usec - y->tv_usec >= 0. */
    if (x->tv_usec < y->tv_usec)
    {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    /* Move excess whole seconds of the usec difference back into y. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec is now certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Driver: allocates a double-buffered 3D grid, runs the 7-point stencil for
 * Nt-1 time steps, and reports the best wall-clock time over TESTS runs. */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    /* Grid sizes (each +2 for the halo) and number of time steps.
     * BUG FIX: these were read uninitialized (undefined behavior) when the
     * corresponding command-line arguments were missing; give defaults. */
    int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* A[2][Nz][Ny][Nx]: two buffers swapped each time step. */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 32;
    tile_size[1] = 32;
    tile_size[2] = 16;
    tile_size[3] = 2048;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;

    const int BASE = 1024;
    const double alpha = 0.0876;
    const double beta = 0.0765;

    /* Initialize the WHOLE grid, including the index-0 halo planes.
     * BUG FIX: the original loops started at 1, leaving plane 0 of A[0]
     * uninitialized even though the stencil reads it (UB). The second buffer's
     * halo is zeroed too, since the stencil never writes boundary cells. */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0;
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
                            + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                                      A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* BUG FIX: was lowercase `min(...)`, which is undefined — the macro
         * defined at the top of this file is MIN. */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays (Causing performance degradation
    /* for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
    free(A[0][i][j]);
    free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    */
    return 0;
}
|
GB_unop__minv_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_fc64_fc64)
// op(A') function: GB (_unop_tran__minv_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = GB_FC64_minv (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_FC64_minv (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = GB_FC64_minv (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_FC64_minv (Ax [p]) for every entry: applies the complex
// reciprocal either densely (Ab == NULL) or only where the bitmap Ab is set.
// (Auto-generated file: edits here are comments only.)
GrB_Info GB (_unop_apply__minv_fc64_fc64)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// dense case: every one of the anz entries is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = GB_FC64_minv (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = GB_FC64_minv (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (A'): the included transpose template supplies the loop structure;
// the GB_CAST_OP macro defined above provides the per-entry operation.
// (Auto-generated file: edits here are comments only.)
GrB_Info GB (_unop_tran__minv_fc64_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
padlock_fmt_plug.c | /*
* Format for cracking Padlock password databases.
*
* This software is Copyright (c) 2017, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_padlock;
#elif FMT_REGISTERS_H
john_register_one(&fmt_padlock);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
static int omp_t = 1;
#ifndef OMP_SCALE
#define OMP_SCALE 4
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "sha.h"
#include "loader.h"
#include "aes.h"
#include "aes_ccm.h"
#include "pbkdf2_hmac_sha256.h"
#include "jumbo.h"
#include "memdbg.h"
#define FORMAT_LABEL "Padlock"
#define FORMAT_NAME ""
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA256 AES " SHA256_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME "PBKDF2-SHA256 AES 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME "PBKDF2-SHA256 AES 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#endif
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(int)
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define FORMAT_TAG "$padlock$"
#define FORMAT_TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
static struct fmt_tests tests[] = {
// empty database
{"$padlock$1$10000$64$16$bc5ce69b9b9dadafb4566f570cccd15d$6c0f57ec2a0a98974c567cc25a12fff1$16$226683e36d47dc8a3bf7c49fedfdae88$10$07ccbca4012cfa37997d", "openwall"}, // plaintext is "[]"
// database with one entry
{"$padlock$1$10000$64$16$bc5ce69b9b9dadafb4566f570cccd15d$0fb7f674020b4223715049a56fa513ca$16$edc14c476a12edc107ac694fc9cb0862$195$8b7a014217b512f88decb734d5edf7c36747dc44a244e01cc1e3e2366e8f70c32edb3a037c61fdc5dba7565131cdbea1a8bb87a9b7923a70d44c8b2ea14f4109adcd3a2f9d1847fd7b77baf2237249354cddc26db31f00188a5160f98a6319cb3d0cca8edcda7fda5d8b2368a584a7fd96eb45adf226176a40477bc3c7300bb51f1e411721b5eeac2af382623bb18a8547cde12d1f21ee26e36a801f77246bbd6e6c3ee8a39f8161b2f7847f5a42a4573bf0de14413e1ce177a0f14dd966f8e71653ae", "openwall"},
// database with one entry
{"$padlock$1$10000$64$16$217396d3560f2b9129f7556d556b3150$ff3e884c6211db93abc354d3557d9c04$16$8f1203538ceb691da7443dfd16bc6a36$198$b9c8bf9a7972f71d05b91f60edd4463661730cf4c34a9f7875e759fd8752d5c84ca75b16f3b278f7553ed6d005438f072fdfc3f2d26a5448dcd48d71e707446c1ee2b91761448e742d772998cc61160b5f2ebb80ecf64c8aab7a71a932cadc48aed0cca6dbcef971306b0ba74058f0671b4078c125bcf3eb394a9e9a317b96de48c34af7494e02522de94902f63f316167cb7ee40c4b42a50fe61bdd979c41531ed1a2fa4a13c33ffcf2c7ad4be4abc240f8c94e51afaee0ad9afd494d74c5a92cf0cc751f29", "password@12345"},
{NULL}
};
/* Per-hash parameters parsed out of the "$padlock$..." ciphertext string. */
static struct custom_salt {
	int version;              // format version field (first "$" token)
	int iterations;           // PBKDF2-SHA256 iteration count
	int tag_len;              // CCM auth tag length in BYTES (input is in bits)
	int saltlen;              // number of bytes used in salt[]
	int ctlen;                // number of bytes used in ct[]
	int addlen;               // number of bytes used in add[]
	unsigned char salt[64];   // PBKDF2 salt
	unsigned char iv[16];     // 16 bytes stored; only 13 are passed to CCM
	unsigned char tag[128];   // unused here; tag is taken from the end of ct[]
	unsigned char add[128];   // CCM additional authenticated data
	unsigned char ct[4096];   // ciphertext, with the auth tag as its last bytes
} *cur_salt;

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *saved_len;
static int *cracked, cracked_count;
/* One-time format setup: scale key slots for OpenMP and allocate buffers. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	/* max gets an extra OMP_SCALE factor for better thread utilization */
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	cracked_count = self->params.max_keys_per_crypt;
	cracked   = mem_calloc(sizeof(*cracked), cracked_count);
	saved_key = mem_calloc(sizeof(*saved_key), cracked_count);
	saved_len = mem_calloc(cracked_count, sizeof(*saved_len));
}
/* Release everything allocated by init(). */
static void done(void)
{
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
	MEM_FREE(cracked);
}
/*
 * Sanity-check a candidate ciphertext string before get_salt() parses it.
 *
 * Fixes over the previous revision:
 *  - saltlen was allowed up to 128 although cs.salt[] is only 64 bytes, so
 *    get_salt() could overflow the salt buffer; cap is now 64.
 *  - hex fields were only checked with '>' while get_salt() decodes a fixed
 *    (iv) or length-field-declared number of bytes, reading past the token's
 *    NUL for short input; lengths are now required to match exactly.
 *  - crypt_all() computes ct + ctlen - tag_len, so ctlen must be >= tag_len.
 *
 * Returns 1 when the string is well-formed, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p = ciphertext, *ctcopy, *keeptr;
	int extra;
	int res;
	int tag_len = 0;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LENGTH))
		return 0;

	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LENGTH;
	if ((p = strtokm(ctcopy, "$")) == NULL) // version
		goto bail;
	if (!isdec(p))
		goto bail;
	if ((p = strtokm(NULL, "$")) == NULL) // iterations
		goto bail;
	if (!isdec(p))
		goto bail;
	if ((p = strtokm(NULL, "$")) == NULL) // tag_len (in bits)
		goto bail;
	if (!isdec(p))
		goto bail;
	res = atoi(p);
	if (res != 64 && res != 128 && res != 96)
		goto bail;
	tag_len = res / 8; // bytes, same conversion as get_salt()
	if ((p = strtokm(NULL, "$")) == NULL) // saltlen
		goto bail;
	if (!isdec(p))
		goto bail;
	res = atoi(p);
	if (res > 64) // cs.salt[] holds at most 64 bytes
		goto bail;
	if ((p = strtokm(NULL, "$")) == NULL) // salt
		goto bail;
	if (hexlenl(p, &extra) != res * 2 || extra)
		goto bail;
	if (!ishexlc(p))
		goto bail;
	if ((p = strtokm(NULL, "$")) == NULL) // iv
		goto bail;
	if (hexlenl(p, &extra) != 16 * 2 || extra) // get_salt() decodes exactly 16 bytes
		goto bail;
	if (!ishexlc(p))
		goto bail;
	if ((p = strtokm(NULL, "$")) == NULL) // addlen
		goto bail;
	if (!isdec(p))
		goto bail;
	res = atoi(p);
	if (res > 128) // cs.add[] holds at most 128 bytes
		goto bail;
	if ((p = strtokm(NULL, "$")) == NULL) // add
		goto bail;
	if (hexlenl(p, &extra) != res * 2 || extra)
		goto bail;
	if (!ishexlc(p))
		goto bail;
	if ((p = strtokm(NULL, "$")) == NULL) // ctlen
		goto bail;
	if (!isdec(p))
		goto bail;
	res = atoi(p);
	if (res > 4096 || res < tag_len) // ct[] capacity; tag lives at the end of ct
		goto bail;
	if ((p = strtokm(NULL, "$")) == NULL) // ct
		goto bail;
	if (hexlenl(p, &extra) != res * 2 || extra)
		goto bail;
	if (!ishexlc(p))
		goto bail;

	MEM_FREE(keeptr);
	return 1;

bail:
	MEM_FREE(keeptr);
	return 0;
}
/*
 * Parse a (previously valid()-ated) ciphertext into a custom_salt.
 * Returns a pointer to a static struct; the caller copies SALT_SIZE bytes.
 * Token order must match valid(): version, iterations, tag_len(bits), saltlen,
 * salt, iv, addlen, add, ctlen, ct.
 */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	int i;
	char *p = ciphertext, *ctcopy, *keeptr;

	memset(&cs, 0, sizeof(cs));
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LENGTH;
	p = strtokm(ctcopy, "$");
	cs.version = atoi(p);
	p = strtokm(NULL, "$");
	cs.iterations = atoi(p);
	p = strtokm(NULL, "$");
	cs.tag_len = atoi(p) / 8; // bits-to-bytes
	p = strtokm(NULL, "$");
	cs.saltlen = atoi(p);
	p = strtokm(NULL, "$");
	// decode hex pairs; atoi16[] maps an ASCII hex digit to its value
	for (i = 0; i < cs.saltlen; i++)
		cs.salt[i] = (atoi16[ARCH_INDEX(p[2*i])] << 4) | atoi16[ARCH_INDEX(p[2*i+1])];
	p = strtokm(NULL, "$");
	// iv is always stored as 16 bytes (only 13 are later used by CCM)
	for (i = 0; i < 16; i++)
		cs.iv[i] = (atoi16[ARCH_INDEX(p[2*i])] << 4) | atoi16[ARCH_INDEX(p[2*i+1])];
	p = strtokm(NULL, "$");
	cs.addlen = atoi(p);
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.addlen; i++)
		cs.add[i] = (atoi16[ARCH_INDEX(p[2*i])] << 4) | atoi16[ARCH_INDEX(p[2*i+1])];
	p = strtokm(NULL, "$");
	cs.ctlen = atoi(p);
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.ctlen; i++)
		cs.ct[i] = (atoi16[ARCH_INDEX(p[2*i])] << 4) | atoi16[ARCH_INDEX(p[2*i+1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/* Remember which salt subsequent crypt_all() calls should use. */
static void set_salt(void *salt)
{
	cur_salt = salt;
}
/*
 * Derive a key from each candidate password (PBKDF2-SHA256), attempt CCM
 * decryption of the database, and mark cracked[] entries on success.
 * Returns the number of candidates processed.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

	memset(cracked, 0, sizeof(cracked[0])*cracked_count);

#ifdef _OPENMP
#pragma omp parallel for
	/* without OpenMP the block below runs once with index == 0 */
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		unsigned char master[MAX_KEYS_PER_CRYPT][32];
		unsigned char output[4096] = {0};
		int i;
		unsigned char *tag = cur_salt->ct + cur_salt->ctlen - cur_salt->tag_len; // last "tag_len" bytes
#ifdef SIMD_COEF_32
		/* SIMD path: derive SSE_GROUP_SZ_SHA256 keys in one go */
		int lens[MAX_KEYS_PER_CRYPT];
		unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			pout[i] = master[i];
		}
		pbkdf2_sha256_sse((const unsigned char**)pin, lens, cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, pout, 32, 0);
#else
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
			pbkdf2_sha256((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, master[i], 32, 0);
#endif
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			memset(output, 0, 4096); // avoid possible false positives that can be caused by older "valid" decrypted data
			aes_ccm_auth_decrypt(master[i], 256,
					cur_salt->ctlen - cur_salt->tag_len,
					cur_salt->iv, 13, cur_salt->add, // 13 is the correct iv size for padlock + sjcl combo
					cur_salt->addlen, cur_salt->ct, output,
					tag, cur_salt->tag_len);
			// CCM tag calculation is broken in Padlock + SJCL combination. Padlock sends "add" data to SJCL
			// without doing base64 decoding! As a result the JavaScript code in SJCL behaves very weirdly.
			// Instead of trying to emulate this broken behavior and struggling with JavaScript, we simply use
			// known plaintext attack here!
			if (cur_salt->ctlen - cur_salt->tag_len == 2) { // special case, empty database
				if (strncmp((const char*)output, "[]", 2) == 0)
					cracked[index+i] = 1;
			} else { // general case
				// plaintext is a JSON array whose entries contain "updated"
				if (output[0] != '[')
					cracked[index+i] = 0;
				else if (strstr((const char*)output, "\"updated\""))
					cracked[index+i] = 1;
			}
		}
	}

	return count;
}
/* True if any candidate in this batch cracked. */
static int cmp_all(void *binary, int count)
{
	int i = 0;

	while (i < count) {
		if (cracked[i])
			return 1;
		i++;
	}
	return 0;
}
/* True if this particular candidate cracked. */
static int cmp_one(void *binary, int index)
{
	return cracked[index] ? 1 : 0;
}
/* All verification already happened in crypt_all(); nothing left to check. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store one candidate password, recording its (truncated) length. */
static void set_key(char *key, int index)
{
	int len = strnzcpyn(saved_key[index], key, sizeof(saved_key[index]));

	saved_len[index] = len;
}
/* Return the stored candidate password for this slot. */
static char *get_key(int index)
{
	return &saved_key[index][0];
}
/* Tunable-cost reporting hook: expose the PBKDF2 iteration count. */
static unsigned int padlock_iteration_count(void *salt)
{
	return (unsigned int)((struct custom_salt *)salt)->iterations;
}
/* Format descriptor registered with the John the Ripper core. */
struct fmt_main fmt_padlock = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,                       // min plaintext length
		PLAINTEXT_LENGTH,
		BINARY_SIZE,             // 0: no binary, salt-only format
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",   // tunable cost name, see padlock_iteration_count
		},
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{
			padlock_iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,                    // salt_compare
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
yescrypt-simd_c.h | /*-
* Copyright 2009 Colin Percival
* Copyright 2012-2014 Alexander Peslyak
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* This file was originally written by Colin Percival as part of the Tarsnap
* online backup system.
*/
/*
* On 64-bit, enabling SSE4.1 helps our pwxform code indirectly, via avoiding
* gcc bug 54349 (fixed for gcc 4.9+). On 32-bit, it's of direct help. AVX
* and XOP are of further help either way.
*/
#ifndef __SSE4_1__
#warning "Consider enabling SSE4.1, AVX, or XOP in the C compiler for significantly better performance"
#endif
#include <emmintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "sha256.h"
#include "sysendian.h"
#include "yescrypt.h"
#include "yescrypt-platform_c.h"
#if __STDC_VERSION__ >= 199901L
/* have restrict */
#elif defined(__GNUC__)
#define restrict __restrict
#else
#define restrict
#endif
#define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint));
#define PREFETCH_OUT(x, hint) /* disabled */
#ifdef __XOP__
#define ARX(out, in1, in2, s) \
out = _mm_xor_si128(out, _mm_roti_epi32(_mm_add_epi32(in1, in2), s));
#else
#define ARX(out, in1, in2, s) \
{ \
__m128i T = _mm_add_epi32(in1, in2); \
out = _mm_xor_si128(out, _mm_slli_epi32(T, s)); \
out = _mm_xor_si128(out, _mm_srli_epi32(T, 32-s)); \
}
#endif
#define SALSA20_2ROUNDS \
/* Operate on "columns" */ \
ARX(X1, X0, X3, 7) \
ARX(X2, X1, X0, 9) \
ARX(X3, X2, X1, 13) \
ARX(X0, X3, X2, 18) \
\
/* Rearrange data */ \
X1 = _mm_shuffle_epi32(X1, 0x93); \
X2 = _mm_shuffle_epi32(X2, 0x4E); \
X3 = _mm_shuffle_epi32(X3, 0x39); \
\
/* Operate on "rows" */ \
ARX(X3, X0, X1, 7) \
ARX(X2, X3, X0, 9) \
ARX(X1, X2, X3, 13) \
ARX(X0, X1, X2, 18) \
\
/* Rearrange data */ \
X1 = _mm_shuffle_epi32(X1, 0x39); \
X2 = _mm_shuffle_epi32(X2, 0x4E); \
X3 = _mm_shuffle_epi32(X3, 0x93);
/**
* Apply the salsa20/8 core to the block provided in (X0 ... X3).
*/
#define SALSA20_8_BASE(maybe_decl, out) \
{ \
maybe_decl Y0 = X0; \
maybe_decl Y1 = X1; \
maybe_decl Y2 = X2; \
maybe_decl Y3 = X3; \
SALSA20_2ROUNDS \
SALSA20_2ROUNDS \
SALSA20_2ROUNDS \
SALSA20_2ROUNDS \
(out)[0] = X0 = _mm_add_epi32(X0, Y0); \
(out)[1] = X1 = _mm_add_epi32(X1, Y1); \
(out)[2] = X2 = _mm_add_epi32(X2, Y2); \
(out)[3] = X3 = _mm_add_epi32(X3, Y3); \
}
#define SALSA20_8(out) \
SALSA20_8_BASE(__m128i, out)
/**
* Apply the salsa20/8 core to the block provided in (X0 ... X3) ^ (Z0 ... Z3).
*/
#define SALSA20_8_XOR_ANY(maybe_decl, Z0, Z1, Z2, Z3, out) \
X0 = _mm_xor_si128(X0, Z0); \
X1 = _mm_xor_si128(X1, Z1); \
X2 = _mm_xor_si128(X2, Z2); \
X3 = _mm_xor_si128(X3, Z3); \
SALSA20_8_BASE(maybe_decl, out)
#define SALSA20_8_XOR_MEM(in, out) \
SALSA20_8_XOR_ANY(__m128i, (in)[0], (in)[1], (in)[2], (in)[3], out)
#define SALSA20_8_XOR_REG(out) \
SALSA20_8_XOR_ANY(/* empty */, Y0, Y1, Y2, Y3, out)
/* One 64-byte Salsa20 block, viewed either as 16 32-bit words or as four
 * 128-bit SSE2 registers. */
typedef union {
	uint32_t w[16];
	__m128i q[4];
} salsa20_blk_t;
/**
* blockmix_salsa8(Bin, Bout, r):
* Compute Bout = BlockMix_{salsa20/8, r}(Bin). The input Bin must be 128r
* bytes in length; the output Bout must also be the same size.
*/
/* Classic scrypt BlockMix over 2r 64-byte sub-blocks; step numbers in the
 * comments refer to the scrypt specification.  Note the shuffled output
 * order: even sub-blocks land in Bout[0..r], odd ones in Bout[r+1..2r]. */
static inline void
blockmix_salsa8(const salsa20_blk_t *restrict Bin,
    salsa20_blk_t *restrict Bout, size_t r)
{
	__m128i X0, X1, X2, X3;
	size_t i;

	r--;
	/* prefetch every input sub-block before the compute loop */
	PREFETCH(&Bin[r * 2 + 1], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin[i * 2], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		PREFETCH(&Bin[i * 2 + 1], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
	}
	PREFETCH(&Bin[r * 2], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

	/* 1: X <-- B_{2r - 1} */
	X0 = Bin[r * 2 + 1].q[0];
	X1 = Bin[r * 2 + 1].q[1];
	X2 = Bin[r * 2 + 1].q[2];
	X3 = Bin[r * 2 + 1].q[3];

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	SALSA20_8_XOR_MEM(Bin[0].q, Bout[0].q)

	/* 2: for i = 0 to 2r - 1 do */
	/* two sub-blocks (one odd, one even) per iteration */
	for (i = 0; i < r;) {
		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		SALSA20_8_XOR_MEM(Bin[i * 2 + 1].q, Bout[r + 1 + i].q)

		i++;

		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		SALSA20_8_XOR_MEM(Bin[i * 2].q, Bout[i].q)
	}

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	SALSA20_8_XOR_MEM(Bin[r * 2 + 1].q, Bout[r * 2 + 1].q)
}
/*
* (V)PSRLDQ and (V)PSHUFD have higher throughput than (V)PSRLQ on some CPUs
* starting with Sandy Bridge. Additionally, PSHUFD uses separate source and
* destination registers, whereas the shifts would require an extra move
* instruction for our code when building without AVX. Unfortunately, PSHUFD
* is much slower on Conroe (4 cycles latency vs. 1 cycle latency for PSRLQ)
* and somewhat slower on some non-Intel CPUs (luckily not including AMD
* Bulldozer and Piledriver). Since for many other CPUs using (V)PSHUFD is a
* win in terms of throughput or/and not needing a move instruction, we
* currently use it despite of the higher latency on some older CPUs. As an
* alternative, the #if below may be patched to only enable use of (V)PSHUFD
* when building with SSE4.1 or newer, which is not available on older CPUs
* where this instruction has higher latency.
*/
#if 1
#define HI32(X) \
_mm_shuffle_epi32((X), _MM_SHUFFLE(2,3,0,1))
#elif 0
#define HI32(X) \
_mm_srli_si128((X), 4)
#else
#define HI32(X) \
_mm_srli_epi64((X), 32)
#endif
#if defined(__x86_64__) && (defined(__ICC) || defined(__llvm__))
/* Intel's name, also supported by recent gcc */
#define EXTRACT64(X) _mm_cvtsi128_si64(X)
#elif defined(__x86_64__) && !defined(_MSC_VER) && !defined(__OPEN64__)
/* gcc got the 'x' name earlier than non-'x', MSVC and Open64 had bugs */
#define EXTRACT64(X) _mm_cvtsi128_si64x(X)
#elif defined(__x86_64__) && defined(__SSE4_1__)
/* No known bugs for this intrinsic */
#include <smmintrin.h>
#define EXTRACT64(X) _mm_extract_epi64((X), 0)
#elif defined(__SSE4_1__)
/* 32-bit */
#include <smmintrin.h>
#if 0
/* This is currently unused by the code below, which instead uses these two
* intrinsics explicitly when (!defined(__x86_64__) && defined(__SSE4_1__)) */
#define EXTRACT64(X) \
((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
((uint64_t)(uint32_t)_mm_extract_epi32((X), 1) << 32))
#endif
#else
/* 32-bit or compilers with known past bugs in _mm_cvtsi128_si64*() */
#define EXTRACT64(X) \
((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
((uint64_t)(uint32_t)_mm_cvtsi128_si32(HI32(X)) << 32))
#endif
/* This is tunable */
#define S_BITS 8
/* Not tunable in this implementation, hard-coded in a few places */
#define S_SIMD 2
#define S_P 4
/* Number of S-boxes. Not tunable by design, hard-coded in a few places. */
#define S_N 2
/* Derived values. Not tunable except via S_BITS above. */
#define S_SIZE1 (1 << S_BITS)
#define S_MASK ((S_SIZE1 - 1) * S_SIMD * 8)
#define S_MASK2 (((uint64_t)S_MASK << 32) | S_MASK)
#define S_SIZE_ALL (S_N * S_SIZE1 * S_SIMD * 8)
#if !defined(__x86_64__) && defined(__SSE4_1__)
/* 32-bit with SSE4.1 */
#define PWXFORM_X_T __m128i
#define PWXFORM_SIMD(X, x, s0, s1) \
x = _mm_and_si128(X, _mm_set1_epi64x(S_MASK2)); \
s0 = *(const __m128i *)(S0 + (uint32_t)_mm_cvtsi128_si32(x)); \
s1 = *(const __m128i *)(S1 + (uint32_t)_mm_extract_epi32(x, 1)); \
X = _mm_mul_epu32(HI32(X), X); \
X = _mm_add_epi64(X, s0); \
X = _mm_xor_si128(X, s1);
#else
/* 64-bit, or 32-bit without SSE4.1 */
#define PWXFORM_X_T uint64_t
#define PWXFORM_SIMD(X, x, s0, s1) \
x = EXTRACT64(X) & S_MASK2; \
s0 = *(const __m128i *)(S0 + (uint32_t)x); \
s1 = *(const __m128i *)(S1 + (x >> 32)); \
X = _mm_mul_epu32(HI32(X), X); \
X = _mm_add_epi64(X, s0); \
X = _mm_xor_si128(X, s1);
#endif
#define PWXFORM_ROUND \
PWXFORM_SIMD(X0, x0, s00, s01) \
PWXFORM_SIMD(X1, x1, s10, s11) \
PWXFORM_SIMD(X2, x2, s20, s21) \
PWXFORM_SIMD(X3, x3, s30, s31)
#define PWXFORM \
{ \
PWXFORM_X_T x0, x1, x2, x3; \
__m128i s00, s01, s10, s11, s20, s21, s30, s31; \
PWXFORM_ROUND PWXFORM_ROUND \
PWXFORM_ROUND PWXFORM_ROUND \
PWXFORM_ROUND PWXFORM_ROUND \
}
#define XOR4(in) \
X0 = _mm_xor_si128(X0, (in)[0]); \
X1 = _mm_xor_si128(X1, (in)[1]); \
X2 = _mm_xor_si128(X2, (in)[2]); \
X3 = _mm_xor_si128(X3, (in)[3]);
#define OUT(out) \
(out)[0] = X0; \
(out)[1] = X1; \
(out)[2] = X2; \
(out)[3] = X3;
/**
* blockmix_pwxform(Bin, Bout, r, S):
* Compute Bout = BlockMix_pwxform{salsa20/8, r, S}(Bin). The input Bin must
* be 128r bytes in length; the output Bout must also be the same size.
*/
/* BlockMix_pwxform: like BlockMix but with pwxform as the mixing function
 * and salsa20/8 only on the final sub-block.  S == NULL selects the classic
 * salsa8 BlockMix instead (no S-boxes allocated). */
static void
blockmix(const salsa20_blk_t *restrict Bin, salsa20_blk_t *restrict Bout,
    size_t r, const __m128i *restrict S)
{
	const uint8_t * S0, * S1;
	__m128i X0, X1, X2, X3;
	size_t i;

	if (!S) {
		blockmix_salsa8(Bin, Bout, r);
		return;
	}

	/* the two halves of S are the two pwxform S-boxes */
	S0 = (const uint8_t *)S;
	S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

	/* Convert 128-byte blocks to 64-byte blocks */
	r *= 2;

	r--;
	PREFETCH(&Bin[r], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin[i], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
	}
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)

	/* X <-- B_{r1 - 1} */
	X0 = Bin[r].q[0];
	X1 = Bin[r].q[1];
	X2 = Bin[r].q[2];
	X3 = Bin[r].q[3];

	/* for i = 0 to r1 - 1 do */
	for (i = 0; i < r; i++) {
		/* X <-- H'(X \xor B_i) */
		XOR4(Bin[i].q)
		PWXFORM
		/* B'_i <-- X */
		OUT(Bout[i].q)
	}

	/* Last iteration of the loop above */
	XOR4(Bin[i].q)
	PWXFORM

	/* B'_i <-- H(B'_i) */
	SALSA20_8(Bout[i].q)
}
#define XOR4_2(in1, in2) \
X0 = _mm_xor_si128((in1)[0], (in2)[0]); \
X1 = _mm_xor_si128((in1)[1], (in2)[1]); \
X2 = _mm_xor_si128((in1)[2], (in2)[2]); \
X3 = _mm_xor_si128((in1)[3], (in2)[3]);
/* BlockMix of (Bin1 xor Bin2); Bin2_in_ROM selects non-temporal prefetch
 * hints for a read-only ROM second input.  Returns the low 32 bits of the
 * final X0 (used as the next Integerify value). */
static inline uint32_t
blockmix_salsa8_xor(const salsa20_blk_t *restrict Bin1,
    const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, int Bin2_in_ROM)
{
	__m128i X0, X1, X2, X3;
	size_t i;

	r--;
	if (Bin2_in_ROM) {
		/* NTA: avoid polluting the cache with ROM data */
		PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_NTA)
		PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
		for (i = 0; i < r; i++) {
			PREFETCH(&Bin2[i * 2], _MM_HINT_NTA)
			PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
			PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_NTA)
			PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
		}
		PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
	} else {
		PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
		PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
		for (i = 0; i < r; i++) {
			PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
			PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
			PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
			PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
		}
		PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
	}
	PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

	/* 1: X <-- B_{2r - 1} */
	XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[0].q)
	SALSA20_8_XOR_MEM(Bin2[0].q, Bout[0].q)

	/* 2: for i = 0 to 2r - 1 do */
	for (i = 0; i < r;) {
		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2 + 1].q)
		SALSA20_8_XOR_MEM(Bin2[i * 2 + 1].q, Bout[r + 1 + i].q)

		i++;

		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2].q)
		SALSA20_8_XOR_MEM(Bin2[i * 2].q, Bout[i].q)
	}

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[r * 2 + 1].q)
	SALSA20_8_XOR_MEM(Bin2[r * 2 + 1].q, Bout[r * 2 + 1].q)

	return _mm_cvtsi128_si32(X0);
}
/* pwxform BlockMix of (Bin1 xor Bin2); falls back to the salsa8 variant
 * when S is NULL.  Returns low 32 bits of the final X0 for Integerify. */
static uint32_t
blockmix_xor(const salsa20_blk_t *restrict Bin1,
    const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, int Bin2_in_ROM, const __m128i *restrict S)
{
	const uint8_t * S0, * S1;
	__m128i X0, X1, X2, X3;
	size_t i;

	if (!S)
		return blockmix_salsa8_xor(Bin1, Bin2, Bout, r, Bin2_in_ROM);

	S0 = (const uint8_t *)S;
	S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

	/* Convert 128-byte blocks to 64-byte blocks */
	r *= 2;

	r--;
	if (Bin2_in_ROM) {
		/* NTA: keep read-only ROM data out of the cache hierarchy */
		PREFETCH(&Bin2[r], _MM_HINT_NTA)
		PREFETCH(&Bin1[r], _MM_HINT_T0)
		for (i = 0; i < r; i++) {
			PREFETCH(&Bin2[i], _MM_HINT_NTA)
			PREFETCH(&Bin1[i], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		}
	} else {
		PREFETCH(&Bin2[r], _MM_HINT_T0)
		PREFETCH(&Bin1[r], _MM_HINT_T0)
		for (i = 0; i < r; i++) {
			PREFETCH(&Bin2[i], _MM_HINT_T0)
			PREFETCH(&Bin1[i], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		}
	}
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0);

	/* X <-- B_{r1 - 1} */
	XOR4_2(Bin1[r].q, Bin2[r].q)

	/* for i = 0 to r1 - 1 do */
	for (i = 0; i < r; i++) {
		/* X <-- H'(X \xor B_i) */
		XOR4(Bin1[i].q)
		XOR4(Bin2[i].q)
		PWXFORM
		/* B'_i <-- X */
		OUT(Bout[i].q)
	}

	/* Last iteration of the loop above */
	XOR4(Bin1[i].q)
	XOR4(Bin2[i].q)
	PWXFORM

	/* B'_i <-- H(B'_i) */
	SALSA20_8(Bout[i].q)

	return _mm_cvtsi128_si32(X0);
}
#undef XOR4
#define XOR4(in, out) \
(out)[0] = Y0 = _mm_xor_si128((in)[0], (out)[0]); \
(out)[1] = Y1 = _mm_xor_si128((in)[1], (out)[1]); \
(out)[2] = Y2 = _mm_xor_si128((in)[2], (out)[2]); \
(out)[3] = Y3 = _mm_xor_si128((in)[3], (out)[3]);
/* Like blockmix_salsa8_xor, but with the YESCRYPT_RW write-back: the XOR4
 * macro in effect here also stores (Bin1 xor Bin2) back into Bin2.
 * Returns low 32 bits of the final X0 for Integerify. */
static inline uint32_t
blockmix_salsa8_xor_save(const salsa20_blk_t *restrict Bin1,
    salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r)
{
	__m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
	size_t i;

	r--;
	PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
	PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
		PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
		PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
		PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
	}
	PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
	PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

	/* 1: X <-- B_{2r - 1} */
	XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[0].q, Bin2[0].q)
	SALSA20_8_XOR_REG(Bout[0].q)

	/* 2: for i = 0 to 2r - 1 do */
	for (i = 0; i < r;) {
		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2 + 1].q, Bin2[i * 2 + 1].q)
		SALSA20_8_XOR_REG(Bout[r + 1 + i].q)

		i++;

		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2].q, Bin2[i * 2].q)
		SALSA20_8_XOR_REG(Bout[i].q)
	}

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)
	SALSA20_8_XOR_REG(Bout[r * 2 + 1].q)

	return _mm_cvtsi128_si32(X0);
}
#define XOR4_Y \
X0 = _mm_xor_si128(X0, Y0); \
X1 = _mm_xor_si128(X1, Y1); \
X2 = _mm_xor_si128(X2, Y2); \
X3 = _mm_xor_si128(X3, Y3);
/* pwxform BlockMix with YESCRYPT_RW write-back (XOR4 here also updates
 * Bin2 in place); falls back to the salsa8 variant when S is NULL.
 * Returns low 32 bits of the final X0 for Integerify. */
static uint32_t
blockmix_xor_save(const salsa20_blk_t *restrict Bin1,
    salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, const __m128i *restrict S)
{
	const uint8_t * S0, * S1;
	__m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
	size_t i;

	if (!S)
		return blockmix_salsa8_xor_save(Bin1, Bin2, Bout, r);

	S0 = (const uint8_t *)S;
	S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

	/* Convert 128-byte blocks to 64-byte blocks */
	r *= 2;

	r--;
	PREFETCH(&Bin2[r], _MM_HINT_T0)
	PREFETCH(&Bin1[r], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin2[i], _MM_HINT_T0)
		PREFETCH(&Bin1[i], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
	}
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0);

	/* X <-- B_{r1 - 1} */
	XOR4_2(Bin1[r].q, Bin2[r].q)

	/* for i = 0 to r1 - 1 do */
	for (i = 0; i < r; i++) {
		XOR4(Bin1[i].q, Bin2[i].q)
		/* X <-- H'(X \xor B_i) */
		XOR4_Y
		PWXFORM
		/* B'_i <-- X */
		OUT(Bout[i].q)
	}

	/* Last iteration of the loop above */
	XOR4(Bin1[i].q, Bin2[i].q)
	XOR4_Y
	PWXFORM

	/* B'_i <-- H(B'_i) */
	SALSA20_8(Bout[i].q)

	return _mm_cvtsi128_si32(X0);
}
#undef ARX
#undef SALSA20_2ROUNDS
#undef SALSA20_8
#undef SALSA20_8_XOR_ANY
#undef SALSA20_8_XOR_MEM
#undef SALSA20_8_XOR_REG
#undef PWXFORM_SIMD_1
#undef PWXFORM_SIMD_2
#undef PWXFORM_ROUND
#undef PWXFORM
#undef OUT
#undef XOR4
#undef XOR4_2
#undef XOR4_Y
/**
* integerify(B, r):
* Return the result of parsing B_{2r-1} as a little-endian integer.
*/
static inline uint32_t
integerify(const salsa20_blk_t * B, size_t r)
{
	/* first 32-bit word of the last 64-byte sub-block */
	return B[2 * r - 1].w[0];
}
/**
* smix1(B, r, N, flags, V, NROM, shared, XY, S):
* Compute first loop of B = SMix_r(B, N). The input B must be 128r bytes in
* length; the temporary storage V must be 128rN bytes in length; the temporary
* storage XY must be 128r bytes in length. The value N must be even and no
* smaller than 2. The array V must be aligned to a multiple of 64 bytes, and
* arrays B and XY to a multiple of at least 16 bytes (aligning them to 64
* bytes as well saves cache lines, but might result in cache bank conflicts).
*/
/* First SMix loop: fill V with 2r-block states while mixing X forward.
 * Three variants: with ROM interleaving, with YESCRYPT_RW's Wrap()
 * indexing, or the classic sequential scrypt fill. */
static void
smix1(uint8_t * B, size_t r, uint32_t N, yescrypt_flags_t flags,
    salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
    salsa20_blk_t * XY, void * S)
{
	const salsa20_blk_t * VROM = shared->shared1.aligned;
	uint32_t VROM_mask = shared->mask1;
	size_t s = 2 * r;
	salsa20_blk_t * X = V, * Y;
	uint32_t i, j;
	size_t k;

	/* 1: X <-- B */
	/* 3: V_i <-- X */
	/* (i * 5 % 16) undoes the SIMD word shuffle used by this layout */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
		}
	}

	if (NROM && (VROM_mask & 1)) {
		uint32_t n;
		salsa20_blk_t * V_n;
		const salsa20_blk_t * V_j;

		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[s];
		blockmix(X, Y, r, S);

		X = &V[2 * s];
		if ((1 & VROM_mask) == 1) {
			/* j <-- Integerify(X) mod NROM */
			j = integerify(Y, r) & (NROM - 1);
			V_j = &VROM[j * s];

			/* X <-- H(X \xor VROM_j) */
			j = blockmix_xor(Y, V_j, X, r, 1, S);
		} else {
			/* X <-- H(X) */
			blockmix(Y, X, r, S);
			j = integerify(X, r);
		}

		for (n = 2; n < N; n <<= 1) {
			uint32_t m = (n < N / 2) ? n : (N - 1 - n);

			V_n = &V[n * s];

			/* 2: for i = 0 to N - 1 do */
			for (i = 1; i < m; i += 2) {
				/* j <-- Wrap(Integerify(X), i) */
				j &= n - 1;
				j += i - 1;
				V_j = &V[j * s];

				/* X <-- X \xor V_j */
				/* 4: X <-- H(X) */
				/* 3: V_i <-- X */
				Y = &V_n[i * s];
				j = blockmix_xor(X, V_j, Y, r, 0, S);

				if (((n + i) & VROM_mask) == 1) {
					/* j <-- Integerify(X) mod NROM */
					j &= NROM - 1;
					V_j = &VROM[j * s];
				} else {
					/* j <-- Wrap(Integerify(X), i) */
					j &= n - 1;
					j += i;
					V_j = &V[j * s];
				}

				/* X <-- H(X \xor VROM_j) */
				X = &V_n[(i + 1) * s];
				j = blockmix_xor(Y, V_j, X, r, 1, S);
			}
		}

		n >>= 1;

		/* j <-- Wrap(Integerify(X), i) */
		j &= n - 1;
		j += N - 2 - n;
		V_j = &V[j * s];

		/* X <-- X \xor V_j */
		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[(N - 1) * s];
		j = blockmix_xor(X, V_j, Y, r, 0, S);

		if (((N - 1) & VROM_mask) == 1) {
			/* j <-- Integerify(X) mod NROM */
			j &= NROM - 1;
			V_j = &VROM[j * s];
		} else {
			/* j <-- Wrap(Integerify(X), i) */
			j &= n - 1;
			j += N - 1 - n;
			V_j = &V[j * s];
		}

		/* X <-- X \xor V_j */
		/* 4: X <-- H(X) */
		X = XY;
		blockmix_xor(Y, V_j, X, r, 1, S);
	} else if (flags & YESCRYPT_RW) {
		uint32_t n;
		salsa20_blk_t * V_n, * V_j;

		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[s];
		blockmix(X, Y, r, S);

		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		X = &V[2 * s];
		blockmix(Y, X, r, S);
		j = integerify(X, r);

		for (n = 2; n < N; n <<= 1) {
			uint32_t m = (n < N / 2) ? n : (N - 1 - n);

			V_n = &V[n * s];

			/* 2: for i = 0 to N - 1 do */
			for (i = 1; i < m; i += 2) {
				Y = &V_n[i * s];

				/* j <-- Wrap(Integerify(X), i) */
				j &= n - 1;
				j += i - 1;
				V_j = &V[j * s];

				/* X <-- X \xor V_j */
				/* 4: X <-- H(X) */
				/* 3: V_i <-- X */
				j = blockmix_xor(X, V_j, Y, r, 0, S);

				/* j <-- Wrap(Integerify(X), i) */
				j &= n - 1;
				j += i;
				V_j = &V[j * s];

				/* X <-- X \xor V_j */
				/* 4: X <-- H(X) */
				/* 3: V_i <-- X */
				X = &V_n[(i + 1) * s];
				j = blockmix_xor(Y, V_j, X, r, 0, S);
			}
		}

		n >>= 1;

		/* j <-- Wrap(Integerify(X), i) */
		j &= n - 1;
		j += N - 2 - n;
		V_j = &V[j * s];

		/* X <-- X \xor V_j */
		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[(N - 1) * s];
		j = blockmix_xor(X, V_j, Y, r, 0, S);

		/* j <-- Wrap(Integerify(X), i) */
		j &= n - 1;
		j += N - 1 - n;
		V_j = &V[j * s];

		/* X <-- X \xor V_j */
		/* 4: X <-- H(X) */
		X = XY;
		blockmix_xor(Y, V_j, X, r, 0, S);
	} else {
		/* classic scrypt: sequential fill of V, two blocks per iteration */
		/* 2: for i = 0 to N - 1 do */
		for (i = 1; i < N - 1; i += 2) {
			/* 4: X <-- H(X) */
			/* 3: V_i <-- X */
			Y = &V[i * s];
			blockmix(X, Y, r, S);

			/* 4: X <-- H(X) */
			/* 3: V_i <-- X */
			X = &V[(i + 1) * s];
			blockmix(Y, X, r, S);
		}

		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[i * s];
		blockmix(X, Y, r, S);

		/* 4: X <-- H(X) */
		X = XY;
		blockmix(Y, X, r, S);
	}

	/* B' <-- X */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
		}
	}
}
/**
* smix2(B, r, N, Nloop, flags, V, NROM, shared, XY, S):
* Compute second loop of B = SMix_r(B, N). The input B must be 128r bytes in
* length; the temporary storage V must be 128rN bytes in length; the temporary
* storage XY must be 256r bytes in length. The value N must be a power of 2
* greater than 1. The value Nloop must be even. The array V must be aligned
* to a multiple of 64 bytes, and arrays B and XY to a multiple of at least 16
* bytes (aligning them to 64 bytes as well saves cache lines, but might result
* in cache bank conflicts).
*/
/* Second SMix loop: Nloop random-access iterations over V (and optionally
 * the ROM), with the YESCRYPT_RW variants writing back into V via the
 * *_save blockmix functions. */
static void
smix2(uint8_t * B, size_t r, uint32_t N, uint64_t Nloop,
    yescrypt_flags_t flags, salsa20_blk_t * V, uint32_t NROM,
    const yescrypt_shared_t * shared, salsa20_blk_t * XY, void * S)
{
	const salsa20_blk_t * VROM = shared->shared1.aligned;
	uint32_t VROM_mask = shared->mask1;
	size_t s = 2 * r;
	salsa20_blk_t * X = XY, * Y = &XY[s];
	uint64_t i;
	uint32_t j;
	size_t k;

	if (Nloop == 0)
		return;

	/* X <-- B' */
	/* 3: V_i <-- X */
	/* (i * 5 % 16) undoes the SIMD word shuffle used by this layout */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
		}
	}

	i = Nloop / 2;

	/* 7: j <-- Integerify(X) mod N */
	j = integerify(X, r) & (N - 1);

/*
 * Normally, NROM implies YESCRYPT_RW, but we check for these separately
 * because YESCRYPT_PARALLEL_SMIX resets YESCRYPT_RW for the smix2() calls
 * operating on the entire V.
 */
	if (NROM && (flags & YESCRYPT_RW)) {
		/* 6: for i = 0 to N - 1 do */
		for (i = 0; i < Nloop; i += 2) {
			salsa20_blk_t * V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* V_j <-- Xprev \xor V_j */
			/* j <-- Integerify(X) mod NROM */
			j = blockmix_xor_save(X, V_j, Y, r, S);

			if (((i + 1) & VROM_mask) == 1) {
				const salsa20_blk_t * VROM_j;

				j &= NROM - 1;
				VROM_j = &VROM[j * s];

				/* X <-- H(X \xor VROM_j) */
				/* 7: j <-- Integerify(X) mod N */
				j = blockmix_xor(Y, VROM_j, X, r, 1, S);
			} else {
				j &= N - 1;
				V_j = &V[j * s];

				/* 8: X <-- H(X \xor V_j) */
				/* V_j <-- Xprev \xor V_j */
				/* j <-- Integerify(X) mod NROM */
				j = blockmix_xor_save(Y, V_j, X, r, S);
			}
			j &= N - 1;
			V_j = &V[j * s];
		}
	} else if (NROM) {
		/* 6: for i = 0 to N - 1 do */
		for (i = 0; i < Nloop; i += 2) {
			const salsa20_blk_t * V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* V_j <-- Xprev \xor V_j */
			/* j <-- Integerify(X) mod NROM */
			j = blockmix_xor(X, V_j, Y, r, 0, S);

			if (((i + 1) & VROM_mask) == 1) {
				j &= NROM - 1;
				V_j = &VROM[j * s];
			} else {
				j &= N - 1;
				V_j = &V[j * s];
			}

			/* X <-- H(X \xor VROM_j) */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor(Y, V_j, X, r, 1, S);
			j &= N - 1;
			V_j = &V[j * s];
		}
	} else if (flags & YESCRYPT_RW) {
		/* 6: for i = 0 to N - 1 do */
		do {
			salsa20_blk_t * V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* V_j <-- Xprev \xor V_j */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor_save(X, V_j, Y, r, S);
			j &= N - 1;
			V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* V_j <-- Xprev \xor V_j */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor_save(Y, V_j, X, r, S);
			j &= N - 1;
		} while (--i);
	} else {
		/* 6: for i = 0 to N - 1 do */
		do {
			const salsa20_blk_t * V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor(X, V_j, Y, r, 0, S);
			j &= N - 1;
			V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor(Y, V_j, X, r, 0, S);
			j &= N - 1;
		} while (--i);
	}

	/* 10: B' <-- X */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
		}
	}
}
/**
* p2floor(x):
* Largest power of 2 not greater than argument.
*/
/*
 * p2floor(x):
 * Return the largest power of 2 that does not exceed x (x itself when x is
 * already a power of 2; 0 when x is 0).
 */
static uint64_t
p2floor(uint64_t x)
{
	/* Repeatedly clear the lowest set bit; when only one bit remains,
	   x & (x - 1) becomes 0 and x is the highest original bit. */
	for (;;) {
		uint64_t cleared = x & (x - 1);
		if (cleared == 0)
			return x;
		x = cleared;
	}
}
/**
* smix(B, r, N, p, t, flags, V, NROM, shared, XY, S):
* Compute B = SMix_r(B, N). The input B must be 128rp bytes in length; the
* temporary storage V must be 128rN bytes in length; the temporary storage XY
* must be 256r or 256rp bytes in length (the larger size is required with
* OpenMP-enabled builds). The value N must be a power of 2 greater than 1.
* The array V must be aligned to a multiple of 64 bytes, and arrays B and
* XY to a multiple of at least 16 bytes (aligning them to 64 bytes as well
* saves cache lines and helps avoid false sharing in OpenMP-enabled builds
* when p > 1, but it might also result in cache bank conflicts).
*/
static void
smix(uint8_t * B, size_t r, uint32_t N, uint32_t p, uint32_t t,
yescrypt_flags_t flags,
salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
salsa20_blk_t * XY, void * S)
{
/* s = salsa20 sub-blocks per 128r-byte block */
size_t s = 2 * r;
/* Each of the p lanes gets its own N/p slice of V */
uint32_t Nchunk = N / p;
uint64_t Nloop_all, Nloop_rw;
uint32_t i;
/* Scale the smix2 iteration count by the time parameter t.
   RW mode: t=0 -> N/3, t=1 -> 2N/3, t>=2 -> (t-1)*N.
   Classic mode: t=0 -> N, t=1 -> 1.5*N, t>=2 -> t*N. */
Nloop_all = Nchunk;
if (flags & YESCRYPT_RW) {
if (t <= 1) {
if (t)
Nloop_all *= 2; /* 2/3 */
Nloop_all = (Nloop_all + 2) / 3; /* 1/3, round up */
} else {
Nloop_all *= t - 1;
}
} else if (t) {
if (t == 1)
Nloop_all += (Nloop_all + 1) / 2; /* 1.5, round up */
Nloop_all *= t;
}
/* Nloop_rw iterations run with RW semantics on the lane's own V slice;
   the remaining Nloop_all - Nloop_rw run read-only over all of V. */
Nloop_rw = 0;
if (flags & __YESCRYPT_INIT_SHARED)
Nloop_rw = Nloop_all;
else if (flags & YESCRYPT_RW)
Nloop_rw = Nloop_all / p;
/* smix2 requires even loop counts (it processes two blocks per step). */
Nchunk &= ~(uint32_t)1; /* round down to even */
Nloop_all++; Nloop_all &= ~(uint64_t)1; /* round up to even */
Nloop_rw &= ~(uint64_t)1; /* round down to even */
#ifdef _OPENMP
#pragma omp parallel if (p > 1) default(none) private(i) shared(B, r, N, p, flags, V, NROM, shared, XY, S, s, Nchunk, Nloop_all, Nloop_rw)
{
#pragma omp for
#endif
for (i = 0; i < p; i++) {
uint32_t Vchunk = i * Nchunk;
uint8_t * Bp = &B[128 * r * i];
salsa20_blk_t * Vp = &V[Vchunk * s];
#ifdef _OPENMP
/* Per-thread 256r-byte scratch area when running lanes in parallel */
salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
salsa20_blk_t * XYp = XY;
#endif
/* Last lane absorbs the rounding remainder of N / p */
uint32_t Np = (i < p - 1) ? Nchunk : (N - Vchunk);
void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;
if (Sp)
/* Initialize this lane's pwxform S-boxes from Bp (PWXFORM disabled
   during this bootstrap pass, and no S-boxes used: S arg is NULL). */
smix1(Bp, 1, S_SIZE_ALL / 128,
flags & ~YESCRYPT_PWXFORM,
Sp, NROM, shared, XYp, NULL);
if (!(flags & __YESCRYPT_INIT_SHARED_2))
smix1(Bp, r, Np, flags, Vp, NROM, shared, XYp, Sp);
/* RW portion of the second loop, restricted to this lane's V slice
   (p2floor because smix2 needs a power-of-2 N). */
smix2(Bp, r, p2floor(Np), Nloop_rw, flags, Vp,
NROM, shared, XYp, Sp);
}
/* Remaining iterations read all of V, so RW must be off. */
if (Nloop_all > Nloop_rw) {
#ifdef _OPENMP
#pragma omp for
#endif
for (i = 0; i < p; i++) {
uint8_t * Bp = &B[128 * r * i];
#ifdef _OPENMP
salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
salsa20_blk_t * XYp = XY;
#endif
void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;
smix2(Bp, r, N, Nloop_all - Nloop_rw,
flags & ~YESCRYPT_RW, V, NROM, shared, XYp, Sp);
}
}
#ifdef _OPENMP
}
#endif
}
/**
* yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen,
* N, r, p, t, flags, buf, buflen):
* Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
* p, buflen), or a revision of scrypt as requested by flags and shared, and
* write the result into buf. The parameters r, p, and buflen must satisfy
* r * p < 2^30 and buflen <= (2^32 - 1) * 32. The parameter N must be a power
* of 2 greater than 1. (This optimized implementation currently additionally
* limits N to the range from 8 to 2^31, but other implementation might not.)
*
* t controls computation time while not affecting peak memory usage. shared
* and flags may request special modes as described in yescrypt.h. local is
* the thread-local data structure, allowing to preserve and reuse a memory
* allocation across calls, thereby reducing its overhead.
*
* Return 0 on success; or -1 on error.
*/
static int
yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
const uint8_t * passwd, size_t passwdlen,
const uint8_t * salt, size_t saltlen,
uint64_t N, uint32_t r, uint32_t p, uint32_t t, yescrypt_flags_t flags,
uint8_t * buf, size_t buflen)
{
yescrypt_region_t tmp;
uint64_t NROM;
size_t B_size, V_size, XY_size, need;
uint8_t * B, * S;
salsa20_blk_t * V, * XY;
uint8_t sha256[32];
/*
* YESCRYPT_PARALLEL_SMIX is a no-op at p = 1 for its intended purpose,
* so don't let it have side-effects. Without this adjustment, it'd
* enable the SHA-256 password pre-hashing and output post-hashing,
* because any deviation from classic scrypt implies those.
*/
if (p == 1)
flags &= ~YESCRYPT_PARALLEL_SMIX;
/* Sanity-check parameters */
if (flags & ~YESCRYPT_KNOWN_FLAGS) {
errno = EINVAL;
return -1;
}
#if SIZE_MAX > UINT32_MAX
/* buflen <= (2^32 - 1) * 32, per the PBKDF2-SHA256 output limit */
if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
errno = EFBIG;
return -1;
}
#endif
if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
errno = EFBIG;
return -1;
}
if (N > UINT32_MAX) {
errno = EFBIG;
return -1;
}
/* N must be a power of 2, at least 8 */
if (((N & (N - 1)) != 0) || (N <= 7) || (r < 1) || (p < 1)) {
errno = EINVAL;
return -1;
}
/* Each of the p lanes needs its own N/p >= 8 slice of V */
if ((flags & YESCRYPT_PARALLEL_SMIX) && (N / p <= 7)) {
errno = EINVAL;
return -1;
}
/* Guard the size computations below against size_t overflow */
if ((r > SIZE_MAX / 256 / p) ||
(N > SIZE_MAX / 128 / r)) {
errno = ENOMEM;
return -1;
}
#ifdef _OPENMP
if (!(flags & YESCRYPT_PARALLEL_SMIX) &&
(N > SIZE_MAX / 128 / (r * p))) {
errno = ENOMEM;
return -1;
}
#endif
if ((flags & YESCRYPT_PWXFORM) &&
#ifndef _OPENMP
(flags & YESCRYPT_PARALLEL_SMIX) &&
#endif
p > SIZE_MAX / S_SIZE_ALL) {
errno = ENOMEM;
return -1;
}
/* Derive the ROM block count from the shared region's size, if any */
NROM = 0;
if (shared->shared1.aligned) {
NROM = shared->shared1.aligned_size / ((size_t)128 * r);
if (NROM > UINT32_MAX) {
errno = EFBIG;
return -1;
}
if (((NROM & (NROM - 1)) != 0) || (NROM <= 7) ||
!(flags & YESCRYPT_RW)) {
errno = EINVAL;
return -1;
}
}
/* Allocate memory */
V = NULL;
V_size = (size_t)128 * r * N;
#ifdef _OPENMP
if (!(flags & YESCRYPT_PARALLEL_SMIX))
V_size *= p;
#endif
need = V_size;
if (flags & __YESCRYPT_INIT_SHARED) {
/* When initializing a shared (ROM) region, V lives in the caller's
   local region and must be (pre-)allocated exactly once. */
if (local->aligned_size < need) {
if (local->base || local->aligned ||
local->base_size || local->aligned_size) {
errno = EINVAL;
return -1;
}
if (!alloc_region(local, need))
return -1;
}
V = (salsa20_blk_t *)local->aligned;
need = 0;
}
/* Accumulate B, XY, and S sizes, checking each addition for overflow */
B_size = (size_t)128 * r * p;
need += B_size;
if (need < B_size) {
errno = ENOMEM;
return -1;
}
XY_size = (size_t)256 * r;
#ifdef _OPENMP
XY_size *= p;
#endif
need += XY_size;
if (need < XY_size) {
errno = ENOMEM;
return -1;
}
if (flags & YESCRYPT_PWXFORM) {
size_t S_size = S_SIZE_ALL;
#ifdef _OPENMP
S_size *= p;
#else
if (flags & YESCRYPT_PARALLEL_SMIX)
S_size *= p;
#endif
need += S_size;
if (need < S_size) {
errno = ENOMEM;
return -1;
}
}
if (flags & __YESCRYPT_INIT_SHARED) {
/* B and XY come from a temporary region; V stays in local (above) */
if (!alloc_region(&tmp, need))
return -1;
B = (uint8_t *)tmp.aligned;
XY = (salsa20_blk_t *)((uint8_t *)B + B_size);
} else {
/* Reuse (and grow if needed) the caller's local region for B|V|XY[|S] */
init_region(&tmp);
if (local->aligned_size < need) {
if (free_region(local))
return -1;
if (!alloc_region(local, need))
return -1;
}
B = (uint8_t *)local->aligned;
V = (salsa20_blk_t *)((uint8_t *)B + B_size);
XY = (salsa20_blk_t *)((uint8_t *)V + V_size);
}
S = NULL;
if (flags & YESCRYPT_PWXFORM)
S = (uint8_t *)XY + XY_size;
/* Any deviation from classic scrypt (t or flags nonzero) pre-hashes the
   password so that it is used in a fixed-length form. */
if (t || flags) {
SHA256_CTX ctx;
SHA256_Init(&ctx);
SHA256_Update(&ctx, passwd, passwdlen);
SHA256_Final(sha256, &ctx);
passwd = sha256;
passwdlen = sizeof(sha256);
}
/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, B, B_size);
if (t || flags)
memcpy(sha256, B, sizeof(sha256));
if (p == 1 || (flags & YESCRYPT_PARALLEL_SMIX)) {
smix(B, r, N, p, t, flags, V, NROM, shared, XY, S);
} else {
uint32_t i;
/* 2: for i = 0 to p - 1 do */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, shared, XY, S)
#endif
for (i = 0; i < p; i++) {
/* 3: B_i <-- MF(B_i, N) */
#ifdef _OPENMP
smix(&B[(size_t)128 * r * i], r, N, 1, t, flags,
&V[(size_t)2 * r * i * N],
NROM, shared,
&XY[(size_t)4 * r * i],
S ? &S[S_SIZE_ALL * i] : S);
#else
smix(&B[(size_t)128 * r * i], r, N, 1, t, flags, V,
NROM, shared, XY, S);
#endif
}
}
/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
PBKDF2_SHA256(passwd, passwdlen, B, B_size, 1, buf, buflen);
/*
* Except when computing classic scrypt, allow all computation so far
* to be performed on the client. The final steps below match those of
* SCRAM (RFC 5802), so that an extension of SCRAM (with the steps so
* far in place of SCRAM's use of PBKDF2 and with SHA-256 in place of
* SCRAM's use of SHA-1) would be usable with yescrypt hashes.
*/
if ((t || flags) && buflen == sizeof(sha256)) {
/* Compute ClientKey */
{
HMAC_SHA256_CTX ctx;
HMAC_SHA256_Init(&ctx, buf, buflen);
/* NOTE(review): the length argument is 10, so only "Jagaricoin"
   (without the trailing 'R') is hashed — confirm this is the
   intended personalization string for this fork. */
HMAC_SHA256_Update(&ctx, "JagaricoinR", 10);
HMAC_SHA256_Final(sha256, &ctx);
}
/* Compute StoredKey */
{
SHA256_CTX ctx;
SHA256_Init(&ctx);
SHA256_Update(&ctx, sha256, sizeof(sha256));
SHA256_Final(buf, &ctx);
}
}
if (free_region(&tmp))
return -1;
/* Success! */
return 0;
}
|
p1.c | #include <stdio.h>
#include <omp.h>
#include <time.h>
#define INTERVALS 1000000
/*
 * Approximate pi by midpoint-rule integration of 4/(1+x^2) over [0,1],
 * parallelized with OpenMP (4 threads), and report the elapsed wall time.
 */
int main(int argc, char* argv[])
{
    double area;    /* The final answer (approximation of pi) */
    double ysum;    /* Sum of rectangle heights */
    double xi;      /* Midpoint of interval */
    int i;
    (void)argc;
    (void)argv;
    ysum = 0.0;
    omp_set_num_threads(4);
    double begin = omp_get_wtime();
    /* A single combined parallel-for with a reduction replaces the original
       separate `parallel` + `for` pragmas: each thread accumulates into a
       private copy of ysum, which OpenMP sums when the loop ends. */
    #pragma omp parallel for private(xi) reduction(+:ysum)
    for (i = 0; i < INTERVALS; i++)
    {
        xi = (1.0 / INTERVALS) * (i + 0.5);
        ysum += 4.0 / (1.0 + xi * xi);
    }
    area = ysum * (1.0 / INTERVALS);
    double time_spent = (double)(omp_get_wtime() - begin);
    printf("pi is %13.11f\n", area);
    printf ("Time: %f\n", time_spent);
    return 0;
}
|
GB_binop__land_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__land_uint8
// A.*B function (eWiseMult): GB_AemultB__land_uint8
// A*D function (colscale): GB_AxD__land_uint8
// D*A function (rowscale): GB_DxB__land_uint8
// C+=B function (dense accum): GB_Cdense_accumB__land_uint8
// C+=b function (dense accum): GB_Cdense_accumb__land_uint8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__land_uint8
// C=scalar+B GB_bind1st__land_uint8
// C=scalar+B' GB_bind1st_tran__land_uint8
// C=A+scalar GB_bind2nd__land_uint8
// C=A'+scalar GB_bind2nd_tran__land_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = ((x != 0) && (y != 0)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_UINT8 || GxB_NO_LAND_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the actual loop lives in the
// included template, specialized by the GB_* macros defined above.
GrB_Info GB_Cdense_ewise3_noaccum__land_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; the slice arrays partition B's
// entries into ntasks units of parallel work for the included template.
GrB_Info GB_Cdense_accumB__land_uint8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar (passed as an untyped pointer,
// dereferenced as uint8_t).  The loop lives in the included template.
// Fix: the generator emitted a second, unreachable `return (GrB_SUCCESS)`
// after the inner block's return; the dead statement is removed.
GrB_Info GB_Cdense_accumb__land_uint8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D; C->x is typed
// as uint8_t for the included column-scale template.
GrB_Info GB_AxD__land_uint8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D; C->x is typed
// as uint8_t for the included row-scale template.
GrB_Info GB_DxB__land_uint8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (set union of patterns), optionally masked by M; the
// task list partitions the work for the included template.
GrB_Info GB_AaddB__land_uint8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (set intersection of patterns), optionally masked by M;
// the task list partitions the work for the included template.
GrB_Info GB_AemultB__land_uint8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = land (x, Bx [k]): apply the LAND operator with the scalar x
// bound as the first operand, over all anz entries, in parallel.
GrB_Info GB_bind1st__land_uint8
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        uint8_t bkj = Bx [k] ;
        Cx [k] = ((x != 0) && (bkj != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = land (Ax [k], y): apply the LAND operator with the scalar y
// bound as the second operand, over all anz entries, in parallel.
GrB_Info GB_bind2nd__land_uint8
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        uint8_t akj = Ax [k] ;
        Cx [k] = ((akj != 0) && (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
// C = land (x, A'): transpose A and apply the operator with scalar x bound
// first.  GB_ATYPE is temporarily redefined because the transpose template
// reads A (the operator's *second* input) through that macro; the original
// definition is restored at the end of the function.
GrB_Info GB_bind1st_tran__land_uint8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
// C = land (A', y): transpose A and apply the operator with scalar y bound
// second, via the included transpose template and the GB_CAST_OP macro
// defined just above.
GrB_Info GB_bind2nd_tran__land_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
HashFunction.h | /*
* HashFunction.h
*
* Created on: 20/lug/2016
* Author: samuele
*/
#ifndef HASHFUNCTION_H_
#define HASHFUNCTION_H_
#include "HashType.h"
#include "../Spaced/SpacedQmer_Multi.h"
#include <algorithm>
#include <iostream>
// Map a nucleotide character to its 2-bit code (A=0, C=1, G=2, T=3);
// any other character yields the error code 4.
inline static hash_type CharToInt(char ch)
{
	switch (ch)
	{
	case 'A': return 0;
	case 'C': return 1;
	case 'G': return 2;
	case 'T': return 3;
	default:  return 4; //ERROR CODE
	}
}
// Map a nucleotide character to the 2-bit code of its complement
// (A=3, C=2, G=1, T=0); any other character yields the error code 4.
inline static hash_type CharToIntComplement(char ch)
{
	switch (ch)
	{
	case 'A': return 3;
	case 'C': return 2;
	case 'G': return 1;
	case 'T': return 0;
	default:  return 4; //ERROR CODE
	}
}
//Hash per tutti 1 su spaced qmer
// Hash a contiguous q-mer (spaced seed of all 1s): pack 2 bits per base of
// s_Str[startQmer .. startQmer+length) into hash_err.hash, recording the
// absolute index of any non-ACGT character as an error.
inline static void GetHash(const string& s_Str, size_t startQmer, size_t length, Hash_Err& hash_err, hash_type (*fConvertion)(char))
{
hash_err.reset();
// #pragma omp parallel for ordered
for(size_t i = startQmer; i < startQmer + length; ++i)
{
hash_type ch = (*fConvertion)(s_Str[i]);
// #pragma omp ordered
if(ch == 4) // conversion error
hash_err.push_back_error(i);
else
hash_err.hash |= ch << ((i - startQmer) * 2);// OR is safe because each base lands in its own 2-bit slot, so there is no carry
}
}
//Hash per spaced qmer con *
// Hash a spaced q-mer (seed containing don't-care positions): only the
// characters at the seed's '1' positions contribute, 2 bits each; the index
// *within the seed* of any non-ACGT character is recorded as an error.
inline static void GetHash(const string& s_Str, size_t startQmer, const SpacedQmer& spaced_qmer,
Hash_Err& hash_err, hash_type (*fConvertion)(char))
{
hash_err.reset();
const Position& pos_one = spaced_qmer.GetPosOne();
for(size_t j = 0; j < pos_one.size(); ++j)
{
hash_type ch = (*fConvertion)(s_Str[startQmer+pos_one[j]]);
if(ch == 4) // conversion error
hash_err.push_back_error(j);
else
hash_err.hash |= ch << (j * 2);// OR is safe because each position lands in its own 2-bit slot, so there is no carry
}
}
//Hash veloce con spaced qmer tutti 1
// Rolling-hash all contiguous q-mers of s_Str: compute the first hash from
// scratch, then derive each subsequent hash from the previous one by
// shifting out the leaving base and OR-ing in the entering one.
inline static void GetHashes_speedup_previous(const string& s_Str, size_t length,
Hash_Err_V& vHash, hash_type (*fConvertion)(char)) {
vHash.clear();
if(s_Str.size() >= length)
{
size_t n_hashes = s_Str.size() - length + 1;
vHash.resize(n_hashes); // create the vector
GetHash(s_Str, 0, length, vHash[0], fConvertion);// the first hash must be computed from scratch
for(size_t pos=1; pos < vHash.size(); ++pos)
{
Hash_Err& prev_hash = vHash[pos-1];
Hash_Err& curr_hash = vHash[pos];
// copy the previous hash and drop its first position
curr_hash.hash = prev_hash.hash;
curr_hash.hash >>= 2; // shift out 2 bits: one letter leaves the window
curr_hash.sub_pos_err(1, prev_hash);
hash_type enter = (*fConvertion)(s_Str[pos+length-1]);
if(enter == 4)
curr_hash.push_back_error(length-1);
else
curr_hash.hash |= enter << ((length - 1) * 2); // append the entering letter; OR is safe because the
// shift above already zeroed the destination slot
}
}
}
// Compute all spaced-seed hashes of s_Str independently (no reuse between
// adjacent positions); each position is hashed from scratch, so the loop
// iterations are independent and can run in parallel.
inline static void GetHashes_naive(const string& s_Str, const SpacedQmer& spaced_qmer,
Hash_Err_V& vHash, hash_type (*fConvertion)(char))
{
// bool isAllOne = spaced_qmer.GetWeight() == spaced_qmer.GetQ();
// if(isAllOne)
// GetHashes_speedup_previous(s_Str, spaced_qmer.GetQ(), vHash, fConvertion);
// else
// {
vHash.clear();
if(s_Str.size() >= spaced_qmer.GetQ())
{
size_t n_hashes = s_Str.size() - spaced_qmer.GetQ() + 1;
vHash.resize(n_hashes); // create the vector
#pragma omp parallel for
for(size_t pos=0; pos < vHash.size(); ++pos)
GetHash(s_Str, pos, spaced_qmer, vHash[pos], fConvertion);
}
// }
}
// Derive the spaced-seed hash at idx_curr_hash from a previously computed
// hash: shift the previous hash, clear the '1' positions that left the seed
// window, carry over still-valid error positions, and re-read from s_Str only
// the positions listed in curr_sp_shift as needing to change.
inline static void compute_hash_for_speedup_previous(const string& s_Str,
const Position& pos_one_current, const Position& pos_one_prev,
const PreviousShift& curr_sp_shift,
const Hash_Err& prev_hash_err,
size_t idx_curr_hash, Hash_Err& curr_hash_err,
hash_type (*fConvertion)(char))
{
// copy hash and errors
curr_hash_err.hash = prev_hash_err.hash; // copy the hash
curr_hash_err.hash >>= 2*curr_sp_shift.one_exit;// shift out the '1' positions that left the window
// clear the '1' positions that are no longer part of the hash
if(!curr_sp_shift.one_to_remove.empty())
{
hash_type reset_one = 0;
for(size_t j = 0; j < curr_sp_shift.one_to_remove.size(); ++j)
reset_one |= (hash_type)3 << (curr_sp_shift.one_to_remove[j] * 2);
curr_hash_err.hash &= ~reset_one;
}
// if the previous hash carried errors, translate the ones still inside the window
if(!prev_hash_err.isCorrect())
{
long curr_pos_one = 0;
for(size_t e = 0; e < prev_hash_err.size_error(); ++e)
if((curr_pos_one = prev_hash_err[e]-curr_sp_shift.one_exit) >= 0)
if(pos_one_prev[prev_hash_err[e]]-curr_sp_shift.shift_min == pos_one_current[curr_pos_one])
curr_hash_err.push_back_error(curr_pos_one);// carry the error over at its new position
}
// update the positions of the hash that must change
for(size_t j = 0; j < curr_sp_shift.one_to_change.size(); ++j)
{
const size_t& i_to_change = curr_sp_shift.one_to_change[j];
size_t index_char = idx_curr_hash+pos_one_current[i_to_change];
hash_type ch = (*fConvertion)(s_Str[index_char]);
if(ch == 4) // conversion error
curr_hash_err.push_back_error(i_to_change);
else
curr_hash_err.hash |= ch << (i_to_change * 2);// OR is safe: each position has its own 2-bit slot, no carry
}
// update the remaining (newly entered) positions; the exited ones were already removed
// TODO: this part (the if below) is skipped when the numbers of ones differ,
// because then we would not know where to insert and remove;
// NB: in that case the insert/remove information is entirely contained
// in the one_to_change and one_to_remove vectors
if(pos_one_current.size() == pos_one_prev.size())
for(size_t j = pos_one_current.size()-curr_sp_shift.one_exit; j < pos_one_current.size(); ++j)
{
size_t index_char = idx_curr_hash+pos_one_current[j];
hash_type ch = (*fConvertion)(s_Str[index_char]);
if(ch == 4) // conversion error
curr_hash_err.push_back_error(j);
else
curr_hash_err.hash |= ch << (j * 2);// OR is safe: each position has its own 2-bit slot, no carry
}
////////////////////////////////////////////////////////////////////
if(!curr_hash_err.isCorrect())
curr_hash_err.sort_uniq_err();
}
// Compute all spaced-seed hashes of s_Str, reusing a previous hash whenever
// the precomputed shift table says that costs fewer character reads than
// hashing from scratch.
inline static void GetHashes_speedup_previous(const string& s_Str, const SpacedQmer& spaced_qmer,
Hash_Err_V& vHash, hash_type (*fConvertion)(char)) {
// bool isAllOne = spaced_qmer.GetWeight() == spaced_qmer.GetQ();
// if(isAllOne)
// GetHashes_speedup_previous(s_Str, spaced_qmer.GetQ(), vHash, fConvertion);
// else
// {
auto get_hash = [&](size_t curr_idx_hash, const PreviousShift& curr_shift){
Hash_Err& curr_hash = vHash[curr_idx_hash];
// if reusing the previous hash would touch more positions than the seed
// weight, computing from scratch is cheaper
if(spaced_qmer.GetWeight() < curr_shift.GetSize())
GetHash(s_Str, curr_idx_hash, spaced_qmer, curr_hash, fConvertion);
else
{
size_t pos_hash_get = curr_idx_hash-curr_shift.shift_min;// the hash used as base is the current position minus the shift index with the fewest changes
const Hash_Err& prev_hash = vHash[pos_hash_get];
compute_hash_for_speedup_previous(s_Str,
spaced_qmer.GetPosOne(), spaced_qmer.GetPosOne(),
curr_shift,
prev_hash,
curr_idx_hash, curr_hash,
fConvertion);
}
};
long n_hashes = s_Str.size()-spaced_qmer.GetQ()+1;
vHash.clear();
if(n_hashes>0)
{
const V_PreviusShift& shift = spaced_qmer.GetShiftMinChange();
//Compute hash
vHash.resize(n_hashes); // create the vector
GetHash(s_Str, 0, spaced_qmer, vHash[0], fConvertion);// the first hash must be computed from scratch
size_t lim_max = vHash.size();
size_t lim_min = shift.size() < lim_max ? shift.size() : lim_max;
for(size_t i = 1; i < lim_min; ++i)// early positions use their own per-index shift entry
get_hash(i, shift[i]);
for(size_t i = lim_min; i < lim_max; ++i)// steady state: every position uses the last shift entry
get_hash(i, shift.back());
}
// }
}
#endif /* HASHFUNCTION_H_ */
|
helloworld_mp.c | #include <omp.h>
#include <stdlib.h>
#include <stdio.h>
/*
 * Spawn an OpenMP parallel region; every thread prints a hello line with
 * its own thread number and the total thread count.
 *
 * Fixes: `char argv[]` is not a valid signature for main (must be
 * `char *argv[]`), and omp_threads was shared and written concurrently by
 * every thread (a data race) — both variables are now thread-private.
 */
int main(int argc, char *argv[])
{
    (void)argc;
    (void)argv;
    int omp_rank, omp_threads;
    /* Each thread queries and prints its own values, so both must be
       private to the parallel region. */
    #pragma omp parallel private(omp_rank, omp_threads)
    {
        omp_rank = omp_get_thread_num();
        omp_threads = omp_get_num_threads();
        printf("Hello World! by thread number %d of threads %d\n", omp_rank, omp_threads);
    }
    return 0;
}
|
conv_im2col_layer.h | //Tencent is pleased to support the open source community by making FeatherCNN available.
//Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
//Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
//in compliance with the License. You may obtain a copy of the License at
//
//https://opensource.org/licenses/BSD-3-Clause
//
//Unless required by applicable law or agreed to in writing, software distributed
//under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
//CONDITIONS OF ANY KIND, either express or implied. See the License for the
//specific language governing permissions and limitations under the License.
#pragma once
#include "../feather_simple_generated.h"
#include "conv_layer.h"
#include "blob.h"
#include "arm/generic_kernels.h"
#include "arm/sgemm.h"
#include <assert.h>
#include <stdio.h>
namespace feather
{
// Reference single-precision GEMM: C (MxN) = A (MxL) * B (LxN), all
// row-major, no transposition; C is fully overwritten.  Used only as the
// fallback/debug path, so clarity beats speed here.
void naive_sgemm(int M, int N, int L, float* A, float* B, float* C)
{
    for (int row = 0; row < M; ++row)
    {
        const float* a_row = A + row * L;
        float* c_row = C + row * N;
        for (int col = 0; col < N; ++col)
        {
            float acc = 0;
            // dot product of A's row with B's column, accumulated in
            // ascending k order (same order as the original)
            for (int k = 0; k < L; ++k)
                acc += a_row[k] * B[k * N + col];
            c_row[col] = acc;
        }
    }
}
class ConvIm2colLayer : public ConvLayer
{
public:
// Construct from the serialized layer parameters; img_buffer starts null and
// is bound to the shared memory pool in Forward().
// Fix: the base class must appear before members in the initializer list —
// initialization order is base-first regardless of listing order, so the
// original ordering was misleading and triggered -Wreorder.
ConvIm2colLayer(const LayerParameter *layer_param, const RuntimeParameter<float>* rt_param)
    : ConvLayer(layer_param, rt_param), img_buffer(0)
{
}
// Run the convolution: 1x1/stride-1 kernels go straight to the packed SGEMM;
// everything else is lowered to a matrix multiply via Im2col().  Adds the
// per-channel bias at the end when bias_term is set.  Returns 0 on success.
int Forward()
{
MEMPOOL_CHECK_RETURN(common_mempool->GetPtr(&img_buffer));
// guard against an unset/invalid group count
if(group <=0) group = 1;
#if 1
if (kernel_width == 1 && kernel_height == 1 && stride_height == 1 && stride_width == 1)
{
// 1x1 stride-1 convolution is a plain GEMM on the input — no im2col needed
if (output_channels % 8 == 0)
{
block_sgemm_external_pack_threading_8x8((int)output_channels, (int)output_width * (int)output_height,
(int)input_channels * (int)kernel_width * (int)kernel_height,
packed_kernel, input, output, (int)num_threads);
}
else
{
block_sgemm_external_pack_threading((int)output_channels, (int)output_width * (int)output_height,
(int)input_channels * (int)kernel_width * (int)kernel_height,
packed_kernel, input, output, (int)num_threads);
}
}
else
{
Im2col();
//jintaomeng support the case for group != input_channels
// each group multiplies its own slice of the im2col buffer
int block = (int)input_channels / group * (int)kernel_width * (int)kernel_height;
if (output_channels % 8 == 0)
{
for (int k = 0; k < group; k++)
block_sgemm_external_pack_threading_8x8((int)output_channels, (int)output_width * (int)output_height,
(int)input_channels / group * (int)kernel_width * (int)kernel_height,
packed_kernel, img_buffer + k * block, output, (int)num_threads);
}
else
{
for (int k = 0; k < group; k++)
block_sgemm_external_pack_threading((int)output_channels, (int)output_width * (int)output_height,
(int)input_channels / group * (int)kernel_width * (int)kernel_height,
packed_kernel, img_buffer + k * block, output, (int)num_threads);
}
}
#else
// reference path, kept for debugging the optimized kernels above
Im2col();
naive_sgemm(output_channels, output_height * output_width, input_channels * kernel_width * kernel_height, kernel_data, img_buffer, output);
#endif
if (bias_term)
{
// add bias_data[i] to every element of output channel i
size_t out_stride = output_width * output_height;
for (int i = 0; i < output_channels; ++i)
{
float bias = bias_data[i];
for (int j = 0; j < out_stride; ++j)
{
output[out_stride * i + j] = output[out_stride * i + j] + bias;
}
}
}
return 0;
}
bool Im2col()
{
const int stride = kernel_height * kernel_width * output_height * output_width;
if ((kernel_width == 1 && kernel_height == 1) && (stride_height == 2 && stride_width == 2))
{
float* ret = img_buffer;
#pragma omp parallel for num_threads(num_threads)
for (int k = 0; k < input_channels; k++)
{
int retID = stride * k;
{
for (int i = 0; i < output_height; i++)
{
for (int j = 0; j < output_width; j++)
{
//calculate each row
int row = 2 * i - (int)padding_top;
int col = 2 * j - (int)padding_left;
if (row < 0 || row >= input_height || col < 0 || col >= input_width)
{
ret[retID] = 0;
}
else
{
size_t index = k * input_width * input_height + row * input_width + col; //(i+u)*input_width+j+v;
ret[retID] = input[index];
}
retID++;
}
}
}
}
}
else
{
float* ret = img_buffer;
#pragma omp parallel for num_threads(num_threads)
for (int k = 0; k < input_channels; k++)
{
int retID = stride * k;
for (int u = 0; u < kernel_height; u++) for (int v = 0; v < kernel_width; v++)
{
for (int i = 0; i < output_height; i++)
{
for (int j = 0; j < output_width; j++)
{
//calculate each row
int row = u - (int)padding_top + i * (int)stride_height;
int col = v - (int)padding_left + j * (int)stride_width;
//printf("row %d, col %d\n", row, col);
if (row < 0 || row >= input_height || col < 0 || col >= input_width)
{
ret[retID] = 0;
}
else
{
size_t index = k * input_width * input_height + row * input_width + col; //(i+u)*input_width+j+v;
ret[retID] = input[index];
}
retID++;
}
}
}
}
}
return true;
}
int Init()
{
int M = (int)output_channels;
int L = (int)input_channels * (int)kernel_height * (int)kernel_width;
int eM = M + (8 - M % 8) % 8;
MEMPOOL_CHECK_RETURN(private_mempool.Alloc(&packed_kernel, sizeof(float) * eM * L));
MEMPOOL_CHECK_RETURN(common_mempool->Request(sizeof(float) * (input_channels * kernel_height * kernel_width) * (output_width * output_height)));
if (M % 8 == 0)
{
externalPackA8(M, L, packed_kernel, kernel_data, L);
}
else
{
externalPackA(M, L, packed_kernel, kernel_data, L);
}
//Setup input and output pointers.
input = _bottom_blobs[_bottom[0]]->data();
//_bottom_blobs[_bottom[0]]->PrintBlobInfo();
output = _top_blobs[_top[0]]->data();
//_top_blobs[_top[0]]->PrintBlobInfo();
//printf("++stride %d %d\n", stride_height, stride_width);
//printf("++padding %d %d %d %d\n", padding_left, padding_top, padding_right, padding_bottom);
//printf("++kernel %d %d\n", kernel_width, kernel_height);
//printf("++bias term %d\n", bias_term);
return 0;
}
private:
float* packed_kernel;
float* img_buffer;
float* input;
float* output;
};
};
|
t_cholmod_super_numeric.c | /* ========================================================================== */
/* === Supernodal/t_cholmod_super_numeric =================================== */
/* ========================================================================== */
/* -----------------------------------------------------------------------------
* CHOLMOD/Supernodal Module. Copyright (C) 2005-2012, Timothy A. Davis
* The CHOLMOD/Supernodal Module is licensed under Version 2.0 of the GNU
* General Public License. See gpl.txt for a text of the license.
* CHOLMOD is also available under other licenses; contact authors for details.
* http://www.suitesparse.com
* ---------------------------------------------------------------------------*/
/*
*
* Description:
* Contains functions for factorization
* of the elimination tree
*
*/
/* include */
#include "cholmod_internal.h"
#include "cholmod_template.h"
#undef TDEBUG
/*
* Function:
* cholmod_super_numeric
*
* Description:
* Factorizes elimination tree in one of two ways:
* 1. Splits tree into subtree and:
* a. factorize subtree with GPU subtree algorithm
* b. factorize top-of-tree subtree with root algorithm
* 2. Factorizes entire tree with CPU algorithm.
*
*/
static int TEMPLATE (cholmod_super_numeric)
(
    cholmod_sparse *A,    /* matrix to factorize */
    cholmod_sparse *F,    /* F = A' or A(:,f)' */
    double beta [2],      /* beta*I is added to diagonal of matrix to factorize */
    cholmod_factor *L,    /* factorization */
    cholmod_dense *Cwork, /* size (L->maxcsize)-by-1 */
    cholmod_common *Common
)
{
    /* local variables */
    Int i, j, k, size ;
    Int *LpxSub, *Iwork;
    struct cholmod_subtree_order_t *Bwork;
    double *tstart, *tend, *bstart, *bend, *Xwork;
    struct cholmod_global_pointers *gb_p, gb_pointer_struct;
    struct cholmod_cpu_pointers *cpu_p, cpu_pointer_struct;
    struct cholmod_gpu_pointers *gpu_p, gpu_pointer_struct;
    struct cholmod_tree_pointers *tree_p, tree_pointer_struct;
    struct cholmod_profile_pointers *prof_p, prof_pointer_struct;
    struct cholmod_loadbalance_pointers *lb_p, lb_pointer_struct;
#ifdef TDEBUG
    double subtree_process_time, subtree_factorize_time, root_time;
#endif

    /* set structure pointers */
    gb_p   = &gb_pointer_struct ;
    cpu_p  = &cpu_pointer_struct ;
    gpu_p  = &gpu_pointer_struct ;
    tree_p = &tree_pointer_struct ;
    prof_p = &prof_pointer_struct ;
    lb_p   = &lb_pointer_struct ;

    /* clear global variables */
    gb_p->runType    = 0;
    gb_p->numGPU     = 0;
    gb_p->numDevice  = 0;
    gb_p->numSubtree = 0;
    gb_p->numRoot    = 0;
    gb_p->work_size  = 0;
    gb_p->maxCsize   = 0;
    gb_p->maxndesc   = 0;
    gb_p->maxbatch   = 0;
    gb_p->maxnsrow   = 0;
    gb_p->maxnscol   = 0;
    /* FIX: has_root was read below (serial path and do-while condition)
     * without ever being initialized; clear it with the other flags. */
    gb_p->has_root   = FALSE;
    for(i=0; i < CHOLMOD_MAX_NUM_PGPUS; i++) gb_p->check[i] = 0;

    PRINTF("\n\n\n");
    PRINTFV("useGPU: %d\n",Common->useGPU);
    PRINTFV("numGPU: %d\n",Common->numGPU);
    PRINTFV("useHybrid: %d\n",Common->useHybrid);
    PRINTFV("ompNumThreads: %d\n",Common->ompNumThreads);
    PRINTFV("partialFactorization: %d\n",Common->partialFactorization);
    PRINTFV("maxGpuMemBytes: %ld\n",Common->maxGpuMemBytes);

    /* hybrid is enabled */
    if(Common->useHybrid == 1) {
        gb_p->runType = 0;  /* set to hybrid */
    }
    else {
        gb_p->runType = 2;  /* set to GPU only */
    }

    /* not enough supernodes in the elimination tree */
    if(L->nsuper <= SUPERNODE_MIN) {
        gb_p->runType = -1; /* set to CPU serial */
    }

    /* GPU is not enabled */
    if(Common->numGPU == 0 || Common->useGPU == 0) {
        gb_p->runType = 1;  /* set to CPU only */
    }

    /* matrix is complex */
#ifdef COMPLEX
    if(gb_p->runType != 1 && gb_p->runType != -1) {
        gb_p->runType = 3;  /* set to root only */
    }
#endif

    /* determine whether to use CPU serial */
    if((Common->ompNumThreads == 1 && Common->useGPU == 0) || Common->partialFactorization == 1) {
        gb_p->runType = -1; /* set to CPU serial */
    }

    /* GPU is not enabled */
#ifndef SUITESPARSE_CUDA
    if(Common->partialFactorization == 1)
        gb_p->runType = -1;
    else
        gb_p->runType = 1;
#endif

    /* print type of run */
    PRINTFV("\nrunType:%d\t",gb_p->runType);
    if(gb_p->runType == 0) PRINTF("GPU + CPU (hybrid)\n");
    if(gb_p->runType == 1) PRINTF("CPU only\n");
    if(gb_p->runType == 2) PRINTF("GPU only\n");
    if(gb_p->runType == 3) PRINTF("root only\n");

    /* allocate memory for subtree algorithm */
    if(gb_p->runType != -1) {   /* only if subtree algorithm chosen */

        /* determine size for load-balance arrays */
        if(L->nsuper < CHOLMOD_MAX_NUM_PGPUS) size = CHOLMOD_MAX_NUM_PGPUS+2;
        else size = L->nsuper;

        /* allocate workspace */
        gb_p->IworkSize = 26*(L->nsuper + 1) + (Common->numGPU_physical+4)*(size + 1);
        gb_p->XworkSize = 2*(L->nsuper + 1) + (size + 1);
        gb_p->BworkSize = L->nsuper;
        gb_p->Iwork = CHOLMOD(malloc) (gb_p->IworkSize, sizeof (Int), Common) ;
        gb_p->Xwork = CHOLMOD(malloc) (gb_p->XworkSize, sizeof (double), Common) ;
        gb_p->Bwork = CHOLMOD(malloc) (gb_p->BworkSize, sizeof (struct cholmod_subtree_order_t), Common) ;
        Iwork = gb_p->Iwork;
        Xwork = gb_p->Xwork;
        Bwork = gb_p->Bwork;

        /* check if enough memory */
        if (Common->status < CHOLMOD_OK)
        {
            gb_p->Iwork = CHOLMOD(free) (gb_p->IworkSize, sizeof (Int), gb_p->Iwork, Common) ;
            gb_p->Xwork = CHOLMOD(free) (gb_p->XworkSize, sizeof (double), gb_p->Xwork, Common) ;
            gb_p->Bwork = CHOLMOD(free) (gb_p->BworkSize, sizeof (struct cholmod_subtree_order_t), gb_p->Bwork, Common) ;
            return (FALSE) ;
        }

        /* clear workspace */
        memset(Iwork,0,gb_p->IworkSize*sizeof(Int));
        memset(Xwork,0,gb_p->XworkSize*sizeof(double));
        memset(Bwork,0,gb_p->BworkSize*sizeof(struct cholmod_subtree_order_t));

        /* carve the Int workspace into the tree / load-balance arrays,
         * each of size (L->nsuper + 1) unless noted */
        tree_p->supernode_subtree             = Iwork;
        tree_p->supernode_subtree_ptrs        = Iwork + 1*(size_t)(L->nsuper + 1);
        tree_p->supernode_batch               = Iwork + 2*(size_t)(L->nsuper + 1);
        tree_p->supernode_levels              = Iwork + 3*(size_t)(L->nsuper + 1);
        tree_p->supernode_levels_ptrs         = Iwork + 4*(size_t)(L->nsuper + 1);
        tree_p->supernode_levels_subtree_ptrs = Iwork + 5*(size_t)(L->nsuper + 1);
        tree_p->supernode_parent              = Iwork + 6*(size_t)(L->nsuper + 1);
        tree_p->supernode_children            = Iwork + 7*(size_t)(L->nsuper + 1);
        tree_p->supernode_children_ptrs       = Iwork + 8*(size_t)(L->nsuper + 1);
        tree_p->supernode_children_num        = Iwork + 9*(size_t)(L->nsuper + 1);
        tree_p->supernode_children_num2       = Iwork + 10*(size_t)(L->nsuper + 1);
        tree_p->supernode_children_count      = Iwork + 11*(size_t)(L->nsuper + 1);
        tree_p->supernode_children_count2     = Iwork + 12*(size_t)(L->nsuper + 1);
        tree_p->supernode_num_levels          = Iwork + 13*(size_t)(L->nsuper + 1);
        tree_p->level_descendants             = Iwork + 14*(size_t)(L->nsuper + 1);
        tree_p->level_descendants_ptrs        = Iwork + 15*(size_t)(L->nsuper + 1);
        tree_p->level_num_desc                = Iwork + 16*(size_t)(L->nsuper + 1);
        tree_p->level_num_desc_ptrs           = Iwork + 17*(size_t)(L->nsuper + 1);
        tree_p->supernode_size_desc           = Iwork + 18*(size_t)(L->nsuper + 1);
        tree_p->supernode_size                = Iwork + 19*(size_t)(L->nsuper + 1);
        memset(tree_p->supernode_size, 0, L->nsuper*sizeof(Int));
        tree_p->supernode_root                = Iwork + 20*(size_t)(L->nsuper + 1);
        tree_p->factor_size                   = Iwork + 21*(size_t)(L->nsuper + 1);
        tree_p->ndescendants                  = Iwork + 22*(size_t)(L->nsuper + 1);
        tree_p->factorized                    = Iwork + 23*(size_t)(L->nsuper + 1);
        memset(tree_p->factorized, 0, L->nsuper*sizeof(Int));
        tree_p->parent_subtree                = Iwork + 24*(size_t)(L->nsuper + 1);
        memset(tree_p->parent_subtree, 0, L->nsuper*sizeof(Int));
        lb_p->numSubtreePerDevice  = Iwork + 25*(size_t)(L->nsuper + 1);
        lb_p->listSubtreePerDevice = Iwork + 25*(size_t)(L->nsuper + 1) + (size_t)(size + 1);
        LpxSub = Iwork + 25*(size_t)(L->nsuper + 1) + (Common->numGPU_physical+4)*(size_t)(size + 1);

        tree_p->supernode_flop = Xwork;
        lb_p->subtreeSize      = Xwork + (size_t)(L->nsuper + 1);
        lb_p->workPerDevice    = Xwork + 2*(size_t)(L->nsuper + 1);
        lb_p->subtreeReorder   = Bwork;
    }

    /* allocate integer workspace */
    Iwork = Common->Iwork;
    cpu_p->SuperMap       = Iwork;
    cpu_p->RelativeMap    = Iwork + L->n;
    cpu_p->Next           = Iwork + 2*((size_t)L->n);
    cpu_p->Previous       = Iwork + 2*((size_t)L->n) + 1*((size_t)L->nsuper);
    cpu_p->Lpos           = Iwork + 2*((size_t)L->n) + 2*((size_t)L->nsuper);
    cpu_p->Next_save      = Iwork + 2*((size_t)L->n) + 3*((size_t)L->nsuper);
    cpu_p->Lpos_save      = Iwork + 2*((size_t)L->n) + 4*((size_t)L->nsuper);
    cpu_p->Next_local     = Iwork + 2*((size_t)L->n) + 5*((size_t)L->nsuper);
    cpu_p->Previous_local = Iwork + 2*((size_t)L->n) + 6*((size_t)L->nsuper);
    cpu_p->Lpos_local     = Iwork + 2*((size_t)L->n) + 7*((size_t)L->nsuper);

    /* set host pointers */
    cpu_p->C       = Cwork->x ;
    cpu_p->Map     = Common->Flag ;
    cpu_p->Head    = Common->Head ;
    cpu_p->Ls      = L->s ;
    cpu_p->Lpi     = L->pi ;
    cpu_p->Lpx     = L->px;
    cpu_p->Super   = L->super ;
    cpu_p->Lx      = L->x ;
    cpu_p->stype   = A->stype ;
    cpu_p->beta    = beta;
    cpu_p->Ap      = A->p ;
    cpu_p->Ai      = A->i ;
    cpu_p->Ax      = A->x ;
    cpu_p->Az      = A->z ;
    cpu_p->Anz     = A->nz ;
    cpu_p->Apacked = A->packed ;
    if (cpu_p->stype != 0)
    {
        /* symmetric case: F is not used */
        cpu_p->Fp      = NULL ;
        cpu_p->Fi      = NULL ;
        cpu_p->Fx      = NULL ;
        cpu_p->Fz      = NULL ;
        cpu_p->Fnz     = NULL ;
        cpu_p->Fpacked = TRUE ;
    }
    else
    {
        cpu_p->Fp      = F->p ;
        cpu_p->Fi      = F->i ;
        cpu_p->Fx      = F->x ;
        cpu_p->Fz      = F->z ;
        cpu_p->Fnz     = F->nz ;
        cpu_p->Fpacked = F->packed ;
    }

    /* clear the supernode link lists */
    for (i = 0 ; i < L->nsuper ; i++)
    {
        cpu_p->Head[i] = EMPTY;
        cpu_p->Next[i] = EMPTY;
    }

    /* set timer pointers */
    tstart = prof_p->g_start;
    tend   = prof_p->g_end;
    bstart = prof_p->b_start;
    bend   = prof_p->b_end;

    /* check if functionality available - (not supported for GPU subtree) */
    if(cpu_p->Apacked==0 || cpu_p->stype==0 || cpu_p->beta[0]!=0) {
        if(gb_p->runType != 1 && gb_p->runType != -1) {
            gb_p->runType = 3;  /* set to root only */
        }
    }

#ifdef SUITESPARSE_CUDA
    /* clear floating point exceptions */
    if (feclearexcept(FE_OVERFLOW | FE_UNDERFLOW | FE_DIVBYZERO | FE_INVALID | FE_INEXACT | FE_ALL_EXCEPT)){
        PRINTF("\nfloating-point exceptions not cleared!\n");
    }
    else{
        PRINTF("\nfloating-point exceptions cleared!\n");
    }
#endif

    /* clear the Map so that changes in the pattern of A can be detected */
    #pragma omp parallel for num_threads(Common->ompNumThreads) if ( L->n > 128 ) schedule (static)
    for (i = 0 ; i < L->n ; i++)
        cpu_p->Map [i] = EMPTY ;

    /*
     * Serial Factorization
     *
     * Description:
     *   Performs serial factorization on the elimination tree.
     *   Steps:
     *     1. factorize elimination tree serially
     */
    if(gb_p->runType == -1)
    {
        PRINTF("\n\n\nSERIAL FACTORIZATION selected..\n");
        int deviceid = 0, check = 0;
        check = TEMPLATE2 (CHOLMOD(gpu_factorize_cpu_serial))( Common, L, gb_p, cpu_p, tree_p, prof_p, deviceid);
        /* early exit if not positive-definite (no subtree workspace was
         * allocated in this path, so nothing to free here) */
        if(check) return (Common->status >= CHOLMOD_OK);
    }

    /*
     * Parallel Factorization
     *
     * Description:
     *   Performs parallel factorization on the elimination tree.
     *   Steps:
     *     1. build elimination tree
     *     2. build subtrees (through binary search)
     *     3. load balance devices
     *     4. initialize CPU & GPU
     *     5. factorize subtrees in parallel
     *     6. factorize root
     */
    if(gb_p->runType != -1)
    {
        PRINTF("\n\n\nPARALLEL FACTORIZATION selected..\n");

        /* start factorize timer.. */
        TIMER_START(tstart,0);

        /*
         * Build elimination tree
         *
         * Description:
         *   stores information about elimination tree:
         *   supernode sizes, # descendants, # children, children, parents, root.
         */
        PRINTF("\n\n\nbuild elimination tree..\n");
        TIMER_START(tstart,1);
        TEMPLATE2 (CHOLMOD (build_tree))( Common,L,gb_p,cpu_p,tree_p );
        /* store copy of # children per supernode */
        memcpy(tree_p->supernode_children_num2, tree_p->supernode_children_num, L->nsuper*sizeof(Int));
        TIMER_END(tstart,tend,1);

#define SUBTREE_ONLY
        int has_subtree;
#ifdef SUBTREE_ONLY
        do
#endif
        {
#ifdef TDEBUG
            subtree_process_time = SuiteSparse_time();
#endif
            /*
             * Binary search for optimal subtree size
             *
             * Description:
             *   perform binary search to find optimal subtree size. Performs up
             *   to BINARY_SEARCH steps.
             */
            PRINTF("\n\n\nprocess subtree (binary search) ..\n");
            TIMER_START(tstart,2);
            TEMPLATE2 (CHOLMOD(binarysearch_tree))( Common, A, L, gb_p, cpu_p, tree_p, LpxSub);
            TIMER_END(tstart,tend,2);

            /*
             * Load-balance Devices
             *
             * Description:
             *   Reorder subtree (subtrees) by size, which is quantified by its
             *   workload (flop/flops).  Then load-balance subtree to different
             *   device (GPU & CPU), for maximum utilization.
             */
            PRINTF("\n\n\nload-balance devices..\n");
            TIMER_START(tstart,3);
            memset (lb_p->numSubtreePerDevice, 0, sizeof(Int) * size);
            TEMPLATE2 (CHOLMOD(loadbalance_gpu))( Common,gb_p,tree_p,lb_p);
            TIMER_END(tstart,tend,3);

            /*
             * Initialize GPU & CPU
             *
             * Description:
             *   1. initialize GPU (set pointers, copy memory, etc.)
             *   2. initialize CPU (clear Lx factor, allocate memory for
             *      parallel CPU algorithm)
             */
            PRINTF("\n\n\ninit GPU & CPU..\n");
            TIMER_START(tstart,4);
            TEMPLATE2 (CHOLMOD(initialize_gpu))(Common,L,A,gb_p,gpu_p,cpu_p);  /* initialize GPU */
            TEMPLATE2 (CHOLMOD(initialize_cpu))(Common,L,gb_p,cpu_p,tree_p);   /* initialize CPU */
            TIMER_END(tstart,tend,4);

            /* print system information */
            PRINTF("\n\n\nfactorize tree..\n");
            PRINTFV("total # supernodes: %d\n",L->nsuper);
            PRINTFV("numSubtree: %d\n",gb_p->numSubtree);
            PRINTFV("numDevice: %d\n",gb_p->numDevice);
            for(i = 0; i < Common->numGPU_physical+2; i++) {
                PRINTFV("device:%d ",i);
                PRINTFV("numSubtreePerDevice:%d ",lb_p->numSubtreePerDevice[i]);
                PRINTFV("workPerDevice:%d\n",lb_p->workPerDevice[i]);
            }
            PRINTF("\n\ntype of run: ");
            if(gb_p->runType == 0) PRINTF("GPU + CPU (hybrid)\n");
            if(gb_p->runType == 1) PRINTF("CPU only\n");
            if(gb_p->runType == 2) PRINTF("GPU only\n");
            if(gb_p->runType == 3) PRINTF("root only\n");

            /*
             * Supernodal numerical factorization (with GPU & CPU)
             *
             * Description:
             *   factorization using three algorithms:
             *     1. GPU (subtree that fits GPU)
             *     2. CPU (subtree concurrent with GPU)
             *     3. root (CPU/GPU) (last subtree that does not fit GPU)
             *
             *   If root_only or CPU_only = 1, the factorization is done
             *   entirely on the root or CPU.
             */
            /* start timer for factorization */
            PRINTF("\n\n\nsupernodal numerical factorization..\n");
            TIMER_START(tstart,5);

            omp_set_nested(1);  /* allow for nested omp */

            /* set # omp threads:
             *   1. CPU only: 1
             *   2. GPU only: Common->numGPU_physical
             *   3. hybrid:   Common->numGPU_physical + 1
             */
            if(gb_p->runType == 1)      gb_p->numDevice = 1;                            /* CPU only */
            else if(gb_p->runType == 2) gb_p->numDevice = Common->numGPU_physical;      /* GPU only */
            else                        gb_p->numDevice = Common->numGPU_physical + 1;  /* GPU + CPU (hybrid) */

#ifdef TDEBUG
            printf ("subtree process time = %lf\n", SuiteSparse_time() - subtree_process_time);
#endif

            int subtree_idx;
            has_subtree = 0;
            for (subtree_idx = 0; subtree_idx < gb_p->numSubtreeProper; subtree_idx++)
            {
                has_subtree += tree_p->supernode_num_levels[subtree_idx];
            }

            if (has_subtree)
            {
#ifdef TDEBUG
                subtree_factorize_time = SuiteSparse_time();
#endif
                /* loop over all devices (GPU,CPU) */
                #pragma omp parallel num_threads(gb_p->numDevice)
                {
                    /* local variables */
                    int deviceid, subtreeid, numSubtreePerDevice, check = 0;

                    /* set variables */
                    deviceid = omp_get_thread_num();    /* set device id */
                    numSubtreePerDevice = (int)(lb_p->numSubtreePerDevice[deviceid]);

                    /*
                     * GPU subtree algorithm
                     *
                     * Description:
                     *   Performs factorization on subtree of the elimination
                     *   tree.  Uses GPU only algorithm.  Case where subtree of
                     *   elimination tree fits the GPU.  Is optimized for small
                     *   matrices.
                     */
                    if(deviceid < Common->numGPU_physical)
                    {
                        /* set device */
#ifdef SUITESPARSE_CUDA
                        cudaSetDevice(deviceid);
#endif
                        /* loop over subtree in current GPU device */
                        for(subtreeid = 0; subtreeid < numSubtreePerDevice; subtreeid++)
                        {
                            /* get current subtree & # supernodes */
                            Int subtree = lb_p->listSubtreePerDevice[subtreeid + deviceid*gb_p->numSubtree];
                            if (tree_p->supernode_num_levels[subtree] == 0) continue;
                            PRINTF("\n\nGPU start -\t");
                            PRINTFV("device:%d ",deviceid);
                            PRINTFV("subtree:%d ",subtree);
                            TIMER_START(bstart,deviceid);
                            TEMPLATE2 (CHOLMOD(gpu_factorize_subtree))( Common, gb_p, gpu_p, cpu_p, tree_p, prof_p, L, deviceid, subtree, LpxSub);
                            TIMER_END(bstart,bend,deviceid);
                            PRINTF("\n\nGPU end -\t");
                            PRINTFV("device:%d ",deviceid);
                            PRINTFV("subtree:%d ",subtree);
                            PRINTFV("subtreeSize:%f ",lb_p->subtreeSize[subtree]);
                            PRINTFV("time:%f\n",bend[deviceid]);
                        }   /* end loop over subtree */
                    }   /* end if GPU subtree */

                    /*
                     * CPU algorithm
                     *
                     * Description:
                     *   Performs factorization on subtree of the elimination
                     *   tree.  Uses CPU only algorithm.  Goal of utilizing CPU
                     *   while GPU is busy.  If CPU_only = 1, performs
                     *   factorization on entire tree.
                     *
                     *   Call one of two functions:
                     *     1. gpu_factorize_cpu_serial   (serial factorization)
                     *     2. gpu_factorize_cpu_parallel (parallel factorization)
                     */
                    if(deviceid == Common->numGPU_physical)
                    {
                        /* loop over subtree in CPU device */
                        for(subtreeid = 0; subtreeid < numSubtreePerDevice; subtreeid++)
                        {
                            /* get current subtree & # supernodes */
                            Int subtree = lb_p->listSubtreePerDevice[subtreeid + deviceid*gb_p->numSubtree];
                            PRINTF("\n\nCPU start -\t");
                            PRINTFV("device:%d ",deviceid);
                            PRINTFV("subtree:%d ",subtree);
                            TIMER_START(bstart,deviceid);
                            check = TEMPLATE2 (CHOLMOD(gpu_factorize_cpu_parallel))( Common, L, gb_p, cpu_p, tree_p, prof_p, deviceid, subtree);
                            TIMER_END(bstart,bend,deviceid);
                            PRINTF("\n\nCPU end -\t");
                            PRINTFV("device:%d ",deviceid);
                            PRINTFV("subtree:%d ",subtree);
                            PRINTFV("subtreeSize:%f ",lb_p->subtreeSize[subtree]);
                            PRINTFV("time:%f\n",bend[deviceid]);
                            /* record a not-positive-definite result per device */
                            if(check) gb_p->check[deviceid] = check;
                        }   /* end loop over subtree */
                    }   /* end if CPU subtree */
                }   /* end loop over devices (OMP threads) */
#ifdef TDEBUG
                printf ("subtree factorize time = %lf\n", SuiteSparse_time() - subtree_factorize_time);
#endif
            }
        }
#ifdef SUBTREE_ONLY
        while (has_subtree && gb_p->has_root == TRUE);
#endif

        /* early exit if subtree not positive-definite */
        for(i=0; i < CHOLMOD_MAX_NUM_PGPUS; i++) {
            if(gb_p->check[i]) {
                /* FIX: free the subtree workspace before the early exit; it
                 * used to leak here.
                 * NOTE(review): this early exit also skips CHOLMOD(gpu_end);
                 * confirm whether that is intentional. */
                gb_p->Iwork = CHOLMOD(free) (gb_p->IworkSize, sizeof (Int), gb_p->Iwork, Common) ;
                gb_p->Xwork = CHOLMOD(free) (gb_p->XworkSize, sizeof (double), gb_p->Xwork, Common) ;
                gb_p->Bwork = CHOLMOD(free) (gb_p->BworkSize, sizeof (struct cholmod_subtree_order_t), gb_p->Bwork, Common) ;
                return (Common->status >= CHOLMOD_OK);
            }
        }

        if (gb_p->has_root == TRUE)
        {
            /*
             * root algorithm
             *
             * Description:
             *   Performs factorization on top-of-tree subtree of the
             *   elimination tree.  Uses CPU/GPU algorithm.  Optimized
             *   for large matrices.  Case where subtree does not fit
             *   the GPU.  If root_only = 1, performs factorization on
             *   entire tree.
             */
            int deviceid = Common->numGPU_physical+1;
            int subtreeid, check = 0;
            int numSubtreePerDevice = (int)(lb_p->numSubtreePerDevice[deviceid]);

            if(deviceid == Common->numGPU_physical+1)
            {
                /* loop over subtree in root */
                for(subtreeid = 0; subtreeid < numSubtreePerDevice; subtreeid++)
                {
                    /* get current subtree & # supernodes */
                    Int subtree = lb_p->listSubtreePerDevice[subtreeid + deviceid*gb_p->numSubtree];
                    /* FIX: was an unconditional printf to stdout; use the
                     * library's debug macro like every other message here */
                    PRINTF("root factorization\n");
                    PRINTF("\n\nroot start -\t");
                    PRINTFV("device:%d ",deviceid);
                    PRINTFV("subtree:%d ",subtree);
                    TIMER_START(bstart,deviceid);
#ifdef TDEBUG
                    root_time = SuiteSparse_time();
#endif
                    check = TEMPLATE2 (CHOLMOD(gpu_factorize_root_parallel))( Common, L, gpu_p, cpu_p, tree_p, subtree );
#ifdef TDEBUG
                    printf ("root time = %lf\n", SuiteSparse_time() - root_time);
#endif
                    TIMER_END(bstart,bend,deviceid);
                    PRINTF("\n\nroot end -\t");
                    PRINTFV("device:%d ",deviceid);
                    PRINTFV("subtree:%d ",subtree);
                    PRINTFV("subtreeSize:%f ",lb_p->subtreeSize[subtree]);
                    PRINTFV("time:%f\n",bend[deviceid]);
                    if(check) {
                        /* early exit if not positive-definite.
                         * FIX: free the subtree workspace first; it used to
                         * leak here. */
                        gb_p->Iwork = CHOLMOD(free) (gb_p->IworkSize, sizeof (Int), gb_p->Iwork, Common) ;
                        gb_p->Xwork = CHOLMOD(free) (gb_p->XworkSize, sizeof (double), gb_p->Xwork, Common) ;
                        gb_p->Bwork = CHOLMOD(free) (gb_p->BworkSize, sizeof (struct cholmod_subtree_order_t), gb_p->Bwork, Common) ;
                        return (Common->status >= CHOLMOD_OK);
                    }
                }   /* end loop over subtree */
            }   /* end if root subtree */
        }

        TIMER_END(tstart,tend,5);
        TIMER_END(tstart,tend,0);

        /* Print timers */
        PRINTF("\n\n\n");
        PRINTFV("total:             \t%f\n",tend[0]);
        PRINTFV("construct tree:    \t%f\n",tend[1]);
        PRINTFV("construct subtree: \t%f\n",tend[2]);
        PRINTFV("load-balance:      \t%f\n",tend[3]);
        PRINTFV("init GPU & CPU:    \t%f\n",tend[4]);
        PRINTFV("factorize:         \t%f\n",tend[5]);
        PRINTF("\n");
    }   /* end if parallel factorization */

    /* success; matrix is positive definite */
    L->minor = L->n ;

    PRINTF("\n\n\nfree GPU & CPU..\n");
#ifdef SUITESPARSE_CUDA
    /* finalize gpu */
    CHOLMOD (gpu_end) (Common) ;
#endif

    /* free arrays used for subtree algorithm */
    if(gb_p->runType != -1)
    {
        gb_p->Iwork = CHOLMOD(free) (gb_p->IworkSize, sizeof (Int), gb_p->Iwork, Common) ;
        gb_p->Xwork = CHOLMOD(free) (gb_p->XworkSize, sizeof (double), gb_p->Xwork, Common) ;
        gb_p->Bwork = CHOLMOD(free) (gb_p->BworkSize, sizeof (struct cholmod_subtree_order_t), gb_p->Bwork, Common) ;
        /* if(gb_p->runType != 3 && gb_p->runType != 2) */
        {
            /* FIX: these two frees previously passed gb_p->Iwork (already
             * freed and set to NULL just above), which silently leaked Cwork
             * and Mapwork; pass the correct pointers. */
            gb_p->Cwork   = CHOLMOD(free) (gb_p->CworkSize, sizeof (double), gb_p->Cwork, Common) ;
            gb_p->Mapwork = CHOLMOD(free) (gb_p->MapworkSize, sizeof (Int), gb_p->Mapwork, Common) ;
        }
    }

    PRINTF("\n\n\nend t_cholmod_super_numeric..\n\n\n");
    return (Common->status >= CHOLMOD_OK) ;
}
#undef TDEBUG
#undef PATTERN
#undef REAL
#undef COMPLEX
#undef ZOMPLEX
|
red_black_gs.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
/******************************************************************************
*
* This routine assumes a 3-pt (1D), 5-pt (2D), or 7-pt (3D) stencil.
*
*****************************************************************************/
#include "_hypre_struct_ls.h"
#include "red_black_gs.h"
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
void *
hypre_RedBlackGSCreate( MPI_Comm comm )
{
   hypre_RedBlackGSData *rb_data;

   /* allocate zero-initialized relax data and stamp the communicator */
   rb_data = hypre_CTAlloc(hypre_RedBlackGSData, 1);
   (rb_data -> comm)       = comm;
   (rb_data -> time_index) = hypre_InitializeTiming("RedBlackGS");

   /* solver defaults */
   (rb_data -> tol)        = 1.0e-06;
   (rb_data -> max_iter)   = 1000;
   (rb_data -> rel_change) = 0;
   (rb_data -> zero_guess) = 0;
   (rb_data -> rb_start)   = 1;
   (rb_data -> flops)      = 0;

   /* problem objects are attached later, in Setup */
   (rb_data -> A)           = NULL;
   (rb_data -> b)           = NULL;
   (rb_data -> x)           = NULL;
   (rb_data -> compute_pkg) = NULL;

   return (void *) rb_data;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_RedBlackGSDestroy( void *relax_vdata )
{
   hypre_RedBlackGSData *rb_data = (hypre_RedBlackGSData *)relax_vdata;

   /* tolerate a NULL handle, same as the original guard */
   if (!rb_data)
   {
      return hypre_error_flag;
   }

   /* drop the references taken in Setup, then the timing and the data itself */
   hypre_StructMatrixDestroy(rb_data -> A);
   hypre_StructVectorDestroy(rb_data -> b);
   hypre_StructVectorDestroy(rb_data -> x);
   hypre_ComputePkgDestroy(rb_data -> compute_pkg);
   hypre_FinalizeTiming(rb_data -> time_index);
   hypre_TFree(rb_data);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_RedBlackGSSetup( void *relax_vdata,
                       hypre_StructMatrix *A,
                       hypre_StructVector *b,
                       hypre_StructVector *x )
{
   hypre_RedBlackGSData *rb_data = (hypre_RedBlackGSData *)relax_vdata;
   hypre_StructGrid     *grid    = hypre_StructMatrixGrid(A);
   hypre_StructStencil  *stencil = hypre_StructMatrixStencil(A);
   hypre_ComputeInfo    *compute_info;
   hypre_ComputePkg     *compute_pkg;
   hypre_Index           diag_index;
   HYPRE_Int             diag_rank;

   /*----------------------------------------------------------
    * Locate the diagonal coefficient within the stencil
    *----------------------------------------------------------*/
   hypre_SetIndex3(diag_index, 0, 0, 0);
   diag_rank = hypre_StructStencilElementRank(stencil, diag_index);

   /*----------------------------------------------------------
    * Build the communication/computation package for x
    *----------------------------------------------------------*/
   hypre_CreateComputeInfo(grid, stencil, &compute_info);
   hypre_ComputePkgCreate(compute_info, hypre_StructVectorDataSpace(x), 1,
                          grid, &compute_pkg);

   /*----------------------------------------------------------
    * Record everything in the relax data structure (taking refs)
    *----------------------------------------------------------*/
   (rb_data -> A)           = hypre_StructMatrixRef(A);
   (rb_data -> x)           = hypre_StructVectorRef(x);
   (rb_data -> b)           = hypre_StructVectorRef(b);
   (rb_data -> diag_rank)   = diag_rank;
   (rb_data -> compute_pkg) = compute_pkg;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_RedBlackGS( void *relax_vdata,
hypre_StructMatrix *A,
hypre_StructVector *b,
hypre_StructVector *x )
{
hypre_RedBlackGSData *relax_data = (hypre_RedBlackGSData *)relax_vdata;
HYPRE_Int max_iter = (relax_data -> max_iter);
HYPRE_Int zero_guess = (relax_data -> zero_guess);
HYPRE_Int rb_start = (relax_data -> rb_start);
HYPRE_Int diag_rank = (relax_data -> diag_rank);
hypre_ComputePkg *compute_pkg = (relax_data -> compute_pkg);
HYPRE_Int ndim = hypre_StructMatrixNDim(A);
hypre_CommHandle *comm_handle;
hypre_BoxArrayArray *compute_box_aa;
hypre_BoxArray *compute_box_a;
hypre_Box *compute_box;
hypre_Box *A_dbox;
hypre_Box *b_dbox;
hypre_Box *x_dbox;
HYPRE_Int Ai, Astart, Ani, Anj;
HYPRE_Int bi, bstart, bni, bnj;
HYPRE_Int xi, xstart, xni, xnj;
HYPRE_Int xoff0, xoff1, xoff2, xoff3, xoff4, xoff5;
HYPRE_Real *Ap;
HYPRE_Real *Ap0, *Ap1, *Ap2, *Ap3, *Ap4, *Ap5;
HYPRE_Real *bp;
HYPRE_Real *xp;
hypre_IndexRef start;
hypre_Index loop_size;
hypre_StructStencil *stencil;
hypre_Index *stencil_shape;
HYPRE_Int stencil_size;
HYPRE_Int offd[6];
HYPRE_Int iter, rb, redblack, d;
HYPRE_Int compute_i, i, j, ii, jj, kk;
HYPRE_Int ni, nj, nk;
/*----------------------------------------------------------
* Initialize some things and deal with special cases
*----------------------------------------------------------*/
hypre_BeginTiming(relax_data -> time_index);
hypre_StructMatrixDestroy(relax_data -> A);
hypre_StructVectorDestroy(relax_data -> b);
hypre_StructVectorDestroy(relax_data -> x);
(relax_data -> A) = hypre_StructMatrixRef(A);
(relax_data -> x) = hypre_StructVectorRef(x);
(relax_data -> b) = hypre_StructVectorRef(b);
(relax_data -> num_iterations) = 0;
/* if max_iter is zero, return */
if (max_iter == 0)
{
/* if using a zero initial guess, return zero */
if (zero_guess)
{
hypre_StructVectorSetConstantValues(x, 0.0);
}
hypre_EndTiming(relax_data -> time_index);
return hypre_error_flag;
}
else
{
stencil = hypre_StructMatrixStencil(A);
stencil_shape = hypre_StructStencilShape(stencil);
stencil_size = hypre_StructStencilSize(stencil);
/* get off-diag entry ranks ready */
i = 0;
for (j = 0; j < stencil_size; j++)
{
if (j != diag_rank)
{
offd[i] = j;
i++;
}
}
}
/*----------------------------------------------------------
* Do zero_guess iteration
*----------------------------------------------------------*/
rb = rb_start;
iter = 0;
if (zero_guess)
{
for (compute_i = 0; compute_i < 2; compute_i++)
{
switch(compute_i)
{
case 0:
{
compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg);
}
break;
case 1:
{
compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg);
}
break;
}
hypre_ForBoxArrayI(i, compute_box_aa)
{
compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), i);
b_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(b), i);
x_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);
Ap = hypre_StructMatrixBoxData(A, i, diag_rank);
bp = hypre_StructVectorBoxData(b, i);
xp = hypre_StructVectorBoxData(x, i);
hypre_ForBoxI(j, compute_box_a)
{
compute_box = hypre_BoxArrayBox(compute_box_a, j);
start = hypre_BoxIMin(compute_box);
hypre_BoxGetSize(compute_box, loop_size);
/* Are we relaxing index start or start+(1,0,0)? */
redblack = rb;
for (d = 0; d < ndim; d++)
{
redblack += hypre_IndexD(start, d);
}
redblack = hypre_abs(redblack) % 2;
Astart = hypre_BoxIndexRank(A_dbox, start);
bstart = hypre_BoxIndexRank(b_dbox, start);
xstart = hypre_BoxIndexRank(x_dbox, start);
ni = hypre_IndexX(loop_size);
nj = hypre_IndexY(loop_size);
nk = hypre_IndexZ(loop_size);
Ani = hypre_BoxSizeX(A_dbox);
bni = hypre_BoxSizeX(b_dbox);
xni = hypre_BoxSizeX(x_dbox);
Anj = hypre_BoxSizeY(A_dbox);
bnj = hypre_BoxSizeY(b_dbox);
xnj = hypre_BoxSizeY(x_dbox);
if (ndim < 3)
{
nk = 1;
if (ndim < 2)
{
nj = 1;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,Ai,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
for (kk = 0; kk < nk; kk++)
{
for (jj = 0; jj < nj; jj++)
{
ii = (kk + jj + redblack) % 2;
Ai = Astart + kk*Anj*Ani + jj*Ani + ii;
bi = bstart + kk*bnj*bni + jj*bni + ii;
xi = xstart + kk*xnj*xni + jj*xni + ii;
for (; ii < ni; ii+=2, Ai+=2, bi+=2, xi+=2)
{
xp[xi] = bp[bi] / Ap[Ai];
}
}
}
}
}
}
rb = (rb + 1) % 2;
iter++;
}
/*----------------------------------------------------------
* Do regular iterations
*----------------------------------------------------------*/
while (iter < 2*max_iter)
{
for (compute_i = 0; compute_i < 2; compute_i++)
{
switch(compute_i)
{
case 0:
{
xp = hypre_StructVectorData(x);
hypre_InitializeIndtComputations(compute_pkg, xp, &comm_handle);
compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg);
}
break;
case 1:
{
hypre_FinalizeIndtComputations(comm_handle);
compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg);
}
break;
}
hypre_ForBoxArrayI(i, compute_box_aa)
{
compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), i);
b_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(b), i);
x_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);
Ap = hypre_StructMatrixBoxData(A, i, diag_rank);
bp = hypre_StructVectorBoxData(b, i);
xp = hypre_StructVectorBoxData(x, i);
hypre_ForBoxI(j, compute_box_a)
{
compute_box = hypre_BoxArrayBox(compute_box_a, j);
start = hypre_BoxIMin(compute_box);
hypre_BoxGetSize(compute_box, loop_size);
/* Are we relaxing index start or start+(1,0,0)? */
redblack = rb;
for (d = 0; d < ndim; d++)
{
redblack += hypre_IndexD(start, d);
}
redblack = hypre_abs(redblack) % 2;
Astart = hypre_BoxIndexRank(A_dbox, start);
bstart = hypre_BoxIndexRank(b_dbox, start);
xstart = hypre_BoxIndexRank(x_dbox, start);
ni = hypre_IndexX(loop_size);
nj = hypre_IndexY(loop_size);
nk = hypre_IndexZ(loop_size);
Ani = hypre_BoxSizeX(A_dbox);
bni = hypre_BoxSizeX(b_dbox);
xni = hypre_BoxSizeX(x_dbox);
Anj = hypre_BoxSizeY(A_dbox);
bnj = hypre_BoxSizeY(b_dbox);
xnj = hypre_BoxSizeY(x_dbox);
if (ndim < 3)
{
nk = 1;
if (ndim < 2)
{
nj = 1;
}
}
switch(stencil_size)
{
case 7:
Ap5 = hypre_StructMatrixBoxData(A, i, offd[5]);
Ap4 = hypre_StructMatrixBoxData(A, i, offd[4]);
xoff5 = hypre_BoxOffsetDistance(
x_dbox, stencil_shape[offd[5]]);
xoff4 = hypre_BoxOffsetDistance(
x_dbox, stencil_shape[offd[4]]);
case 5:
Ap3 = hypre_StructMatrixBoxData(A, i, offd[3]);
Ap2 = hypre_StructMatrixBoxData(A, i, offd[2]);
xoff3 = hypre_BoxOffsetDistance(
x_dbox, stencil_shape[offd[3]]);
xoff2 = hypre_BoxOffsetDistance(
x_dbox, stencil_shape[offd[2]]);
case 3:
Ap1 = hypre_StructMatrixBoxData(A, i, offd[1]);
Ap0 = hypre_StructMatrixBoxData(A, i, offd[0]);
xoff1 = hypre_BoxOffsetDistance(
x_dbox, stencil_shape[offd[1]]);
xoff0 = hypre_BoxOffsetDistance(
x_dbox, stencil_shape[offd[0]]);
break;
}
switch(stencil_size)
{
case 7:
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,Ai,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
for (kk = 0; kk < nk; kk++)
{
for (jj = 0; jj < nj; jj++)
{
ii = (kk + jj + redblack) % 2;
Ai = Astart + kk*Anj*Ani + jj*Ani + ii;
bi = bstart + kk*bnj*bni + jj*bni + ii;
xi = xstart + kk*xnj*xni + jj*xni + ii;
for (; ii < ni; ii+=2, Ai+=2, bi+=2, xi+=2)
{
xp[xi] =
(bp[bi] -
Ap0[Ai] * xp[xi + xoff0] -
Ap1[Ai] * xp[xi + xoff1] -
Ap2[Ai] * xp[xi + xoff2] -
Ap3[Ai] * xp[xi + xoff3] -
Ap4[Ai] * xp[xi + xoff4] -
Ap5[Ai] * xp[xi + xoff5]) / Ap[Ai];
}
}
}
break;
case 5:
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,Ai,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
for (kk = 0; kk < nk; kk++)
{
for (jj = 0; jj < nj; jj++)
{
ii = (kk + jj + redblack) % 2;
Ai = Astart + kk*Anj*Ani + jj*Ani + ii;
bi = bstart + kk*bnj*bni + jj*bni + ii;
xi = xstart + kk*xnj*xni + jj*xni + ii;
for (; ii < ni; ii+=2, Ai+=2, bi+=2, xi+=2)
{
xp[xi] =
(bp[bi] -
Ap0[Ai] * xp[xi + xoff0] -
Ap1[Ai] * xp[xi + xoff1] -
Ap2[Ai] * xp[xi + xoff2] -
Ap3[Ai] * xp[xi + xoff3]) / Ap[Ai];
}
}
}
break;
case 3:
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,Ai,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
for (kk = 0; kk < nk; kk++)
{
for (jj = 0; jj < nj; jj++)
{
ii = (kk + jj + redblack) % 2;
Ai = Astart + kk*Anj*Ani + jj*Ani + ii;
bi = bstart + kk*bnj*bni + jj*bni + ii;
xi = xstart + kk*xnj*xni + jj*xni + ii;
for (; ii < ni; ii+=2, Ai+=2, bi+=2, xi+=2)
{
xp[xi] =
(bp[bi] -
Ap0[Ai] * xp[xi + xoff0] -
Ap1[Ai] * xp[xi + xoff1]) / Ap[Ai];
}
}
}
break;
}
}
}
}
rb = (rb + 1) % 2;
iter++;
}
(relax_data -> num_iterations) = iter / 2;
/*-----------------------------------------------------------------------
* Return
*-----------------------------------------------------------------------*/
hypre_IncFLOPCount(relax_data -> flops);
hypre_EndTiming(relax_data -> time_index);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Set the convergence tolerance used by the red/black GS relaxation. */
HYPRE_Int
hypre_RedBlackGSSetTol( void *relax_vdata,
                        HYPRE_Real tol )
{
   ((hypre_RedBlackGSData *) relax_vdata) -> tol = tol;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Set the maximum number of relaxation sweeps to perform. */
HYPRE_Int
hypre_RedBlackGSSetMaxIter( void *relax_vdata,
                            HYPRE_Int max_iter )
{
   hypre_RedBlackGSData *rb_data = (hypre_RedBlackGSData *) relax_vdata;

   rb_data -> max_iter = max_iter;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Enable or disable the zero-initial-guess shortcut for the first sweep. */
HYPRE_Int
hypre_RedBlackGSSetZeroGuess( void *relax_vdata,
                              HYPRE_Int zero_guess )
{
   hypre_RedBlackGSData *rb_data = (hypre_RedBlackGSData *) relax_vdata;

   rb_data -> zero_guess = zero_guess;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Begin the red/black sweeps on the "red" points (rb_start = 1). */
HYPRE_Int
hypre_RedBlackGSSetStartRed( void *relax_vdata )
{
   ((hypre_RedBlackGSData *) relax_vdata) -> rb_start = 1;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Begin the red/black sweeps on the "black" points (rb_start = 0). */
HYPRE_Int
hypre_RedBlackGSSetStartBlack( void *relax_vdata )
{
   ((hypre_RedBlackGSData *) relax_vdata) -> rb_start = 0;

   return hypre_error_flag;
}
|
OMPIRBuilder.h | //===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the OpenMPIRBuilder class and helpers used as a convenient
// way to create LLVM instructions for OpenMP directives.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Allocator.h"
#include <forward_list>
namespace llvm {
class CanonicalLoopInfo;
/// An interface to create LLVM-IR for OpenMP directives.
///
/// Each OpenMP directive has a corresponding public generator method.
class OpenMPIRBuilder {
public:
/// Create a new OpenMPIRBuilder operating on the given module \p M. This will
/// not have an effect on \p M (see initialize).
OpenMPIRBuilder(Module &M) : M(M), Builder(M.getContext()) {}

/// Destructor; declared here, defined out-of-line in the implementation file.
~OpenMPIRBuilder();
/// Initialize the internal state; this will put structure types and
/// potentially other helpers into the underlying module. Must be called
/// before any other method and only once!
void initialize();
/// Finalize the underlying module, e.g., by outlining regions.
/// \param Fn The function to be finalized. If not used,
/// all functions are finalized.
/// \param AllowExtractorSinking Flag to include sinking instructions,
/// emitted by CodeExtractor, in the
/// outlined region. Default is false.
void finalize(Function *Fn = nullptr, bool AllowExtractorSinking = false);
/// Add attributes known for \p FnID to \p Fn.
void addAttributes(omp::RuntimeFunction FnID, Function &Fn);
/// Type used throughout for insertion points.
using InsertPointTy = IRBuilder<>::InsertPoint;
/// Callback type for variable finalization (think destructors).
///
/// \param CodeGenIP is the insertion point at which the finalization code
/// should be placed.
///
/// A finalize callback knows about all objects that need finalization, e.g.
/// destruction, when the scope of the currently generated construct is left
/// at the time, and location, the callback is invoked.
using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>;
/// Bookkeeping for one in-flight region that may need finalization code
/// (think destructors) emitted when its scope is left.
struct FinalizationInfo {
  /// The finalization callback provided by the last in-flight invocation of
  /// createXXXX for the directive of kind DK.
  FinalizeCallbackTy FiniCB;

  /// The directive kind of the innermost directive that has an associated
  /// region which might require finalization when it is left.
  omp::Directive DK;

  /// Flag to indicate if the directive is cancellable.
  bool IsCancellable;
};
/// Push a finalization callback on the finalization stack.
///
/// The callback is copied into the stack so region-exit code generation can
/// later invoke it (see FinalizationInfo).
///
/// NOTE: Temporary solution until Clang CG is gone.
void pushFinalizationCB(const FinalizationInfo &FI) {
  FinalizationStack.push_back(FI);
}
/// Pop the last finalization callback from the finalization stack.
///
/// Expects a matching prior pushFinalizationCB; the stack must be non-empty.
///
/// NOTE: Temporary solution until Clang CG is gone.
void popFinalizationCB() { FinalizationStack.pop_back(); }
/// Callback type for body (=inner region) code generation
///
/// The callback takes code locations as arguments, each describing a
/// location at which code might need to be generated or a location that is
/// the target of control transfer.
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the body code should be
/// placed.
/// \param ContinuationBB is the basic block target to leave the body.
///
/// Note that all blocks pointed to by the arguments have terminators.
using BodyGenCallbackTy =
function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
// This is created primarily for the sections construct, as llvm::function_ref
// (BodyGenCallbackTy) is not storable (as described in the comments of the
// function_ref class, function_ref contains a non-owning reference
// to the callable).
using StorableBodyGenCallbackTy =
std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
/// Callback type for loop body code generation.
///
/// \param CodeGenIP is the insertion point where the loop's body code must be
/// placed. This will be a dedicated BasicBlock with a
/// conditional branch from the loop condition check and
/// terminated with an unconditional branch to the loop
/// latch.
/// \param IndVar is the induction variable usable at the insertion point.
using LoopBodyGenCallbackTy =
function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>;
/// Callback type for variable privatization (think copy & default
/// constructor).
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the privatization code
/// should be placed.
/// \param Original The value being copied/created, should not be used in the
/// generated IR.
/// \param Inner The equivalent of \p Original that should be used in the
/// generated IR; this is equal to \p Original if the value is
/// a pointer and can thus be passed directly, otherwise it is
/// an equivalent but different value.
/// \param ReplVal The replacement value, thus a copy or new created version
/// of \p Inner.
///
/// \returns The new insertion point where code generation continues and
/// \p ReplVal the replacement value.
using PrivatizeCallbackTy = function_ref<InsertPointTy(
InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original,
Value &Inner, Value *&ReplVal)>;
/// Description of a LLVM-IR insertion point (IP) and a debug/source location
/// (filename, line, column, ...).
struct LocationDescription {
  /// Capture the current insertion point and debug location of \p IRB.
  template <typename T, typename U>
  LocationDescription(const IRBuilder<T, U> &IRB)
      : IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {}

  /// Use \p IP with a default-constructed (unset) debug location.
  LocationDescription(const InsertPointTy &IP) : IP(IP) {}

  /// Use \p IP and \p DL explicitly.
  LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
      : IP(IP), DL(DL) {}

  /// The LLVM-IR insertion point.
  InsertPointTy IP;

  /// The debug location; may be unset.
  DebugLoc DL;
};
/// Emitter methods for OpenMP directives.
///
///{
/// Generator for '#omp barrier'
///
/// \param Loc The location where the barrier directive was encountered.
/// \param DK The kind of directive that caused the barrier.
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive DK,
bool ForceSimpleCall = false,
bool CheckCancelFlag = true);
/// Generator for '#omp cancel'
///
/// \param Loc The location where the directive was encountered.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param CanceledDirective The kind of directive that is canceled.
///
/// \returns The insertion point after the barrier.
InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition,
omp::Directive CanceledDirective);
/// Generator for '#omp parallel'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param BodyGenCB Callback that will generate the region code.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param NumThreads The evaluated 'num_threads' clause expression, if any.
/// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind).
/// \param IsCancellable Flag to indicate a cancellable parallel region.
///
/// \returns The insertion position *after* the parallel.
IRBuilder<>::InsertPoint
createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP,
BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, Value *IfCondition,
Value *NumThreads, omp::ProcBindKind ProcBind,
bool IsCancellable);
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// This generator operates on the logical iteration space of the loop, i.e.
/// the caller only has to provide a loop trip count of the loop as defined by
/// base language semantics. The trip count is interpreted as an unsigned
/// integer. The induction variable passed to \p BodyGenCB will be of the same
/// type and run from 0 to \p TripCount - 1. It is up to the callback to
/// convert the logical iteration variable to the loop counter variable in the
/// loop body.
///
/// \param Loc The insert and source location description. The insert
/// location can be between two instructions or the end of a
/// degenerate block (e.g. a BB under construction).
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param TripCount Number of iterations the loop body is executed.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *TripCount,
const Twine &Name = "loop");
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// Instead of a logical iteration space, this allows specifying user-defined
/// loop counter values using increment, upper- and lower bounds. To
/// disambiguate the terminology when counting downwards, instead of lower
/// bounds we use \p Start for the loop counter value in the first body
/// iteration.
///
/// Consider the following limitations:
///
/// * A loop counter space over all integer values of its bit-width cannot be
/// represented. E.g using uint8_t, its loop trip count of 256 cannot be
/// stored into an 8 bit integer):
///
/// DO I = 0, 255, 1
///
/// * Unsigned wrapping is only supported when wrapping only "once"; E.g.
/// effectively counting downwards:
///
/// for (uint8_t i = 100u; i > 0; i += 127u)
///
///
/// TODO: May need to add additional parameters to represent:
///
/// * Allow representing downcounting with unsigned integers.
///
/// * Sign of the step and the comparison operator might disagree:
///
/// for (int i = 0; i < 42; i -= 1u)
///
//
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param Start Value of the loop counter for the first iterations.
/// \param Stop Loop counter values past this will stop the loop.
/// \param Step Loop counter increment after each iteration; negative
/// means counting down.
/// \param IsSigned Whether Start, Stop and Step are signed integers.
/// \param InclusiveStop Whether \p Stop itself is a valid value for the loop
/// counter.
/// \param ComputeIP Insertion point for instructions computing the trip
/// count. Can be used to ensure the trip count is available
/// at the outermost loop of a loop nest. If not set,
/// defaults to the preheader of the generated loop.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *Start, Value *Stop, Value *Step,
bool IsSigned, bool InclusiveStop,
InsertPointTy ComputeIP = {},
const Twine &Name = "loop");
/// Collapse a loop nest into a single loop.
///
/// Merges loops of a loop nest into a single CanonicalLoopNest representation
/// that has the same number of innermost loop iterations as the origin loop
/// nest. The induction variables of the input loops are derived from the
/// collapsed loop's induction variable. This is intended to be used to
/// implement OpenMP's collapse clause. Before applying a directive,
/// collapseLoops normalizes a loop nest to contain only a single loop and the
/// directive's implementation does not need to handle multiple loops itself.
/// This does not remove the need to handle all loop nest handling by
/// directives, such as the ordered(<n>) clause or the simd schedule-clause
/// modifier of the worksharing-loop directive.
///
/// Example:
/// \code
/// for (int i = 0; i < 7; ++i) // Canonical loop "i"
/// for (int j = 0; j < 9; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After collapsing with Loops={i,j}, the loop is changed to
/// \code
/// for (int ij = 0; ij < 63; ++ij) {
/// int i = ij / 9;
/// int j = ij % 9;
/// body(i, j);
/// }
/// \endcode
///
/// In the current implementation, the following limitations apply:
///
/// * All input loops have an induction variable of the same type.
///
/// * The collapsed loop will have the same trip count integer type as the
/// input loops. Therefore it is possible that the collapsed loop cannot
/// represent all iterations of the input loops. For instance, assuming a
/// 32 bit integer type, and two input loops both iterating 2^16 times, the
/// theoretical trip count of the collapsed loop would be 2^32 iteration,
/// which cannot be represented in an 32-bit integer. Behavior is undefined
/// in this case.
///
/// * The trip counts of every input loop must be available at \p ComputeIP.
/// Non-rectangular loops are not yet supported.
///
/// * At each nest level, code between a surrounding loop and its nested loop
/// is hoisted into the loop body, and such code will be executed more
/// often than before collapsing (or not at all if any inner loop iteration
/// has a trip count of 0). This is permitted by the OpenMP specification.
///
/// \param DL Debug location for instructions added for collapsing,
/// such as instructions to compute/derive the input loop's
/// induction variables.
/// \param Loops Loops in the loop nest to collapse. Loops are specified
/// from outermost-to-innermost and every control flow of a
/// loop's body must pass through its directly nested loop.
/// \param ComputeIP Where additional instruction that compute the collapsed
/// trip count. If not set, defaults to before the generated
/// loop.
///
/// \returns The CanonicalLoopInfo object representing the collapsed loop.
CanonicalLoopInfo *collapseLoops(DebugLoc DL,
ArrayRef<CanonicalLoopInfo *> Loops,
InsertPointTy ComputeIP);
/// Modifies the canonical loop to be a statically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// TODO: Workshare loops with static scheduling may contain up to two loops
/// that fulfill the requirements of an OpenMP canonical loop. One for
/// iterating over all iterations of a chunk and another one for iterating
/// over all chunks that are executed on the same thread. Returning
/// CanonicalLoopInfo objects representing them may eventually be useful for
/// the apply clause planned in OpenMP 6.0, but currently whether these are
/// canonical loops is irrelevant.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
bool NeedsBarrier,
Value *Chunk = nullptr);
/// Modifies the canonical loop to be a dynamically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain, and then in each iteration
/// to update the loop counter.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param SchedType Type of scheduling to be passed to the init function.
/// \param SchedType Type of scheduling to be passed to the init function.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyDynamicWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
omp::OMPScheduleType SchedType,
bool NeedsBarrier,
Value *Chunk = nullptr);
/// Modifies the canonical loop to be a workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP, bool NeedsBarrier);
/// Tile a loop nest.
///
/// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in
/// \p Loops must be perfectly nested, from outermost to innermost loop
/// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value
/// of every loop and every tile sizes must be usable in the outermost
/// loop's preheader. This implies that the loop nest is rectangular.
///
/// Example:
/// \code
/// for (int i = 0; i < 15; ++i) // Canonical loop "i"
/// for (int j = 0; j < 14; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to
/// \code
/// for (int i1 = 0; i1 < 3; ++i1)
/// for (int j1 = 0; j1 < 2; ++j1)
/// for (int i2 = 0; i2 < 5; ++i2)
/// for (int j2 = 0; j2 < 7; ++j2)
/// body(i1*5+i2, j1*7+j2);
/// \endcode
///
/// The returned vector are the loops {i1,j1,i2,j2}. The loops i1 and j1 are
/// referred to the floor, and the loops i2 and j2 are the tiles. Tiling also
/// handles non-constant trip counts, non-constant tile sizes and trip counts
/// that are not multiples of the tile size. In the latter case the tile loop
/// of the last floor-loop iteration will have fewer iterations than specified
/// as its tile size.
///
///
/// @param DL Debug location for instructions added by tiling, for
/// instance the floor- and tile trip count computation.
/// @param Loops Loops to tile. The CanonicalLoopInfo objects are
/// invalidated by this method, i.e. should not be used after
/// tiling.
/// @param TileSizes For each loop in \p Loops, the tile size for that
/// dimensions.
///
/// \returns A list of generated loops. Contains twice as many loops as the
/// input loop nest; the first half are the floor loops and the
/// second half are the tile loops.
std::vector<CanonicalLoopInfo *>
tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
ArrayRef<Value *> TileSizes);
/// Fully unroll a loop.
///
/// Instead of unrolling the loop immediately (and duplicating its body
/// instructions), it is deferred to LLVM's LoopUnrollPass by adding loop
/// metadata.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Fully or partially unroll a loop. How the loop is unrolled is determined
/// using LLVM's LoopUnrollPass.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Partially unroll a loop.
///
/// The CanonicalLoopInfo of the unrolled loop for use with chained
/// loop-associated directive can be requested using \p UnrolledCLI. Not
/// needing the CanonicalLoopInfo allows more efficient code generation by
/// deferring the actual unrolling to the LoopUnrollPass using loop metadata.
/// A loop-associated directive applied to the unrolled loop needs to know the
/// new trip count which means that if using a heuristically determined unroll
/// factor (\p Factor == 0), that factor must be computed immediately. We are
/// using the same logic as the LoopUnrollPass to derive the unroll factor,
/// but which assumes that some canonicalization has taken place (e.g.
/// Mem2Reg, LICM, GVN, Inlining, etc.). That is, the heuristic will perform
/// better when the unrolled loop's CanonicalLoopInfo is not needed.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
/// \param Factor The factor to unroll the loop by. A factor of 0
/// indicates that a heuristic should be used to determine
/// the unroll-factor.
/// \param UnrolledCLI If non-null, receives the CanonicalLoopInfo of the
/// partially unrolled loop. Otherwise, uses loop metadata
/// to defer unrolling to the LoopUnrollPass.
void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor,
CanonicalLoopInfo **UnrolledCLI);
/// Generator for '#omp flush'
///
/// \param Loc The location where the flush directive was encountered
void createFlush(const LocationDescription &Loc);
/// Generator for '#omp taskwait'
///
/// \param Loc The location where the taskwait directive was encountered.
void createTaskwait(const LocationDescription &Loc);
/// Generator for '#omp taskyield'
///
/// \param Loc The location where the taskyield directive was encountered.
void createTaskyield(const LocationDescription &Loc);
/// Functions used to generate reductions. Such functions take two Values
/// representing LHS and RHS of the reduction, respectively, and a reference
/// to the value that is updated to refer to the reduction result.
using ReductionGenTy =
function_ref<InsertPointTy(InsertPointTy, Value *, Value *, Value *&)>;
/// Functions used to generate atomic reductions. Such functions take two
/// Values representing pointers to LHS and RHS of the reduction. They are
/// expected to atomically update the LHS to the reduced value.
using AtomicReductionGenTy =
function_ref<InsertPointTy(InsertPointTy, Value *, Value *)>;
/// Information about an OpenMP reduction.
struct ReductionInfo {
  ReductionInfo(Value *Variable, Value *PrivateVariable,
                ReductionGenTy ReductionGen,
                AtomicReductionGenTy AtomicReductionGen)
      : Variable(Variable), PrivateVariable(PrivateVariable),
        ReductionGen(ReductionGen), AtomicReductionGen(AtomicReductionGen) {}

  /// Returns the type of the element being reduced.
  /// NOTE(review): relies on Variable having a typed pointer type;
  /// getPointerElementType is deprecated under opaque pointers — confirm
  /// against the LLVM version in use.
  Type *getElementType() const {
    return Variable->getType()->getPointerElementType();
  }

  /// Reduction variable of pointer type.
  Value *Variable;

  /// Thread-private partial reduction variable.
  Value *PrivateVariable;

  /// Callback for generating the reduction body. The IR produced by this will
  /// be used to combine two values in a thread-safe context, e.g., under
  /// lock or within the same thread, and therefore need not be atomic.
  ReductionGenTy ReductionGen;

  /// Callback for generating the atomic reduction body, may be null. The IR
  /// produced by this will be used to atomically combine two values during
  /// reduction. If null, the implementation will use the non-atomic version
  /// along with the appropriate synchronization mechanisms.
  AtomicReductionGenTy AtomicReductionGen;
};
// TODO: provide atomic and non-atomic reduction generators for reduction
// operators defined by the OpenMP specification.
/// Generator for '#omp reduction'.
///
/// Emits the IR instructing the runtime to perform the specific kind of
/// reductions. Expects reduction variables to have been privatized and
/// initialized to reduction-neutral values separately. Emits the calls to
/// runtime functions as well as the reduction function and the basic blocks
/// performing the reduction atomically and non-atomically.
///
/// The code emitted for the following:
///
/// \code
/// type var_1;
/// type var_2;
/// #pragma omp <directive> reduction(reduction-op:var_1,var_2)
/// /* body */;
/// \endcode
///
/// corresponds to the following sketch.
///
/// \code
/// void _outlined_par() {
/// // N is the number of different reductions.
/// void *red_array[] = {privatized_var_1, privatized_var_2, ...};
/// switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array,
/// _omp_reduction_func,
/// _gomp_critical_user.reduction.var)) {
/// case 1: {
/// var_1 = var_1 <reduction-op> privatized_var_1;
/// var_2 = var_2 <reduction-op> privatized_var_2;
/// // ...
/// __kmpc_end_reduce(...);
/// break;
/// }
/// case 2: {
/// _Atomic<ReductionOp>(var_1, privatized_var_1);
/// _Atomic<ReductionOp>(var_2, privatized_var_2);
/// // ...
/// break;
/// }
/// default: break;
/// }
/// }
///
/// void _omp_reduction_func(void **lhs, void **rhs) {
/// *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0];
/// *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1];
/// // ...
/// }
/// \endcode
///
/// \param Loc The location where the reduction was
/// encountered. Must be within the associated
/// directive and after the last local access to the
/// reduction variables.
/// \param AllocaIP An insertion point suitable for allocas usable
/// in reductions.
/// \param ReductionInfos A list of info on each reduction variable.
/// \param IsNoWait A flag set if the reduction is marked as nowait.
InsertPointTy createReductions(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<ReductionInfo> ReductionInfos,
bool IsNoWait = false);
///}
/// Return the insertion point currently used by the underlying IRBuilder,
/// as a saved copy; restoring it later resumes emission at the same spot.
InsertPointTy getInsertionPoint() { return Builder.saveIP(); }
/// Update the internal location to \p Loc.
bool updateToLocation(const LocationDescription &Loc) {
Builder.restoreIP(Loc.IP);
Builder.SetCurrentDebugLocation(Loc.DL);
return Loc.IP.getBlock() != nullptr;
}
/// Return the function declaration for the runtime function with \p FnID.
FunctionCallee getOrCreateRuntimeFunction(Module &M,
omp::RuntimeFunction FnID);
Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID);
/// Return the (LLVM-IR) string describing the source location \p LocStr.
Constant *getOrCreateSrcLocStr(StringRef LocStr);
/// Return the (LLVM-IR) string describing the default source location.
Constant *getOrCreateDefaultSrcLocStr();
/// Return the (LLVM-IR) string describing the source location identified by
/// the arguments.
Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName,
unsigned Line, unsigned Column);
/// Return the (LLVM-IR) string describing the DebugLoc \p DL. Use \p F as
/// fallback if \p DL does not specify the function name.
Constant *getOrCreateSrcLocStr(DebugLoc DL, Function *F = nullptr);
/// Return the (LLVM-IR) string describing the source location \p Loc.
Constant *getOrCreateSrcLocStr(const LocationDescription &Loc);
/// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags.
/// TODO: Create a enum class for the Reserve2Flags
Value *getOrCreateIdent(Constant *SrcLocStr,
omp::IdentFlag Flags = omp::IdentFlag(0),
unsigned Reserve2Flags = 0);
/// Create a global flag \p Name in the module with initial value \p Value.
GlobalValue *createGlobalFlag(unsigned Value, StringRef Name);
/// Generate control flow and cleanup for cancellation.
///
/// \param CancelFlag Flag indicating if the cancellation is performed.
/// \param CanceledDirective The kind of directive that is canceled.
/// \param ExitCB Extra code to be generated in the exit block.
void emitCancelationCheckImpl(Value *CancelFlag,
omp::Directive CanceledDirective,
FinalizeCallbackTy ExitCB = {});
/// Generate a barrier runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
/// \param DK The directive which caused the barrier
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy emitBarrierImpl(const LocationDescription &Loc,
omp::Directive DK, bool ForceSimpleCall,
bool CheckCancelFlag);
/// Generate a flush runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitFlush(const LocationDescription &Loc);
/// The finalization stack made up of finalize callbacks currently in-flight,
/// wrapped into FinalizationInfo objects that reference also the finalization
/// target block and the kind of cancellable directive.
SmallVector<FinalizationInfo, 8> FinalizationStack;
/// Return true if the last entry in the finalization stack is of kind \p DK
/// and cancellable.
bool isLastFinalizationInfoCancellable(omp::Directive DK) {
  if (FinalizationStack.empty())
    return false;
  const FinalizationInfo &Last = FinalizationStack.back();
  return Last.IsCancellable && Last.DK == DK;
}
/// Generate a taskwait runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskwaitImpl(const LocationDescription &Loc);
/// Generate a taskyield runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskyieldImpl(const LocationDescription &Loc);
/// Return the current thread ID.
///
/// \param Ident The ident (ident_t*) describing the query origin.
Value *getOrCreateThreadID(Value *Ident);
/// The underlying LLVM-IR module
Module &M;
/// The LLVM-IR Builder used to create IR.
IRBuilder<> Builder;
/// Map to remember source location strings
StringMap<Constant *> SrcLocStrMap;
/// Map to remember existing ident_t*.
DenseMap<std::pair<Constant *, uint64_t>, Value *> IdentMap;
/// Helper that contains information about regions we need to outline
/// during finalization.
struct OutlineInfo {
/// Callback type taking the produced function; the name suggests it runs
/// after outlining — confirm at the invocation site in finalize().
using PostOutlineCBTy = std::function<void(Function &)>;
PostOutlineCBTy PostOutlineCB;
/// First and last basic block of the region to be outlined; both are
/// expected to live in the same function (see getFunction()).
BasicBlock *EntryBB, *ExitBB;
/// Collect all blocks in between EntryBB and ExitBB in both the given
/// vector and set.
void collectBlocks(SmallPtrSetImpl<BasicBlock *> &BlockSet,
SmallVectorImpl<BasicBlock *> &BlockVector);
/// Return the function that contains the region to be outlined, derived
/// from EntryBB's parent.
Function *getFunction() const { return EntryBB->getParent(); }
};
/// Collection of regions that need to be outlined during finalization.
SmallVector<OutlineInfo, 16> OutlineInfos;
/// Collection of owned canonical loop objects that eventually need to be
/// free'd.
std::forward_list<CanonicalLoopInfo> LoopInfos;
/// Add a new region that will be outlined later.
///
/// \param OI Description of the region; consumed (left moved-from) by this
///           call.
void addOutlineInfo(OutlineInfo &&OI) {
  // OI is a *named* rvalue reference and therefore an lvalue here; without
  // std::move, emplace_back would copy the OutlineInfo — including its
  // std::function callback — instead of moving it.
  OutlineInfos.emplace_back(std::move(OI));
}
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
StringMap<AssertingVH<Constant>, BumpPtrAllocator> InternalVars;
/// Create the global variable holding the offload mappings information.
GlobalVariable *createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
std::string VarName);
/// Create the global variable holding the offload names information.
GlobalVariable *
createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
std::string VarName);
/// Allocas used when calling an offload mapper runtime function; created by
/// createMapperAllocas and passed to emitMapperCall (declared below).
struct MapperAllocas {
// Field names mirror the argument arrays of the mapper runtime API
// (argument base pointers, argument pointers, argument sizes).
AllocaInst *ArgsBase = nullptr;
AllocaInst *Args = nullptr;
AllocaInst *ArgSizes = nullptr;
};
/// Create the allocas instruction used in call to mapper functions.
void createMapperAllocas(const LocationDescription &Loc,
InsertPointTy AllocaIP, unsigned NumOperands,
struct MapperAllocas &MapperAllocas);
/// Create the call for the target mapper function.
/// \param Loc The source location description.
/// \param MapperFunc Function to be called.
/// \param SrcLocInfo Source location information global.
/// \param MaptypesArg The argument types.
/// \param MapnamesArg The argument names.
/// \param MapperAllocas The AllocaInst used for the call.
/// \param DeviceID Device ID for the call.
/// \param NumOperands Number of operands in the call.
void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc,
Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg,
struct MapperAllocas &MapperAllocas, int64_t DeviceID,
unsigned NumOperands);
public:
/// Generator for __kmpc_copyprivate
///
/// \param Loc The source location description.
/// \param BufSize Number of elements in the buffer.
/// \param CpyBuf List of pointers to data to be copied.
/// \param CpyFn function to call for copying data.
/// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise.
///
/// \return The insertion position *after* the CopyPrivate call.
InsertPointTy createCopyPrivate(const LocationDescription &Loc,
llvm::Value *BufSize, llvm::Value *CpyBuf,
llvm::Value *CpyFn, llvm::Value *DidIt);
/// Generator for '#omp single'
///
/// \param Loc The source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param DidIt Local variable used as a flag to indicate 'single' thread
///
/// \returns The insertion position *after* the single call.
InsertPointTy createSingle(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, llvm::Value *DidIt);
/// Generator for '#omp master'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the master.
InsertPointTy createMaster(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generator for '#omp masked'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the masked.
InsertPointTy createMasked(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, Value *Filter);
/// Generator for '#omp critical'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \param CriticalName name of the lock used by the critical directive
/// \param HintInst Hint Instruction for hint clause associated with critical
///
/// \returns The insertion position *after* the critical.
InsertPointTy createCritical(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB,
StringRef CriticalName, Value *HintInst);
/// Generator for '#omp ordered depend (source | sink)'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion point to be used for alloca instructions.
/// \param NumLoops The number of loops in depend clause.
/// \param StoreValues The value will be stored in vector address.
/// \param Name The name of alloca instruction.
/// \param IsDependSource If true, depend source; otherwise, depend sink.
///
/// \return The insertion position *after* the ordered.
InsertPointTy createOrderedDepend(const LocationDescription &Loc,
InsertPointTy AllocaIP, unsigned NumLoops,
ArrayRef<llvm::Value *> StoreValues,
const Twine &Name, bool IsDependSource);
/// Generator for '#omp ordered [threads | simd]'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param IsThreads If true, with threads clause or without clause;
/// otherwise, with simd clause;
///
/// \returns The insertion position *after* the ordered.
InsertPointTy createOrderedThreadsSimd(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB,
bool IsThreads);
/// Generator for '#omp sections'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param SectionCBs Callbacks that will generate body of each section.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IsCancellable Flag to indicate a cancellable parallel region.
/// \param IsNowait If true, barrier - to ensure all sections are executed
/// before moving forward will not be generated.
/// \returns The insertion position *after* the sections.
InsertPointTy createSections(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<StorableBodyGenCallbackTy> SectionCBs,
PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, bool IsCancellable,
bool IsNowait);
/// Generator for '#omp section'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \returns The insertion position *after* the section.
InsertPointTy createSection(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generate conditional branch and relevant BasicBlocks through which private
/// threads copy the 'copyin' variables from Master copy to threadprivate
/// copies.
///
/// \param IP insertion block for copyin conditional
/// \param MasterAddr a pointer to the master variable
/// \param PrivateAddr a pointer to the threadprivate variable
/// \param IntPtrTy Pointer size type
/// \param BranchtoEnd Create a branch between the copyin.not.master blocks
/// and copy.in.end block
///
/// \returns The insertion point where copying operation to be emitted.
InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr,
Value *PrivateAddr,
llvm::IntegerType *IntPtrTy,
bool BranchtoEnd = true);
/// Create a runtime call for kmpc_Alloc
///
/// \param Loc The insert and source location description.
/// \param Size Size of allocated memory space
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_alloc
///
/// \returns CallInst to the OMP_Alloc call
CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size,
Value *Allocator, std::string Name = "");
/// Create a runtime call for kmpc_free
///
/// \param Loc The insert and source location description.
/// \param Addr Address of memory space to be freed
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_Free
///
/// \returns CallInst to the OMP_Free call
CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr,
Value *Allocator, std::string Name = "");
/// Create a runtime call for kmpc_threadprivate_cached
///
/// \param Loc The insert and source location description.
/// \param Pointer pointer to data to be cached
/// \param Size size of data to be cached
/// \param Name Name of call Instruction for callinst
///
/// \returns CallInst to the thread private cache call.
CallInst *createCachedThreadPrivate(const LocationDescription &Loc,
llvm::Value *Pointer,
llvm::ConstantInt *Size,
const llvm::Twine &Name = Twine(""));
/// The `omp target` interface
///
/// For more information about the usage of this interface,
/// \see openmp/libomptarget/deviceRTLs/common/include/target.h
///
///{
/// Create a runtime call for kmpc_target_init
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD,
bool RequiresFullRuntime);
/// Create a runtime call for kmpc_target_deinit
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
void createTargetDeinit(const LocationDescription &Loc, bool IsSPMD,
bool RequiresFullRuntime);
///}
/// Declarations for LLVM-IR types (simple, array, function and structure) are
/// generated below. Their names are defined and used in OpenMPKinds.def. Here
/// we provide the declarations, the initializeTypes function will provide the
/// values.
///
///{
#define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr;
#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
ArrayType *VarName##Ty = nullptr; \
PointerType *VarName##PtrTy = nullptr;
#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
FunctionType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#define OMP_STRUCT_TYPE(VarName, StrName, ...) \
StructType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
///}
private:
/// Create all simple and struct types exposed by the runtime and remember
/// the llvm::PointerTypes of them for easy access later.
void initializeTypes(Module &M);
/// Common interface for generating entry calls for OMP Directives.
/// if the directive has a region/body, It will set the insertion
/// point to the body
///
/// \param OMPD Directive to generate entry blocks for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitBB block where the region ends.
/// \param Conditional indicate if the entry call result will be used
/// to evaluate a conditional of whether a thread will execute
/// body code or not.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall,
BasicBlock *ExitBB,
bool Conditional = false);
/// Common interface to finalize the region
///
/// \param OMPD Directive to generate exiting code for
/// \param FinIP Insertion point for emitting Finalization code and exit call
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD,
InsertPointTy FinIP,
Instruction *ExitCall,
bool HasFinalize = true);
/// Common Interface to generate OMP inlined regions
///
/// \param OMPD Directive to generate inlined region for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param BodyGenCB Body code generation callback.
/// \param FiniCB Finalization Callback. Will be called when finalizing region
/// \param Conditional indicate if the entry call result will be used
/// to evaluate a conditional of whether a thread will execute
/// body code or not.
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
/// \param IsCancellable if HasFinalize is set to true, indicate if the
/// directive should be cancellable.
/// \return The insertion point after the region
InsertPointTy
EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall,
Instruction *ExitCall, BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, bool Conditional = false,
bool HasFinalize = true, bool IsCancellable = false);
/// Get the platform-specific name separator.
/// \param Parts different parts of the final name that needs separation
/// \param FirstSeparator First separator used between the initial two
/// parts of the name.
/// \param Separator separator used between all of the rest consecutive
/// parts of the name
static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
StringRef FirstSeparator,
StringRef Separator);
/// Gets (if variable with the given name already exist) or creates
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by null value.
/// \param Ty Type of the global variable. If it is exist already the type
/// must be the same.
/// \param Name Name of the variable.
Constant *getOrCreateOMPInternalVariable(Type *Ty, const Twine &Name,
unsigned AddressSpace = 0);
/// Returns corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created, otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
Value *getOMPCriticalRegionLock(StringRef CriticalName);
/// Callback type for Atomic Expression update
/// ex:
/// \code{.cpp}
/// unsigned x = 0;
/// #pragma omp atomic update
/// x = Expr(x_old); //Expr() is any legal operation
/// \endcode
///
/// \param XOld the value of the atomic memory address to use for update
/// \param IRB reference to the IRBuilder to use
///
/// \returns Value to update X to.
using AtomicUpdateCallbackTy =
const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>;
private:
enum AtomicKind { Read, Write, Update, Capture };
/// Determine whether to emit flush or not
///
/// \param Loc The insert and source location description.
/// \param AO The required atomic ordering
/// \param AK The OpenMP atomic operation kind used.
///
/// \returns whether a flush was emitted or not
bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc,
AtomicOrdering AO, AtomicKind AK);
/// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
/// \param RMWOp The binary operation used for update. If
/// operation is not supported by atomicRMW,
/// or belong to {FADD, FSUB, BAD_BINOP}.
/// Then a `cmpExch` based atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param VolatileX true if \a X volatile?
/// \param IsXLHSInRHSPart true if \a X is Left H.S. in Right H.S. part of
/// the update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \returns A pair of the old value of X before the update, and the value
/// used for the update.
std::pair<Value *, Value *> emitAtomicUpdate(Instruction *AllocIP, Value *X,
Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
bool VolatileX,
bool IsXLHSInRHSPart);
/// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 .
///
/// \Return The instruction
Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2,
AtomicRMWInst::BinOp RMWOp);
public:
/// A struct to pack relevant information while generating atomic Ops
struct AtomicOpValue {
/// Pointer to the memory location the atomic operation acts on (the
/// "target pointer" of createAtomicRead/Write/Update/Capture).
Value *Var = nullptr;
/// True if the value is to be interpreted as signed.
bool IsSigned = false;
/// True if accesses to \p Var are volatile.
bool IsVolatile = false;
};
/// Emit atomic Read for : V = X --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically read
/// \param V Memory address where to store atomically read
/// value
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic read IR.
InsertPointTy createAtomicRead(const LocationDescription &Loc,
AtomicOpValue &X, AtomicOpValue &V,
AtomicOrdering AO);
/// Emit atomic write for : X = Expr --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically written to
/// \param Expr The value to store.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic Write IR.
InsertPointTy createAtomicWrite(const LocationDescription &Loc,
AtomicOpValue &X, Value *Expr,
AtomicOrdering AO);
/// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions.
/// \param RMWOp The binary operation used for update. If operation
/// is not supported by atomicRMW, or belong to
/// {FADD, FSUB, BAD_BINOP}. Then a `cmpExch` based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param IsXLHSInRHSPart true if \a X is Left H.S. in Right H.S. part of
/// the update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \return Insertion point after generated atomic update IR.
InsertPointTy createAtomicUpdate(const LocationDescription &Loc,
Instruction *AllocIP, AtomicOpValue &X,
Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
bool IsXLHSInRHSPart);
/// Emit atomic update for constructs: --- Only Scalar data types
/// V = X; X = X BinOp Expr ,
/// X = X BinOp Expr; V = X,
/// V = X; X = Expr BinOp X,
/// X = Expr BinOp X; V = X,
/// V = X; X = UpdateOp(X),
/// X = UpdateOp(X); V = X,
///
/// \param Loc The insert and source location description.
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param V Memory address where to store captured value
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions
/// \param RMWOp The binary operation used for update. If
/// operation is not supported by atomicRMW, or belong to
/// {FADD, FSUB, BAD_BINOP}. Then a cmpExch based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param UpdateExpr true if X is an in place update of the form
/// X = X BinOp Expr or X = Expr BinOp X
/// \param IsXLHSInRHSPart true if X is Left H.S. in Right H.S. part of the
/// update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
/// \param IsPostfixUpdate true if original value of 'x' must be stored in
/// 'v', not an updated one.
///
/// \return Insertion point after generated atomic capture IR.
InsertPointTy
createAtomicCapture(const LocationDescription &Loc, Instruction *AllocIP,
AtomicOpValue &X, AtomicOpValue &V, Value *Expr,
AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr,
bool IsPostfixUpdate, bool IsXLHSInRHSPart);
/// Create the control flow structure of a canonical OpenMP loop.
///
/// The emitted loop will be disconnected, i.e. no edge to the loop's
/// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's
/// IRBuilder location is not preserved.
///
/// \param DL DebugLoc used for the instructions in the skeleton.
/// \param TripCount Value to be used for the trip count.
/// \param F Function in which to insert the BasicBlocks.
/// \param PreInsertBefore Where to insert BBs that execute before the body,
/// typically the body itself.
/// \param PostInsertBefore Where to insert BBs that execute after the body.
/// \param Name Base name used to derive BB
/// and instruction names.
///
/// \returns The CanonicalLoopInfo that represents the emitted loop.
CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount,
Function *F,
BasicBlock *PreInsertBefore,
BasicBlock *PostInsertBefore,
const Twine &Name = {});
};
/// Class to represented the control flow structure of an OpenMP canonical loop.
///
/// The control-flow structure is standardized for easy consumption by
/// directives associated with loops. For instance, the worksharing-loop
/// construct may change this control flow such that each loop iteration is
/// executed on only one thread. The constraints of a canonical loop in brief
/// are:
///
/// * The number of loop iterations must have been computed before entering the
/// loop.
///
/// * Has an (unsigned) logical induction variable that starts at zero and
/// increments by one.
///
/// * The loop's CFG itself has no side-effects. The OpenMP specification
/// itself allows side-effects, but the order in which they happen, including
/// how often or whether at all, is unspecified. We expect that the frontend
/// will emit those side-effect instructions somewhere (e.g. before the loop)
/// such that the CanonicalLoopInfo itself can be side-effect free.
///
/// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated
/// execution of a loop body that satisfies these constraints. It does NOT
/// represent arbitrary SESE regions that happen to contain a loop. Do not use
/// CanonicalLoopInfo for such purposes.
///
/// The control flow can be described as follows:
///
/// Preheader
/// |
/// /-> Header
/// | |
/// | Cond---\
/// | | |
/// | Body |
/// | | | |
/// | <...> |
/// | | | |
/// \--Latch |
/// |
/// Exit
/// |
/// After
///
/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
/// including) and end at AfterIP (at the After's first instruction, excluding).
/// That is, instructions in the Preheader and After blocks (except the
/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
/// side-effects. Typically, the Preheader is used to compute the loop's trip
/// count. The instructions from BodyIP (at the Body block's first instruction,
/// excluding) until the Latch are also considered outside CanonicalLoopInfo's
/// control and thus can have side-effects. The body block is the single entry
/// point into the loop body, which may contain arbitrary control flow as long
/// as all control paths eventually branch to the Latch block.
///
/// TODO: Consider adding another standardized BasicBlock between Body CFG and
/// Latch to guarantee that there is only a single edge to the latch. It would
/// make loop transformations easier to not needing to consider multiple
/// predecessors of the latch (See redirectAllPredecessorsTo) and would give us
/// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
/// executes after each body iteration.
///
/// There must be no loop-carried dependencies through llvm::Values. This is
/// equivalent to that the Latch has no PHINode and the Header's only PHINode is
/// for the induction variable.
///
/// All code in Header, Cond, Latch and Exit (plus the terminator of the
/// Preheader) are CanonicalLoopInfo's responsibility and their build-up checked
/// by assertOK(). They are expected to not be modified unless explicitly
/// modifying the CanonicalLoopInfo through a method that applies an OpenMP
/// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
/// anymore as its underlying control flow may not exist anymore.
/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
/// may also return a new CanonicalLoopInfo that can be passed to other
/// loop-associated construct implementing methods. These loop-transforming
/// methods may either create a new CanonicalLoopInfo usually using
/// createLoopSkeleton and invalidate the input CanonicalLoopInfo, or reuse and
/// modify one of the input CanonicalLoopInfo and return it as representing the
/// modified loop. What is done is an implementation detail of
/// transformation-implementing method and callers should always assume that the
/// CanonicalLoopInfo passed to it is invalidated and a new object is returned.
/// Returned CanonicalLoopInfo have the same structure and guarantees as the one
/// created by createCanonicalLoop, such that transforming methods do not have
/// to special case where the CanonicalLoopInfo originated from.
///
/// Generally, methods consuming CanonicalLoopInfo do not need an
/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
/// any InsertPoint that is outside CanonicalLoopInfo's control. Specifically,
/// any InsertPoint in the Preheader, After or Block can still be used after
/// calling such a method.
///
/// TODO: Provide mechanisms for exception handling and cancellation points.
///
/// Defined outside OpenMPIRBuilder because nested classes cannot be
/// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h.
class CanonicalLoopInfo {
  friend class OpenMPIRBuilder;

private:
  BasicBlock *Preheader = nullptr; ///< Single entry edge into the loop.
  BasicBlock *Header = nullptr;    ///< Entry of each iteration; holds the IV PHI.
  BasicBlock *Cond = nullptr;      ///< Decides between another iteration and exit.
  BasicBlock *Body = nullptr;      ///< First block of user code per iteration.
  BasicBlock *Latch = nullptr;     ///< Increments the induction variable.
  BasicBlock *Exit = nullptr;      ///< Reached once no iterations remain.
  BasicBlock *After = nullptr;     ///< Clean-up block following the loop.

  /// Add the control blocks of this loop to \p BBs.
  ///
  /// This does not include any block from the body, including the one returned
  /// by getBody().
  ///
  /// FIXME: This currently includes the Preheader and After blocks even though
  /// their content is (mostly) not under CanonicalLoopInfo's control.
  /// Re-evaluate whether this makes sense.
  void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);

public:
  /// Returns whether this object currently represents the IR of a loop. If
  /// returning false, it may have been consumed by a loop transformation or not
  /// been initialized. Do not use in this case.
  bool isValid() const { return Header; }

  /// The preheader ensures that there is only a single edge entering the loop.
  /// Code that must be executed before any loop iteration can be emitted here,
  /// such as computing the loop trip count and begin lifetime markers. Code in
  /// the preheader is not considered part of the canonical loop.
  BasicBlock *getPreheader() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Preheader;
  }

  /// The header is the entry for each iteration. In the canonical control flow,
  /// it only contains the PHINode for the induction variable.
  BasicBlock *getHeader() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Header;
  }

  /// The condition block computes whether there is another loop iteration. If
  /// yes, branches to the body; otherwise to the exit block.
  BasicBlock *getCond() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Cond;
  }

  /// The body block is the single entry for a loop iteration and not controlled
  /// by CanonicalLoopInfo. It can contain arbitrary control flow but must
  /// eventually branch to the \p Latch block.
  BasicBlock *getBody() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Body;
  }

  /// Reaching the latch indicates the end of the loop body code. In the
  /// canonical control flow, it only contains the increment of the induction
  /// variable.
  BasicBlock *getLatch() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Latch;
  }

  /// Reaching the exit indicates no more iterations are being executed.
  BasicBlock *getExit() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Exit;
  }

  /// The after block is intended for clean-up code such as lifetime end
  /// markers. It is separate from the exit block to ensure, analogous to the
  /// preheader, that it has just a single entry edge and is free from PHI
  /// nodes should there be multiple loop exits (such as from break
  /// statements/cancellations).
  BasicBlock *getAfter() const {
    assert(isValid() && "Requires a valid canonical loop");
    return After;
  }

  /// Returns the llvm::Value containing the number of loop iterations. It must
  /// be valid in the preheader and always interpreted as an unsigned integer of
  /// any bit-width.
  Value *getTripCount() const {
    assert(isValid() && "Requires a valid canonical loop");
    // The trip count is the second operand of the comparison heading Cond.
    Instruction *CmpI = &Cond->front();
    assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
    return CmpI->getOperand(1);
  }

  /// Returns the instruction representing the current logical induction
  /// variable. Always unsigned, always starting at 0 with an increment of one.
  Instruction *getIndVar() const {
    assert(isValid() && "Requires a valid canonical loop");
    // The induction variable is the PHINode at the top of the Header block.
    Instruction *IndVarPHI = &Header->front();
    assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI");
    return IndVarPHI;
  }

  /// Return the type of the induction variable (and the trip count).
  Type *getIndVarType() const {
    assert(isValid() && "Requires a valid canonical loop");
    return getIndVar()->getType();
  }

  /// Return the insertion point for user code before the loop (immediately
  /// before the preheader's terminator).
  OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    return {Preheader, std::prev(Preheader->end())};
  };

  /// Return the insertion point for user code in the body.
  OpenMPIRBuilder::InsertPointTy getBodyIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    return {Body, Body->begin()};
  };

  /// Return the insertion point for user code after the loop.
  OpenMPIRBuilder::InsertPointTy getAfterIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    return {After, After->begin()};
  };

  /// Return the function in which this loop's basic blocks reside.
  Function *getFunction() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Header->getParent();
  }

  /// Consistency self-check.
  void assertOK() const;

  /// Invalidate this loop. That is, the underlying IR does not fulfill the
  /// requirements of an OpenMP canonical loop anymore.
  void invalidate();
};
} // end namespace llvm
#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
|
GB_binop__bclr_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bclr_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__bclr_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__bclr_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__bclr_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_uint32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bclr_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__bclr_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_uint32)
// C=scalar+B GB (_bind1st__bclr_uint32)
// C=scalar+B' GB (_bind1st_tran__bclr_uint32)
// C=A+scalar GB (_bind2nd__bclr_uint32)
// C=A'+scalar GB (_bind2nd_tran__bclr_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = GB_BITCLR (aij, bij, uint32_t, 32)
// NOTE: this file is auto-generated; the fix below (removal of two stray
// trailing backslashes) should also be applied to the Generator/ template.
#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// (no trailing backslash after the 0: line splicing happens before comment
// removal, so a continuation here would silently absorb the next line into
// the macro definition)
#define GB_A_IS_PATTERN \
    0

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_BITCLR (x, y, uint32_t, 32) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BCLR || GxB_NO_UINT32 || GxB_NO_BCLR_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; applies GB_BINOP (BITCLR) with
// no accumulator, using nthreads threads.
void GB (_Cdense_ewise3_noaccum__bclr_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // the template expands via the GB_GETA/GB_GETB/GB_BINOP macros above
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C, using the
// task slicing in B_ek_slicing (B_ntasks tasks over B_nthreads threads).
GrB_Info GB (_Cdense_accumB__bclr_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // operator compiled out (see GB_DISABLE); caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (passed untyped via p_bwork) into the dense
// matrix C with nthreads threads.
GrB_Info GB (_Cdense_accumb__bclr_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out (see GB_DISABLE); caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    // (an unreachable duplicate "return (GrB_SUCCESS)" inside the block above
    // was removed; this file is auto-generated, so fix the Generator/ template
    // as well)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C = A+B, C<M> = A+B, or C<!M> = A+B.
GrB_Info GB (_AaddB__bclr_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces used by the template (released by GB_FREE_WORKSPACE below)
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    // alpha and beta are only read for eWiseUnion; for eWiseAdd the scalar
    // inputs are unused and left uninitialized on purpose
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B, C<M> = A.*B, or C<!M> = A.*B, with C
// sparse or hypersparse, driven by the precomputed TaskList.
GrB_Info GB (_AemultB_08__bclr_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. flipxy selects whether the operator arguments are swapped.
GrB_Info GB (_AemultB_02__bclr_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x): the template is expanded with GB_FLIPPED == 1
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B, with M sparse/hyper and both A and B
// bitmap/full; work is sliced over M (M_ek_slicing).
GrB_Info GB (_AemultB_04__bclr_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult with bitmap result: C = A.*B, C<M> = A.*B, or C<!M> = A.*B.
GrB_Info GB (_AemultB_bitmap__bclr_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = bitclr (x, Bx [p]) for every entry present in B, with the scalar
// bound to the first operand.
GrB_Info GB (_bind1st__bclr_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs and output
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    uint32_t xscalar = (*((uint32_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // apply the operator only where the bitmap says B has an entry
        if (GBB (Bb, p))
        {
            uint32_t bval = GBX (Bx, p, false) ;
            Cx [p] = GB_BITCLR (xscalar, bval, uint32_t, 32) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = bitclr (Ax [p], y) for every entry present in A, with the scalar
// bound to the second operand.
GrB_Info GB (_bind2nd__bclr_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs and output
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t yscalar = (*((uint32_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // apply the operator only where the bitmap says A has an entry
        if (GBB (Ab, p))
        {
            uint32_t aval = GBX (Ax, p, false) ;
            Cx [p] = GB_BITCLR (aval, yscalar, uint32_t, 32) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_BITCLR (x, aij, uint32_t, 32) ; \
}

// C = op (x, A'): transpose A and apply the operator with x bound first.
GrB_Info GB (_bind1st_tran__bclr_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for code after this function (here A's and B's types
    // are both uint32_t, so the redefinition is textually identical)
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_BITCLR (aij, y, uint32_t, 32) ; \
}

// C = op (A', y): transpose A and apply the operator with y bound second.
GrB_Info GB (_bind2nd_tran__bclr_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ic.c | /*
* ============================ ic =====================
* IC sets the initial condition
* ATMS 502 / CSE 566, Spring 2016
*
* Arguments:
*
* q1 real array IC data. Set 1..nx here;
* [0],[nx+1] = ghost zones
* if 1 ghost point on each side
* dx real grid spacing
* i1,i2 integers indices bounding array data
* nx integer number of grid points
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
void ic(rho,theta_d,u,v,w,p,dx,dy,dz,i1,i2,j1,j2,k1,k2,nx,ny,nz,x0,y0,z0,BC_WIDTH)
int i1,i2,j1,j2,k1,k2,nx,ny,nz,BC_WIDTH;
float dx,dy,dz,rho[],u[][ny][nz],v[][j2+2][nz],w[][ny][k2+2],p[][ny][nz],theta_d[][ny][nz],x0,y0,z0;
{
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
int i,j,k,m;
float x[nx],y[ny],z[nz],d[nx][ny][nz];
float theta_0 = 300;
float g = 9.81;
float cp = 1004;
float Rd = 287;
float P0 = 100000;
float T,P,rm;
int delta_theta[2],delta_v[2],xstart[2],ystart[2],zstart[2],xradius[2],yradius[2],zradius[2];
float upertur = 2.0;
srand(0.0);
/*printf(" Enter the first temperature perturbation value \n");
scanf("%d",&delta_theta[0]);
printf(" Enter the first temperature perturbation x coordinate \n");
scanf("%d",&xstart[0]);
printf(" Enter the first temperature perturbation z coordinate \n");
scanf("%d",&zstart[0]);
printf(" Enter the first temperature perturbation x radius \n");
scanf("%d",&xradius[0]);
printf(" Enter the first temperature perturbation z radius \n");
scanf("%d",&zradius[0]);
printf(" Enter the second temperature perturbation value \n");
scanf("%d",&delta_theta[1]);
printf(" Enter the second temperature perturbation x coordinate \n");
scanf("%d",&xstart[1]);
printf(" Enter the second temperature perturbation z coordinate \n");
scanf("%d",&zstart[1]);
printf(" Enter the second temperature perturbation x radius \n");
scanf("%d",&xradius[1]);
printf(" Enter the second temperature perturbation z radius \n");
scanf("%d",&zradius[1]);*/
delta_theta[0] = -25;
delta_theta[1] = -25;
delta_v[0] = -40;
delta_v[1] = 40;
xstart[0] = 25;
xstart[1] = 14975;
ystart[0] = 7525;
ystart[1] = 7525;
zstart[0] = 1525;
zstart[1] = 1525;
xradius[0] = 3500;
xradius[1] = 3500;
yradius[0] = 999999;
yradius[1] = 999999;
zradius[0] = 1750;
zradius[1] = 1750;
#pragma omp parallel for shared(u) private(i,j,k)
for (i=i1;i<=i2+1;i++) /*u*/
for (j=j1;j<=j2;j++)
for (k=k1;k<=k2;k++)
{
u[i][j][k] = 0;
}
for (i=i1+2;i<=i2-1;i++) /*u*/
for (j=j1+1;j<=j2-1;j++)
for (k=k1+1;k<=k2-1;k++)
{
u[i][j][k] = u[i][j][k] + upertur * ( rand() / (RAND_MAX + 1.0) ) - upertur/2.0;
}
#pragma omp parallel for shared(v) private(i,j,k)
for (i=i1;i<=i2;i++) /*v*/
for (j=j1-1;j<=j2+1;j++)
for (k=k1;k<=k2;k++)
{
v[i][j][k] = 0;
}
#pragma omp parallel for shared(w) private(i,j,k)
for (i=i1;i<=i2;i++) /*w*/
for (j=j1;j<=j2;j++)
for (k=k1;k<=k2+1;k++)
{
w[i][j][k] = 0;
}
#pragma omp parallel for shared(p) private(i,j,k)
for (i=i1;i<=i2;i++) /*p*/
for (j=j1;j<=j2;j++)
for (k=k1;k<=k2;k++)
{
p[i][j][k] = 0;
}
for (i=i1;i<=i2;i++)
for (j=j1;j<=j2;j++)
for (k=k1;k<=k2;k++)
{
x[i] = dx/2+dx*(i-i1);
y[j] = dy/2+dy*(j-j1);
z[k] = dz/2+dz*(k-k1);
theta_d[i][j][k] = 0;
for (m=0;m<2;m++)
{
rm = sqrt(pow((x[i]-xstart[m])/xradius[m],2.0)+pow((y[j]-ystart[m])/yradius[m],2.0)+pow((z[k]-zstart[m])/zradius[m],2.0));
if (rm <= 1.0)
{
theta_d[i][j][k] = theta_d[i][j][k] + delta_theta[m]/2.0*(cos(rm*M_PI)+1);
}
}
}
for (i=i1;i<=i2;i++)
for (j=j1;j<=j2+1;j++)
for (k=k1;k<=k2;k++)
{
x[i] = dx/2+dx*(i-i1);
y[j] = dy/2+dy*(j-j1);
z[k] = dz/2+dz*(k-k1);
for (m=0;m<2;m++)
{
rm = sqrt(pow((x[i]-xstart[m])/xradius[m],2.0)+pow((y[j]-ystart[m])/yradius[m],2.0)+pow((z[k]-zstart[m])/zradius[m],2.0));
if (rm <= 1.0)
{
v[i][j][k] = v[i][j][k] + delta_v[m]/2.0*(cos(rm*M_PI)+1);
}
}
}
for (k=k1-1;k<=k2+1;k++ ) /*rho*/
{
z[k] = dz/2+dz*(k-k1);
T = 300.0-g/cp*z[k];
P = P0*pow(T/theta_0,cp/Rd);
rho[k] = P/Rd/T;
}
return;
}
|
normal.c | /* =============================================================================
*
* normal.c
* -- Implementation of normal k-means clustering algorithm
*
* =============================================================================
*
* Author:
*
* Wei-keng Liao
* ECE Department, Northwestern University
* email: wkliao@ece.northwestern.edu
*
*
* Edited by:
*
* Jay Pisharath
* Northwestern University.
*
* Chi Cao Minh
* Stanford University
*
* =============================================================================
*
* For the license of bayes/sort.h and bayes/sort.c, please see the header
* of the files.
*
* ------------------------------------------------------------------------
*
* For the license of kmeans, please see kmeans/LICENSE.kmeans
*
* ------------------------------------------------------------------------
*
* For the license of ssca2, please see ssca2/COPYRIGHT
*
* ------------------------------------------------------------------------
*
* For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the
* header of the files.
*
* ------------------------------------------------------------------------
*
* For the license of lib/rbtree.h and lib/rbtree.c, please see
* lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree
*
* ------------------------------------------------------------------------
*
* Unless otherwise noted, the following license applies to STAMP files:
*
* Copyright (c) 2007, Stanford University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Stanford University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* =============================================================================
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <math.h>
#include "common.h"
#include "normal.h"
#include "random.h"
#include "thread.h"
#include "timer.h"
#include "tm.h"
#include "util.h"
#include "stm/lib_hicamp.h"
/* accumulated wall-clock time of the clustering loop (seconds) */
double global_time = 0.0;

/* bundle of per-iteration arguments handed to each worker thread */
typedef struct args {
    float** feature;        /* in: [npoints][nfeatures] */
    int nfeatures;
    int npoints;
    int nclusters;
    int* membership;        /* out: cluster index per point */
    float** clusters;       /* current cluster centers [nclusters][nfeatures] */
    int** new_centers_len;  /* per-cluster point counts being accumulated */
    float** new_centers;    /* per-cluster feature sums being accumulated */
} args_t;

/* number of points whose membership changed this iteration (summed by work) */
float global_delta;
long global_i; /* index into task queue */

/* number of points a thread claims from the task queue at a time */
#define CHUNK 3
/* =============================================================================
* work
* =============================================================================
*/
/*
 * work -- per-thread body of one k-means iteration.
 *
 * Threads claim CHUNK-sized slices of the point set from the shared task
 * queue (global_i), assign each point to its nearest cluster, and
 * transactionally accumulate the per-cluster counts and feature sums used
 * later to recompute the centers. Each thread's local count of membership
 * changes is finally folded into global_delta.
 */
static void
work (void* argPtr)
{
    TM_THREAD_ENTER();

    args_t* args = (args_t*)argPtr;
    float** feature = args->feature;
    int nfeatures = args->nfeatures;
    int npoints = args->npoints;
    int nclusters = args->nclusters;
    int* membership = args->membership;
    float** clusters = args->clusters;
    int** new_centers_len = args->new_centers_len;
    float** new_centers = args->new_centers;
    float delta = 0.0;
    int index;
    int i;
    int j;
    int start;
    int stop;
    int myId;

    myId = thread_getId();
    start = myId * CHUNK;

    while (start < npoints) {
        stop = (((start + CHUNK) < npoints) ? (start + CHUNK) : npoints);
        for (i = start; i < stop; i++) {

            index = common_findNearestPoint(feature[i],
                                            nfeatures,
                                            clusters,
                                            nclusters);
            /*
             * If membership changes, increase delta by 1.
             * membership[i] cannot be changed by other threads
             */
            if (membership[i] != index) {
                delta += 1.0;
            }

            /* Assign the membership to object i */
            /* membership[i] can't be changed by other thread */
            membership[i] = index;

            /*
             * Update new cluster centers: sum of objects located within.
             * (debug printfs removed -- I/O inside a transaction may be
             * re-executed on abort and serializes all threads)
             */
            TM_BEGIN();
            TM_SHARED_WRITE_I(*new_centers_len[index],
                              TM_SHARED_READ_I(*new_centers_len[index]) + 1);
            for (j = 0; j < nfeatures; j++) {
                TM_SHARED_WRITE_F(
                    new_centers[index][j],
                    (TM_SHARED_READ_F(new_centers[index][j]) + feature[i][j])
                );
            }
            TM_END();
        }

        /* Claim the next chunk of points from the shared task queue */
        if (start + CHUNK < npoints) {
            TM_BEGIN();
            start = (int)TM_SHARED_READ_L(global_i);
            TM_SHARED_WRITE_L(global_i, (long)(start + CHUNK));
            TM_END();
        } else {
            break;
        }
    }

    /* Fold this thread's membership-change count into the global delta */
    TM_BEGIN();
    TM_SHARED_WRITE_F(global_delta, TM_SHARED_READ_F(global_delta) + delta);
    TM_END();

    TM_THREAD_EXIT();
}
/* =============================================================================
* normal_exec
* =============================================================================
*/
/*
 * normal_exec -- run k-means clustering until convergence.
 *
 * Iterates nearest-point assignment (parallelized via work()) and center
 * recomputation until the fraction of points changing membership drops to
 * `threshold` or 500 iterations elapse.
 *
 * Returns the [nclusters][nfeatures] cluster centers (caller owns/frees);
 * membership[] receives each point's final cluster index.
 */
float**
normal_exec (int nthreads,
             float** feature,    /* in: [npoints][nfeatures] */
             int nfeatures,
             int npoints,
             int nclusters,
             float threshold,
             int* membership,
             random_t* randomPtr) /* out: [npoints] */
{
    int i;
    int j;
    int loop = 0;
    int** new_centers_len; /* [nclusters]: no. of points in each cluster */
    float delta;
    float** clusters;      /* out: [nclusters][nfeatures] */
    float** new_centers;   /* [nclusters][nfeatures] */
    void* alloc_memory = NULL;
    args_t args;
    TIMER_T startTime;
    TIMER_T stopTime;

    /* Allocate space for returning variable clusters[]: one contiguous
       data buffer plus a row-pointer array */
    clusters = (float**)SEQ_MALLOC(nclusters * sizeof(float*));
    assert(clusters);
    clusters[0] = (float*)SEQ_MALLOC(nclusters * nfeatures * sizeof(float));
    assert(clusters[0]);
    for (i = 1; i < nclusters; i++) {
        clusters[i] = clusters[i-1] + nfeatures;
    }

    /* Randomly pick cluster centers from the input points */
    for (i = 0; i < nclusters; i++) {
        int n = (int)(random_generate(randomPtr) % npoints);
        for (j = 0; j < nfeatures; j++) {
            clusters[i][j] = feature[n][j];
        }
    }

    for (i = 0; i < npoints; i++) {
        membership[i] = -1;
    }

    /*
     * Need to initialize new_centers_len and new_centers[0] to all 0.
     * Allocate clusters on different cache lines to reduce false sharing.
     */
    {
        int cluster_size = sizeof(int) + sizeof(float) * nfeatures;
        const int cacheLineSize = 32;
        cluster_size += (cacheLineSize-1) - ((cluster_size-1) % cacheLineSize);
        /* NOTE(review): alloc_memory comes from hccalloc() but is released
           below with SEQ_FREE() -- confirm the two allocators are compatible */
        alloc_memory = hccalloc(nclusters, cluster_size);
        new_centers_len = (int**) SEQ_MALLOC(nclusters * sizeof(int*));
        new_centers = (float**) SEQ_MALLOC(nclusters * sizeof(float*));
        assert(alloc_memory && new_centers && new_centers_len);
        for (i = 0; i < nclusters; i++) {
            /* each cluster's count and feature sums share one cache line */
            new_centers_len[i] = (int*)((char*)alloc_memory + cluster_size * i);
            new_centers[i] = (float*)((char*)alloc_memory + cluster_size * i + sizeof(int));
        }
    }

    // NB: Since ASF/PTLSim "REAL" is native execution, and since we are using
    //     wallclock time, we want to be sure we read time inside the
    //     simulator, or else we report native cycles spent on the benchmark
    //     instead of simulator cycles.
    //GOTO_SIM();
    //TIMER_READ(start);
    BEGIN_ROI;

    do {
        delta = 0.0;

        args.feature         = feature;
        args.nfeatures       = nfeatures;
        args.npoints         = npoints;
        args.nclusters       = nclusters;
        args.membership      = membership;
        args.clusters        = clusters;
        args.new_centers_len = new_centers_len;
        args.new_centers     = new_centers;

        global_i = nthreads * CHUNK;
        global_delta = delta;

#ifdef OTM
#pragma omp parallel
        {
            work(&args);
        }
#else
        thread_start(work, &args);
#endif

        delta = global_delta;

        /* Replace old cluster centers with new_centers */
        for (i = 0; i < nclusters; i++) {
            for (j = 0; j < nfeatures; j++) {
                /* BUGFIX: compare the cluster's point count, not the pointer
                   (the pointer is always non-NULL, so the old test allowed a
                   division by zero for empty clusters) */
                if (*new_centers_len[i] > 0) {
                    clusters[i][j] = new_centers[i][j] / *new_centers_len[i];
                }
                new_centers[i][j] = 0.0; /* set back to 0 */
            }
            *new_centers_len[i] = 0; /* set back to 0 */
        }

        delta /= npoints;

    } while ((delta > threshold) && (loop++ < 500));

    END_ROI;
    //TIMER_READ(stop);

    // NB: As above, timer reads must be done inside of the simulated region
    //     for PTLSim/ASF
    //GOTO_REAL();

    /* NOTE(review): startTime/stopTime are never written (the TIMER_READ
       calls above are commented out), so this accumulates an indeterminate
       value -- restore the TIMER_READ calls or drop this line */
    global_time += TIMER_DIFF_SECONDS(startTime, stopTime);

    SEQ_FREE(alloc_memory);
    SEQ_FREE(new_centers);
    SEQ_FREE(new_centers_len);

    return clusters;
}
/* =============================================================================
*
* End of normal.c
*
* =============================================================================
*/
|
volumeramdistancetransform.h | /*********************************************************************************
*
* Inviwo - Interactive Visualization Workshop
*
* Copyright (c) 2016-2020 Inviwo Foundation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*********************************************************************************/
#pragma once
#include <modules/base/basemoduledefine.h>
#include <inviwo/core/util/indexmapper.h>
#include <inviwo/core/datastructures/volume/volume.h>
#include <inviwo/core/datastructures/volume/volumeramprecision.h>
#ifdef IVW_USE_OPENMP
#include <omp.h>
#endif
namespace inviwo {
namespace util {
/**
* Implementation of Euclidean Distance Transform according to Saito's algorithm:
* T. Saito and J.I. Toriwaki. New algorithms for Euclidean distance transformations
* of an n-dimensional digitized picture with applications. Pattern Recognition, 27(11).
* pp. 1551-1565, 1994.
* http://www.cs.jhu.edu/~misha/ReadingSeminar/Papers/Saito94.pdf
*
* Calculates the distance in grid index space
 * * Predicate is a function of type (const T &value) -> bool to decide if a value in the input
 * is a "feature".
 * * ValueTransform is a function of type (const U& squaredDist) -> U that is applied to all
 * squared distance values at the end of the calculation.
 * * ProgressCallback is a function of type (double progress) -> void that is called with a value
 * from 0 to 1 to indicate the progress of the calculation.
*/
// Core implementation: three-pass squared Euclidean distance transform
// (Saito & Toriwaki) of inVolume into outDistanceField; see the comment above
// for the meaning of Predicate, ValueTransform and ProgressCallback.
template <typename T, typename U, typename Predicate, typename ValueTransform,
          typename ProgressCallback>
void volumeRAMDistanceTransform(const VolumeRAMPrecision<T>* inVolume,
                                VolumeRAMPrecision<U>* outDistanceField, const Matrix<3, U> basis,
                                const size3_t upsample, Predicate predicate,
                                ValueTransform valueTransform, ProgressCallback callback);

// Convenience overload: feature = normalized value > 0.5, output = Euclidean
// (square-rooted) distance, no progress reporting.
template <typename T, typename U>
void volumeRAMDistanceTransform(const VolumeRAMPrecision<T>* inVolume,
                                VolumeRAMPrecision<U>* outDistanceField, const Matrix<3, U> basis,
                                const size3_t upsample);

// Dispatches on the scalar type of inVolume's RAM representation and runs the
// transform with the supplied predicate/transform/progress callback.
template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void volumeDistanceTransform(const Volume* inVolume, VolumeRAMPrecision<U>* outDistanceField,
                             const size3_t upsample, Predicate predicate,
                             ValueTransform valueTransform, ProgressCallback callback);

// Flag-driven variant: threshold/normalize select the feature predicate, flip
// inverts it, square selects squared vs. Euclidean output, scale multiplies
// the final values.
template <typename U, typename ProgressCallback>
void volumeDistanceTransform(const Volume* inVolume, VolumeRAMPrecision<U>* outDistanceField,
                             const size3_t upsample, double threshold, bool normalize, bool flip,
                             bool square, double scale, ProgressCallback callback);

// Same as the flag-driven variant, without progress reporting.
template <typename U>
void volumeDistanceTransform(const Volume* inVolume, VolumeRAMPrecision<U>* outDistanceField,
                             const size3_t upsample, double threshold, bool normalize, bool flip,
                             bool square, double scale);
} // namespace util
/**
 * Three-pass squared Euclidean distance transform (Saito & Toriwaki).
 * Pass 1 scans along x, passes 2 and 3 propagate the minima along y and z.
 * Distances are computed in squared world units (scaled by the squared voxel
 * size per axis) and valueTransform is applied to every voxel at the end.
 * callback receives progress values 0.0, 0.3, 0.6, 0.9 and 1.0.
 */
template <typename T, typename U, typename Predicate, typename ValueTransform,
          typename ProgressCallback>
void util::volumeRAMDistanceTransform(const VolumeRAMPrecision<T>* inVolume,
                                      VolumeRAMPrecision<U>* outDistanceField,
                                      const Matrix<3, U> basis, const size3_t upsample,
                                      Predicate predicate, ValueTransform valueTransform,
                                      ProgressCallback callback) {

#ifdef IVW_USE_OPENMP
    // NOTE(review): relies on a transitive include for std::thread — confirm
    // <thread> is pulled in on all platforms.
    omp_set_num_threads(std::thread::hardware_concurrency());
#endif

    using int64 = glm::int64;

    auto square = [](auto a) { return a * a; };

    callback(0.0);

    const T* src = inVolume->getDataTyped();
    U* dst = outDistanceField->getDataTyped();
    const i64vec3 srcDim{inVolume->getDimensions()};
    const i64vec3 dstDim{outDistanceField->getDimensions()};
    const i64vec3 sm{upsample};

    // Per-axis squared voxel size in world units: diagonal of basis^T * basis
    // divided (component-wise) by the squared destination dimensions.
    const auto squareBasis = glm::transpose(basis) * basis;
    const Vector<3, U> squareBasisDiag{squareBasis[0][0], squareBasis[1][1], squareBasis[2][2]};
    const Vector<3, U> squareVoxelSize{squareBasisDiag / Vector<3, U>{dstDim * dstDim}};
    const Vector<3, U> invSquareVoxelSize{Vector<3, U>{1.0f} / squareVoxelSize};

    {
        // Warn when the basis has significant off-diagonal terms: the
        // separable per-axis scan below is only exact for orthogonal bases.
        const auto maxdist = glm::compMax(squareBasisDiag);
        bool orthogonal = true;
        for (size_t i = 0; i < squareBasis.length(); i++) {
            for (size_t j = 0; j < squareBasis.length(); j++) {
                if (i != j) {
                    if (std::abs(squareBasis[i][j]) > 10.0e-8 * maxdist) {
                        orthogonal = false;
                        break;
                    }
                }
            }
        }
        if (!orthogonal) {
            LogWarnCustom(
                "volumeRAMDistanceTransform",
                "Calculating the distance transform on a non-orthogonal volume will not give "
                "correct values");
        }
    }

    // The destination must be exactly the source upsampled by sm per axis.
    if (srcDim * sm != dstDim) {
        throw Exception(
            "DistanceTransformRAM: Dimensions does not match src = " + toString(srcDim) +
            " dst = " + toString(dstDim) + " scaling = " + toString(sm),
            IVW_CONTEXT_CUSTOM("volumeRAMDistanceTransform"));
    }

    util::IndexMapper<3, int64> srcInd(srcDim);
    util::IndexMapper<3, int64> dstInd(dstDim);

    // A destination voxel is a feature when the predicate accepts the source
    // voxel it maps onto (integer division by the upsampling factors).
    auto is_feature = [&](const int64 x, const int64 y, const int64 z) {
        return predicate(src[srcInd(x / sm.x, y / sm.y, z / sm.z)]);
    };

    // first pass, forward and backward scan along x
    // result: min distance in x direction
#ifdef IVW_USE_OPENMP
#pragma omp parallel for
#endif
    for (int64 z = 0; z < dstDim.z; ++z) {
        for (int64 y = 0; y < dstDim.y; ++y) {
            // forward: dist counts voxels since the last feature (initialized
            // to dstDim.x, i.e. "no feature seen yet")
            U dist = static_cast<U>(dstDim.x);
            for (int64 x = 0; x < dstDim.x; ++x) {
                if (!is_feature(x, y, z)) {
                    ++dist;
                } else {
                    dist = U(0);
                }
                dst[dstInd(x, y, z)] = squareVoxelSize.x * square(dist);
            }
            // backward
            dist = static_cast<U>(dstDim.x);
            for (int64 x = dstDim.x - 1; x >= 0; --x) {
                if (!is_feature(x, y, z)) {
                    ++dist;
                } else {
                    dist = U(0);
                }
                dst[dstInd(x, y, z)] =
                    std::min<U>(dst[dstInd(x, y, z)], squareVoxelSize.x * square(dist));
            }
        }
    }

    // second pass, scan y direction
    // for each voxel v(x,y,z) find min_i(data(x,i,z) + (y - i)^2), 0 <= i < dimY
    // result: min distance in x and y direction
    callback(0.3);
#ifdef IVW_USE_OPENMP
#pragma omp parallel
#endif
    {
        // Per-thread column buffer so reads see the pass-1 values while dst is
        // being overwritten.
        std::vector<U> buff;
        buff.resize(dstDim.y);
#ifdef IVW_USE_OPENMP
#pragma omp for
#endif
        for (int64 z = 0; z < dstDim.z; ++z) {
            for (int64 x = 0; x < dstDim.x; ++x) {
                // cache column data into temporary buffer
                for (int64 y = 0; y < dstDim.y; ++y) {
                    buff[y] = dst[dstInd(x, y, z)];
                }
                for (int64 y = 0; y < dstDim.y; ++y) {
                    auto d = buff[y];
                    if (d != U(0)) {
                        // rMax bounds the search window: beyond it the (y-i)^2
                        // term alone exceeds the current minimum d.
                        const auto rMax =
                            static_cast<int64>(std::sqrt(d * invSquareVoxelSize.y)) + 1;
                        // Clamp the window to the column; note the lower clamp
                        // uses y - 1, so index 0 is only reached via its own
                        // scan position — NOTE(review): looks intentionally
                        // conservative, verify against the reference algorithm.
                        const auto rStart = std::min(rMax, y - 1);
                        const auto rEnd = std::min(rMax, dstDim.y - y);
                        for (int64 n = -rStart; n < rEnd; ++n) {
                            const auto w = buff[y + n] + squareVoxelSize.y * square(n);
                            if (w < d) d = w;
                        }
                    }
                    dst[dstInd(x, y, z)] = d;
                }
            }
        }
    }

    // third pass, scan z direction
    // for each voxel v(x,y,z) find min_i(data(x,y,i) + (z - i)^2), 0 <= i < dimZ
    // result: min distance in x, y and z direction
    callback(0.6);
#ifdef IVW_USE_OPENMP
#pragma omp parallel
#endif
    {
        std::vector<U> buff;
        buff.resize(dstDim.z);
#ifdef IVW_USE_OPENMP
#pragma omp for
#endif
        for (int64 y = 0; y < dstDim.y; ++y) {
            for (int64 x = 0; x < dstDim.x; ++x) {
                // cache column data into temporary buffer
                for (int64 z = 0; z < dstDim.z; ++z) {
                    buff[z] = dst[dstInd(x, y, z)];
                }
                for (int64 z = 0; z < dstDim.z; ++z) {
                    auto d = buff[z];
                    if (d != U(0)) {
                        const auto rMax =
                            static_cast<int64>(std::sqrt(d * invSquareVoxelSize.z)) + 1;
                        const auto rStart = std::min(rMax, z - 1);
                        const auto rEnd = std::min(rMax, dstDim.z - z);
                        for (int64 n = -rStart; n < rEnd; ++n) {
                            const auto w = buff[z + n] + squareVoxelSize.z * square(n);
                            if (w < d) d = w;
                        }
                    }
                    dst[dstInd(x, y, z)] = d;
                }
            }
        }
    }

    // Final pass: apply the user-supplied transform (e.g. sqrt) to every voxel.
    callback(0.9);
    const int64 volSize = dstDim.x * dstDim.y * dstDim.z;
#ifdef IVW_USE_OPENMP
#pragma omp parallel for
#endif
    for (int64 i = 0; i < volSize; ++i) {
        dst[i] = valueTransform(dst[i]);
    }

    callback(1.0);
}
/**
 * Convenience overload of volumeRAMDistanceTransform using the defaults:
 * a voxel is a feature when its normalized value exceeds 0.5, the squared
 * distances are square-rooted at the end, and progress is not reported.
 */
template <typename T, typename U>
void util::volumeRAMDistanceTransform(const VolumeRAMPrecision<T>* inVolume,
                                      VolumeRAMPrecision<U>* outDistanceField,
                                      const Matrix<3, U> basis, const size3_t upsample) {
    util::volumeRAMDistanceTransform(
        inVolume, outDistanceField, basis, upsample,
        // Default feature predicate: normalized value above 0.5.
        [](const T& val) { return util::glm_convert_normalized<double>(val) > 0.5; },
        // Report Euclidean (not squared) distance.
        [](const U& squareDist) {
            return static_cast<U>(std::sqrt(static_cast<double>(squareDist)));
        },
        // No-op progress callback; parameter left unnamed to avoid an
        // unused-parameter warning (was `double f`).
        [](double) {});
}
/**
 * Resolves the scalar type of inVolume's RAM representation at runtime and
 * forwards to volumeRAMDistanceTransform with the supplied predicate,
 * value transform and progress callback.
 */
template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void util::volumeDistanceTransform(const Volume* inVolume, VolumeRAMPrecision<U>* outDistanceField,
                                   const size3_t upsample, Predicate predicate,
                                   ValueTransform valueTransform, ProgressCallback callback) {
    const auto ramRep = inVolume->getRepresentation<VolumeRAM>();
    ramRep->dispatch<void, dispatching::filter::Scalars>([&](const auto typedRam) {
        volumeRAMDistanceTransform(typedRam, outDistanceField, inVolume->getBasis(), upsample,
                                   predicate, valueTransform, callback);
    });
}
/**
 * Flag-driven distance transform. The three booleans select among four
 * predicates and two output transforms:
 *  - normalize: compare in the normalized [0,1] range instead of raw values.
 *  - flip:      features are values below the threshold instead of above it.
 *  - square:    report scaled squared distance instead of scaled Euclidean.
 */
template <typename U, typename ProgressCallback>
void util::volumeDistanceTransform(const Volume* inVolume, VolumeRAMPrecision<U>* outDistanceField,
                                   const size3_t upsample, double threshold, bool normalize,
                                   bool flip, bool square, double scale,
                                   ProgressCallback progress) {
    const auto inputVolumeRep = inVolume->getRepresentation<VolumeRAM>();
    inputVolumeRep->dispatch<void, dispatching::filter::Scalars>([&](const auto vrprecision) {
        using ValueType = util::PrecisionValueType<decltype(vrprecision)>;

        // Raw-value predicates.
        const auto predicateIn = [threshold](const ValueType& val) { return val < threshold; };
        const auto predicateOut = [threshold](const ValueType& val) { return val > threshold; };
        // Normalized-value predicates.
        const auto normPredicateIn = [threshold](const ValueType& val) {
            return util::glm_convert_normalized<double>(val) < threshold;
        };
        const auto normPredicateOut = [threshold](const ValueType& val) {
            return util::glm_convert_normalized<double>(val) > threshold;
        };
        // Output transforms: scaled squared distance or scaled Euclidean distance.
        const auto valTransIdent = [scale](const float& squareDist) {
            return static_cast<float>(scale * squareDist);
        };
        const auto valTransSqrt = [scale](const float& squareDist) {
            return static_cast<float>(scale * std::sqrt(squareDist));
        };

        // Single call site: every flag combination funnels through here.
        const auto run = [&](const auto& pred, const auto& trans) {
            util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
                                             upsample, pred, trans, progress);
        };

        if (normalize) {
            if (flip) {
                if (square) {
                    run(normPredicateIn, valTransIdent);
                } else {
                    run(normPredicateIn, valTransSqrt);
                }
            } else {
                if (square) {
                    run(normPredicateOut, valTransIdent);
                } else {
                    run(normPredicateOut, valTransSqrt);
                }
            }
        } else {
            if (flip) {
                if (square) {
                    run(predicateIn, valTransIdent);
                } else {
                    run(predicateIn, valTransSqrt);
                }
            } else {
                if (square) {
                    run(predicateOut, valTransIdent);
                } else {
                    run(predicateOut, valTransSqrt);
                }
            }
        }
    });
}
/**
 * Same as the flag-driven volumeDistanceTransform, but without progress
 * reporting: forwards with a no-op callback.
 */
template <typename U>
void util::volumeDistanceTransform(const Volume* inVolume, VolumeRAMPrecision<U>* outDistanceField,
                                   const size3_t upsample, double threshold, bool normalize,
                                   bool flip, bool square, double scale) {
    const auto noProgress = [](double) {};
    util::volumeDistanceTransform(inVolume, outDistanceField, upsample, threshold, normalize, flip,
                                  square, scale, noProgress);
}
} // namespace inviwo
|
geneticalgorithm.h | #ifndef GENETICALGORITHM_H
#define GENETICALGORITHM_H
#include "../entities/problem.h"
#include "globalsolver.h"
#include "parameters.h"

#include <cfloat>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <memory>
#include <vector>
/**
 * Single-objective genetic algorithm over real-valued decision vectors.
 *
 * Each generation: selection picks two parents per population slot, crossover
 * (uniform or SBX) produces two children, the children are mutated (random or
 * polynomial) and evaluated, and an elitist replacement keeps the best of
 * {current individual, child1, child2} per slot. The best solution ever seen
 * is tracked through GlobalSolver::updateGlobalBest.
 *
 * T is the payload type of the Solution objects used for INDIRECT problem
 * representations; the decision vectors themselves are std::vector<double>.
 */
template <class T>
class GeneticAlgorithm : public GlobalSolver<T>
{
public:
    /**
     * Creates the solver with parameters.numbIndividuals individuals and
     * copies the GA operator settings. Exits when fewer than 2 individuals
     * are requested, since selection needs two distinct parents.
     * (Fixed: the `&parameters` reference declarator had been corrupted by
     * HTML-entity mangling in the original source.)
     */
    GeneticAlgorithm(const GeneticAlgorithmParameters &parameters, std::shared_ptr<Problem<T>> prob) : GlobalSolver<T>(parameters.numbIndividuals, prob)
    {
        if (this->numberOfAgents < 2) {
            std::cerr << "The number of individuals needs to be equal or higher than 2" << std::endl;
            exit(EXIT_FAILURE);
        }
        this->crossoverRate = parameters.crossoverRate;
        this->mutationRate = parameters.mutationRate;
        this->crossoverType = parameters.crossoverType;
        this->selectionType = parameters.selectionType;
        this->mutationType = parameters.mutationType;
        puts("Genetic Algorithm instantiated");
    }

    /**
     * Runs the evolutionary loop until BOTH stopping criteria are exhausted
     * (the while condition keeps iterating while either still holds), then
     * prints the best solution found.
     */
    void solve()
    {
        if (this->maxIterations == 0 && this->runningTime == 0) {
            std::cerr << "Use \"setMaxIterations(int)\" or \"setRunningTime(double)\" to "
                         "define a stopping criteria!"
                      << std::endl;
            exit(EXIT_FAILURE);
        } else
            std::cout << "Starting Genetic Algorithm search procedure" << std::endl;

        // etaM/etaC use -FLT_MIN as an "unset" sentinel (see members below).
        if (this->mutationType == MutationType::POLYNOMIAL && this->etaM == -FLT_MIN) {
            std::cerr << "You must set the polynomial mutation index through "
                         "\"setEtaM(float)\" function"
                      << std::endl;
            exit(EXIT_FAILURE);
        } else if (this->crossoverType == CrossoverType::SIMULATED_BINARY && this->etaC == -FLT_MIN) {
            std::cerr << "You must set the SBX operator index through \"setEtaC(float)\" "
                         "function"
                      << std::endl;
            exit(EXIT_FAILURE);
        }

        utils::startTimeCounter();

        // Current population: one random decision vector per individual.
        std::vector<std::vector<double>> individuals(this->numberOfAgents);
#pragma omp parallel for
        for (int i = 0; i < this->numberOfAgents; i++)
            this->problem->fillRandomDecisionVariables(individuals[i]);

        // Objective value of each individual.
        std::vector<double> individualsFitness(this->numberOfAgents);
#pragma omp parallel for
        for (int i = 0; i < this->numberOfAgents; i++) {
            switch (this->problem->getRepType()) {
            case RepresentationType::DIRECT:
                individualsFitness[i] = this->problem->evaluate(individuals[i]);
                break;
            case RepresentationType::INDIRECT: {
                // INDIRECT: the vector is decoded into a Solution first.
                // (Braces added: declarations inside a case label need their
                // own scope.)
                std::shared_ptr<Solution<T>> sol = this->problem->construct(individuals[i]);
                individualsFitness[i] = sol->getFitness();
                break;
            }
            }
#pragma omp critical
            this->updateGlobalBest(individuals[i], individualsFitness[i], true);
        }

        // Offspring containers (child1 in newIndividuals1, child2 in
        // newIndividuals2) and their objective values.
        std::vector<std::vector<double>> newIndividuals1 = individuals;
        std::vector<std::vector<double>> newIndividuals2 = individuals;
        std::vector<double> newIndividuals1Fitness = individualsFitness;
        std::vector<double> newIndividuals2Fitness = individualsFitness;

        int iteration = -1;
        while (iteration++ < this->maxIterations || utils::getCurrentTime() < this->runningTime) {
            // NOTE(review): utils::getRandom() is called from parallel
            // regions; assumes it is thread-safe — TODO confirm.
#pragma omp parallel for
            for (int i = 0; i < this->numberOfAgents; i++) {
                // Select two parents.
                int indexParent1, indexParent2;
                selection(indexParent1, indexParent2, individualsFitness);
                if (utils::getRandom() <= this->crossoverRate) {
                    // Recombine the parents, then mutate both children.
                    crossover(individuals[indexParent1], individuals[indexParent2], newIndividuals1[i], newIndividuals2[i]);
                    mutate(newIndividuals1[i], newIndividuals2[i]);
                } else {
                    // No crossover: children are plain copies of the parents.
                    newIndividuals1[i] = individuals[indexParent1];
                    newIndividuals2[i] = individuals[indexParent2];
                }
            }

#pragma omp parallel for
            for (int i = 0; i < this->numberOfAgents; i++) {
                // Evaluate both children.
                switch (this->problem->getRepType()) {
                case RepresentationType::DIRECT:
                    newIndividuals1Fitness[i] = this->problem->evaluate(newIndividuals1[i]);
                    newIndividuals2Fitness[i] = this->problem->evaluate(newIndividuals2[i]);
                    break;
                case RepresentationType::INDIRECT: {
                    std::shared_ptr<Solution<T>> sol1 = this->problem->construct(newIndividuals1[i]);
                    newIndividuals1Fitness[i] = sol1->getFitness();
                    std::shared_ptr<Solution<T>> sol2 = this->problem->construct(newIndividuals2[i]);
                    newIndividuals2Fitness[i] = sol2->getFitness();
                    // Intensification: after 30s of wall time, also run local
                    // search on the decoded solutions.
                    if (utils::getCurrentTime() > 30) {
                        sol1->localSearch();
                        sol2->localSearch();
                    }
                    break;
                }
                }
#pragma omp critical
                {
                    this->updateGlobalBest(newIndividuals1[i], newIndividuals1Fitness[i], true);
                    this->updateGlobalBest(newIndividuals2[i], newIndividuals2Fitness[i], true);
                }
            }

            // Elitist replacement: keep the strictly best of
            // {current individual, child1, child2} per slot.
            // BUG FIX: the original replaced individuals[i] without updating
            // individualsFitness[i], so later generations selected and
            // compared against stale fitness values.
            switch (this->problem->getStrategy()) {
            case OptimizationStrategy::MINIMIZE: {
#pragma omp parallel for
                for (int i = 0; i < this->numberOfAgents; i++) {
                    if (newIndividuals1Fitness[i] < newIndividuals2Fitness[i] && newIndividuals1Fitness[i] < individualsFitness[i]) {
                        individuals[i] = newIndividuals1[i];
                        individualsFitness[i] = newIndividuals1Fitness[i];
                    } else if (newIndividuals2Fitness[i] < newIndividuals1Fitness[i] && newIndividuals2Fitness[i] < individualsFitness[i]) {
                        individuals[i] = newIndividuals2[i];
                        individualsFitness[i] = newIndividuals2Fitness[i];
                    }
                }
            } break;
            case OptimizationStrategy::MAXIMIZE: {
#pragma omp parallel for
                for (int i = 0; i < this->numberOfAgents; i++) {
                    if (newIndividuals1Fitness[i] > newIndividuals2Fitness[i] && newIndividuals1Fitness[i] > individualsFitness[i]) {
                        individuals[i] = newIndividuals1[i];
                        individualsFitness[i] = newIndividuals1Fitness[i];
                    } else if (newIndividuals2Fitness[i] > newIndividuals1Fitness[i] && newIndividuals2Fitness[i] > individualsFitness[i]) {
                        individuals[i] = newIndividuals2[i];
                        individualsFitness[i] = newIndividuals2Fitness[i];
                    }
                }
            } break;
            }
        }
        std::cout << "Best solution " << this->globalBestFitness << " Running time: " << utils::getCurrentTime() << std::endl << "Best solution decision variables: ";
        utils::printVector(this->globalBest);
    }

    // Dispatches to the configured crossover operator. ONE_POINT is not
    // implemented: it only logs, leaving the children as whatever the caller
    // placed in them. NOTE(review): callers pass std::vector<double>, so this
    // template only compiles for T = double — TODO confirm intended usage.
    void crossover(std::vector<T> const &parent1, std::vector<T> const &parent2, std::vector<T> &child1, std::vector<T> &child2)
    {
        switch (this->crossoverType) {
        case CrossoverType::UNIFORM:
            uniformCrossover(parent1, parent2, child1, child2);
            break;
        case CrossoverType::SIMULATED_BINARY:
            simulatedBinaryCrossover(parent1, parent2, child1, child2);
            break;
        case CrossoverType::ONE_POINT:
            std::cout << "One Point Crossover" << std::endl;
            break;
        }
    }

    /**
     * Simulated Binary Crossover (SBX), Deb & Agrawal style.
     * Each variable has a 50% chance of being recombined; otherwise it is
     * copied from the parents. NOTE(review): this re-rolls crossoverRate even
     * though solve() already gated the call on it, halving the effective
     * crossover probability — TODO confirm whether that is intended.
     */
    void simulatedBinaryCrossover(const std::vector<T> &parent1, const std::vector<T> &parent2, std::vector<T> &child1, std::vector<T> &child2)
    {
        double EPS = 1.0e-14;
        // y1/y2: smaller/larger parent value; yl/yu: lower/upper variable bounds.
        double y1, y2, yl, yu;
        // betaq is the "beta-bar" spread factor from the paper.
        double alpha, beta, betaq;
        if (utils::getRandom() <= this->crossoverRate) // roll the dice: apply crossover?
        {
            for (int i = 0; i < this->problem->getDimension(); i++) // each decision variable
            {
                // 50% chance per variable of being recombined.
                if (utils::getRandom() <= 0.5) {
                    // Order the parent values so y1 <= y2.
                    if (parent1[i] < parent2[i]) {
                        y1 = parent1[i];
                        y2 = parent2[i];
                    } else {
                        y1 = parent2[i];
                        y2 = parent1[i];
                    }
                    // Only recombine when the parents actually differ.
                    if (fabs(parent1[i] - parent2[i]) > EPS) {
                        yl = this->problem->getLb()[i]; // lower bound of variable i
                        yu = this->problem->getUb()[i]; // upper bound of variable i

                        // First child. Unlike the paper, a separate beta is
                        // computed per child (lower bound side here).
                        double rand = utils::getRandom();
                        beta = 1.0 + (2.0 * (y1 - yl) / (y2 - y1));
                        alpha = 2.0 - pow(beta,
                                          -(etaC + 1.0));
                        if (rand <= (1.0 / alpha)) {
                            betaq = pow((rand * alpha), (1.0 / (etaC + 1.0)));
                        } else {
                            betaq = pow((1.0 / (2.0 - rand * alpha)), (1.0 / (etaC + 1.0)));
                        }
                        child1[i] = 0.5 * ((y1 + y2) - betaq * (y2 - y1));

                        // Second child (upper bound side).
                        beta = 1.0 + (2.0 * (yu - y2) / (y2 - y1));
                        alpha = 2.0 - pow(beta,
                                          -(etaC + 1.0));
                        if (rand <= (1.0 / alpha)) {
                            betaq = pow((rand * alpha), (1.0 / (etaC + 1.0)));
                        } else {
                            betaq = pow((1.0 / (2.0 - rand * alpha)), (1.0 / (etaC + 1.0)));
                        }
                        child2[i] = 0.5 * ((y1 + y2) + betaq * (y2 - y1));
                    }
                    // Parents equal in this variable: copy through.
                    else {
                        child1[i] = parent1[i];
                        child2[i] = parent2[i];
                    }
                } else // variable not selected for recombination: copy through
                {
                    child1[i] = parent1[i];
                    child2[i] = parent2[i];
                }
            }
        } else {
            child1 = parent1;
            child2 = parent2;
        }
    }

    // Uniform crossover: each variable is swapped between the children with
    // 50% probability. (Loop index changed to int for consistency with
    // getDimension() and the other loops; avoids a sign-compare warning.)
    void uniformCrossover(const std::vector<T> &parent1, const std::vector<T> &parent2, std::vector<T> &child1, std::vector<T> &child2)
    {
        for (int i = 0; i < this->problem->getDimension(); i++) {
            if (utils::getRandom() <= 0.5) {
                child1[i] = parent1[i];
                child2[i] = parent2[i];
            } else {
                child1[i] = parent2[i];
                child2[i] = parent1[i];
            }
        }
    }

    // Picks two parent indices. FITNESS_PROPORTIONATE is not implemented and
    // leaves the output indices UNSET — NOTE(review): calling it would read
    // uninitialized indices in solve(); do not select it until implemented.
    void selection(int &indexParent1, int &indexParent2, const std::vector<double> &individualsFitness)
    {
        switch (this->selectionType) {
        case SelectionType::FITNESS_PROPORTIONATE: {
            std::cout << "Fitness proportionate" << std::endl;
        } break;
        case SelectionType::TOURNAMENT: {
            indexParent1 = tournamentSelection(individualsFitness);
            indexParent2 = tournamentSelection(individualsFitness);
        } break;
        }
    }

    // Binary tournament between two distinct random individuals; returns the
    // one with the HIGHER fitness. NOTE(review): this favors larger fitness
    // regardless of the problem's MINIMIZE/MAXIMIZE strategy — TODO confirm
    // fitness orientation for minimization problems.
    int tournamentSelection(const std::vector<double> &individualsFitness)
    {
        int indexIndividual1 = utils::getRandom(this->numberOfAgents - 1);
        int indexIndividual2 = utils::getRandom(this->numberOfAgents - 2);
        if (indexIndividual2 >= indexIndividual1)
            indexIndividual2++; // shift to guarantee two distinct indices
        if (individualsFitness[indexIndividual1] > individualsFitness[indexIndividual2])
            return indexIndividual1;
        else
            return indexIndividual2;
    }

    // Applies the configured mutation operator to both children.
    // BUG FIX: the RANDOM_MUTATION branch mutated child1 twice and never
    // touched child2.
    void mutate(std::vector<double> &child1, std::vector<double> &child2)
    {
        switch (this->mutationType) {
        case MutationType::RANDOM_MUTATION: {
            randomMutation(child1);
            randomMutation(child2);
        } break;
        case MutationType::POLYNOMIAL: {
            polynomialMutation(child1);
            polynomialMutation(child2);
        } break;
        }
    }

    // Random mutation: each variable is redrawn uniformly from its domain
    // with probability mutationRate.
    void randomMutation(std::vector<double> &individual)
    {
        for (int i = 0; i < this->problem->getDimension(); i++)
            if (utils::getRandom() <= mutationRate)
                individual[i] = this->problem->getRandomDecisionVariableAt(i);
    }

    // Polynomial mutation (Deb): perturbs a variable toward its lower or
    // upper bound with a polynomial distribution controlled by etaM.
    void polynomialMutation(std::vector<double> &individual)
    {
        for (int i = 0; i < this->problem->getDimension(); i++) {
            if (utils::getRandom() <= mutationRate) {
                double rand = utils::getRandom();
                if (rand <= 0.5) {
                    // Perturb toward the lower bound.
                    double leftValue = individual[i] - this->problem->getLb()[i];
                    double sigma_l = pow(2 * rand, 1. / (1 + etaM)) - 1;
                    individual[i] = individual[i] + (sigma_l * leftValue);
                } else {
                    // Perturb toward the upper bound.
                    double rightValue = this->problem->getUb()[i] - individual[i];
                    double sigma_r = 1 - pow(2 * (1 - rand), 1. / (1 + etaM));
                    individual[i] = individual[i] + (sigma_r * rightValue);
                }
            }
        }
    }

    // Distribution index setters; required before solve() when POLYNOMIAL
    // mutation / SIMULATED_BINARY crossover is selected.
    void setEtaM(float value) { etaM = value; }
    void setEtaC(float value) { etaC = value; }

private:
    float crossoverRate;          // probability of applying crossover per slot
    float mutationRate;           // per-variable mutation probability
    float etaM = -FLT_MIN;        // polynomial mutation index (-FLT_MIN = unset sentinel)
    float etaC = -FLT_MIN;        // SBX distribution index (-FLT_MIN = unset sentinel)
    CrossoverType crossoverType;
    SelectionType selectionType;
    MutationType mutationType;
};
#endif // GENETICALGORITHM_H
|
omp_zsyrk_batch.c | /**
* @file omp_zsyrk_batch.c
*
* @brief BBLAS omp_zsyrk_batch double _Complex routine.
*
* BBLAS is a software package provided by Univ. of Manchester,
* Univ. of Tennessee.
*
* @version 1.0.0
* @author Samuel D. Relton
* @author Pedro V. Lara
* @author Mawussi Zounon
* @date 2016-02-20
*
**/
#ifndef DOXYGEN_SHOULD_SKIP_THIS
/**
* Code generation
* @precisions normal z -> c d s
**/
#endif
#include<cblas.h>
#include "bblas_omp.h"
#include "bblas.h"
#include <omp.h>
#define COMPLEX
/**
Purpose
-------
<b>zsyrk_batch</b> is an OpenMP version of zsyrk_batch.
It performs the matrix-matrix operations
arrayC[i] = alpha[i]*arrayA[i]*arrayA[i]**T + beta[i]*arrayC[i], or
arrayC[i] = alpha[i]*arrayA[i]**T *arrayA[i] + beta[i]*arrayC[i],
where alpha[i] and beta[i] are scalars, arrayC[i] is an N[i] by N[i] sym-
metric matrix and arrayA[i] is an N[i] by K[i] matrix in the
first case and K[i] by N[i] matrix in the second case.
Fixed and Variable Batch Operations
-----------------------------------
Two types of batch operation are supported depending upon the value of batch_opts.
When <tt>batch_opts = BBLAS_VARIABLE</tt>
- all parameters that are arrays must have length at least batch_count.
- all parameters that are arrays must have all values set.
When <tt>batch_opts = BBLAS_FIXED</tt>
- all parameters that are arrays (except for arrayA, arrayC, and info)
must have length at least one.
- all parameters that are arrays (except for arrayA, arrayC, and info)
need only to have their first value set.
This means that for a <tt>BBLAS_FIXED</tt> batch,
the values of uplo[0], trans[0], N[0], K[0],
alpha[0], beta[0], lda[0], and ldc[0] are used for all computations.
Parameters
----------
@param[in]
uplo Array of <tt>enum BBLAS_UPLO</tt>.
On entry, uplo[i] specifies whether the upper or
lower triangular part of the matrix arrayC[i] is to
be referenced as follows:
- = 'BblasUpper' Only the upper triangular part of
the matrix is to be referenced.
- = 'BblasLower' Only the lower triangular part of
the matrix is to be referenced.
@param[in]
trans Array of <tt>enum BBLAS_TRANS</tt>.
On entry, trans[i] specifies the operation to be
performed as follows:
- = 'BblasNoTrans' arrayC[i] = alpha[i]*arrayA[i]*arrayA[i]**T + beta[i]*arrayC[i].
- = 'BblasTrans' arrayC[i] = alpha[i]*arrayA[i]**T *arrayA[i] + beta[i]*arrayC[i].
@param[in]
N Array of <tt>int</tt>.
Each element N[i] specifies the number of rows and columns of the matrix
arrayC[i]. N[i] must be greater than zero.
@param[in]
K Array of <tt>int</tt>.
On entry with trans[i] = 'BblasNoTrans', K[i] specifies the
number of columns of the matrix arrayA[i],
and upon entry with trans[i] = 'BblasTrans',
K[i] specifies the number of rows of the matrix arrayA[i].
K[i] must be greater than zero.
@param[in]
alpha Array of <tt>complex_16</tt>.
@param[in]
arrayA Array of pointers.
Each element arrayA[i] is a pointer to a COMPLEX_16 matrix of
dimension lda[i] by Ka[i],
where Ka[i] = K[i] when transA[i] = BblasNoTrans and is N[i] otherwise.
Before entry with transA[i] = BblasNoTrans, the leading N[i] by K[i]
part of the arrayA[i] must contain the elements of arrayA[i], otherwise
the leading K[i] by N[i] part of the arrayA[i] must contain the
elements of arrayA[i].
@param[in]
lda Array of <tt>int</tt>.
On entry, lda[i] specifies the first dimension of arrayA[i] as declared
in the calling (sub) program. When transA[i] = BblasNoTrans then
lda[i] must be at least max( 1, N[i] ), otherwise lda[i] must be at
least max( 1, K[i] ).
@param[in]
beta Array of <tt>complex_16</tt>.
When beta[i] is set to zero arrayC[i] need not be set on input.
@param[in,out]
arrayC Array of pointers.
Each elements arrayC[i] is a pointer to a COMPLEX_16 matrix of
dimension ldc[i] by N[i].
Before entry with uplo[i] = 'BblasUpper', the leading
N[i] by N[i] upper triangular part of the arrayC[i] must con-
tain the upper triangular part of the symmetric
matrix and the strictly lower triangular part of arrayC[i]
is not referenced. On exit, the upper triangular
part of the arrayC[i] is overwritten by the upper tri-
angular part of the updated matrix.
 Before entry with uplo[i] = 'BblasLower', the leading N[i] by N[i] lower
triangular part of the arrayC[i] must contain the lower
triangular part of the symmetric matrix and the
strictly upper triangular part of arrayC[i] is not refer-
enced. On exit, the lower triangular part of the
arrayC[i] is overwritten by the lower triangular part
of the updated matrix.
@param[in]
ldc Array of <tt>int</tt>.
On entry, ldc[i] specifies the first dimension of arrayC[i] as declared
in the calling (sub) program. Each element ldc must be at least max( 1, N[i] ).
@param[in]
batch_count <tt>int</tt>
The number of matrices to operate on.
@param[in]
batch_opts <tt>enum BBLAS_OPTS</tt>
One of BBLAS_FIXED or BBLAS_VARIABLE depending upon the type of
batch operation required.
@param[out]
info Array of <tt>int</tt>.
Each element info[i] is the error return code of the ith zsyrk in the batch,
these need not be set on entry.
The error codes can be found in bblas_macros.h.
**/
void omp_zsyrk_batch(
const enum BBLAS_UPLO *uplo, const enum BBLAS_TRANS *trans,
const int *N, const int *K, const BBLAS_Complex64_t *alpha,
const BBLAS_Complex64_t **arrayA, const int *lda,
const BBLAS_Complex64_t *beta, BBLAS_Complex64_t **arrayC,
const int *ldc, const int batch_count, enum BBLAS_OPTS batch_opts, int *info)
{
/*Local variables */
int first_index = 0;
int batch_iter;
int LDA;
char func_name[15] = "zsyrk_batch";
/* Check input arguments */
if (batch_count < 0)
{
xerbla_batch(func_name, BBLAS_ERR_BATCH_COUNT, -1);
}
if (batch_opts == BBLAS_FIXED)
{
if ((uplo[first_index] != BblasUpper) &&
(uplo[first_index] != BblasLower))
{
xerbla_batch(func_name, BBLAS_ERR_UPLO, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_UPLO;
}
return;
}
if ((trans[first_index] != BblasNoTrans) &&
(trans[first_index] != BblasTrans) &&
(trans[first_index] != BblasConjTrans))
{
xerbla_batch(func_name, BBLAS_ERR_TRANS, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_TRANS;
}
return;
}
if (N[first_index] < 0)
{
xerbla_batch(func_name, BBLAS_ERR_N, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_N;
}
return;
}
if (K[first_index] < 0)
{
xerbla_batch(func_name, BBLAS_ERR_K, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_K;
}
return;
}
if (trans[first_index] == BblasNoTrans)
{
LDA = N[first_index];
} else
{
LDA = K[first_index];
}
if (lda[first_index] < max(1, LDA))
{
xerbla_batch(func_name, BBLAS_ERR_LDA, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_LDA;
}
return;
}
if (ldc[first_index] < max(1, N[first_index]))
{
xerbla_batch(func_name, BBLAS_ERR_LDC, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_LDC;
}
return;
}
/* particular case */
if (N[first_index] == 0 || (K[first_index] == 0 ||
alpha[first_index] == (BBLAS_Complex64_t)0.0 ||
beta[first_index] == (BBLAS_Complex64_t)1.0))
{
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_SUCCESS;
}
return;
}
#pragma omp parallel for private(batch_iter)
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
/*Call to cblas_zsyrk */
cblas_zsyrk(
BblasColMajor,
uplo[first_index],
trans[first_index],
N[first_index],
K[first_index],
CBLAS_SADDR(alpha[first_index]),
arrayA[batch_iter],
lda[first_index],
CBLAS_SADDR(beta[first_index]),
arrayC[batch_iter],
ldc[first_index]);
/* Successful */
info[batch_iter] = BBLAS_SUCCESS;
} /*END FIXED SIZE FOR LOOP */
}else if (batch_opts == BBLAS_VARIABLE)
{
#pragma omp parallel for private(batch_iter, LDA)
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
/* Check input arguments */
if ((uplo[batch_iter] != BblasUpper) &&
(uplo[batch_iter] != BblasLower))
{
xerbla_batch(func_name, BBLAS_ERR_UPLO, batch_iter);
info[batch_iter] = BBLAS_ERR_UPLO;
continue;
}
if ((trans[batch_iter] != BblasNoTrans) &&
(trans[batch_iter] != BblasTrans) &&
(trans[batch_iter] != BblasConjTrans))
{
xerbla_batch(func_name, BBLAS_ERR_TRANS, batch_iter);
info[batch_iter] = BBLAS_ERR_TRANS;
continue;
}
if (N[batch_iter] < 0)
{
xerbla_batch(func_name, BBLAS_ERR_N, batch_iter);
info[batch_iter] = BBLAS_ERR_N;
continue;
}
if (K[batch_iter] < 0)
{
xerbla_batch(func_name, BBLAS_ERR_K, batch_iter);
info[batch_iter] = BBLAS_ERR_K;
continue;
}
if (trans[batch_iter] == BblasNoTrans)
{
LDA = N[batch_iter];
} else
{
LDA = K[batch_iter];
}
if (lda[batch_iter] < max(1, LDA))
{
xerbla_batch(func_name, BBLAS_ERR_LDA, batch_iter);
info[batch_iter] = BBLAS_ERR_LDA;
continue;
}
if (ldc[batch_iter] < max(1, N[batch_iter]))
{
xerbla_batch(func_name, BBLAS_ERR_LDC, batch_iter);
info[batch_iter] = BBLAS_ERR_LDC;
continue;
}
/* particular case */
if (N[batch_iter] == 0 ||
((K[batch_iter] == 0 || alpha[batch_iter] == (BBLAS_Complex64_t)0.0) &&
(beta[batch_iter] == (BBLAS_Complex64_t)1.0)))
{
info[batch_iter] = BBLAS_SUCCESS;
continue;
}
cblas_zsyrk(
BblasColMajor,
uplo[batch_iter],
trans[batch_iter],
N[batch_iter],
K[batch_iter],
CBLAS_SADDR(alpha[batch_iter]),
arrayA[batch_iter],
lda[batch_iter],
CBLAS_SADDR(beta[batch_iter]),
arrayC[batch_iter],
ldc[batch_iter]);
/* Successful */
info[batch_iter] = BBLAS_SUCCESS;
}
}else
{
xerbla_batch(func_name, BBLAS_ERR_BATCH_OPTS, -1);
}
}
#undef COMPLEX
|
v_p_strategy.h | //
// Project Name: KratosPFEMFluidDynamicsApplication $
// Last modified by: $Author: AFranci $
// Date: $Date: January 2016 $
// Revision: $Revision: 0.0 $
//
//
#ifndef KRATOS_V_P_STRATEGY_H
#define KRATOS_V_P_STRATEGY_H
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "utilities/openmp_utils.h"
#include "processes/process.h"
#include "solving_strategies/schemes/scheme.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "custom_utilities/mesher_utilities.hpp"
#include "custom_utilities/boundary_normals_calculation_utilities.hpp"
#include "custom_utilities/solver_settings.h"
#include "pfem_fluid_dynamics_application_variables.h"
#include <stdio.h>
#include <math.h>
namespace Kratos
{
///@addtogroup PFEMFluidDynamicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
template <class TSparseSpace,
class TDenseSpace,
class TLinearSolver>
class VPStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(VPStrategy);
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType;
///@}
///@name Life Cycle
///@{
/// Constructor from a solver-settings object: delegates all sub-solver
/// wiring to the virtual InitializeStrategy() hook.
VPStrategy(ModelPart &rModelPart,
SolverSettingsType &rSolverConfig) : BaseType(rModelPart)
{
std::cout << "VPStrategy INITIALIZE STRATEGY" << std::endl;
InitializeStrategy(rSolverConfig);
}
/// Constructor taking the velocity and pressure linear solvers directly.
/// NOTE(review): none of the parameters besides rModelPart are stored or
/// used here — derived strategies appear to be expected to do the actual
/// setup; confirm the signature is kept for API compatibility.
VPStrategy(ModelPart &rModelPart,
typename TLinearSolver::Pointer pVelocityLinearSolver,
typename TLinearSolver::Pointer pPressureLinearSolver,
bool ReformDofSet = true,
unsigned int DomainSize = 2) : BaseType(rModelPart)
{
KRATOS_TRY;
KRATOS_CATCH("");
}
/// Destructor.
virtual ~VPStrategy() {}
/// Base implementation performs no checks; returns 0 (false), i.e. "no error".
virtual int Check() override
{
return false;
}
/// Base implementation does nothing and reports "not converged";
/// derived strategies provide the real solve.
virtual bool SolveSolutionStep() override
{
return false;
}
/// No-op in the base class; derived strategies override as needed.
virtual void FinalizeSolutionStep() override {}
/// No-op in the base class; derived strategies override as needed.
virtual void InitializeSolutionStep() override {}
/// Recomputes nodal displacements from the current velocities and moves
/// the mesh accordingly.
/// NOTE(review): both parameters are unused here — the model part acted on
/// is the one stored in the base strategy; confirm the signature is kept
/// for compatibility with existing callers.
void UpdateTopology(ModelPart &rModelPart, unsigned int echoLevel)
{
KRATOS_TRY;
this->CalculateDisplacementsAndPorosity();
BaseType::MoveMesh();
KRATOS_CATCH("");
}
/// Resets and recomputes the BLOCKED and ISOLATED flags of every element.
/// BLOCKED marks fully-free-surface 3D elements whose faces are nearly
/// coplanar (sliver-like shapes); ISOLATED marks free-surface elements with
/// no rigid nodes whose nodes are (almost) all attached to a single element.
/// Element-wise parallel: each thread touches only its own elements.
void SetBlockedAndIsolatedFlags()
{
KRATOS_TRY;
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
#pragma omp parallel
{
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
// Static partition of the element container among the OpenMP threads.
OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
{
unsigned int numNodes = itElem->GetGeometry().size();
std::vector<array_1d<double, 3>> nodesCoordinates;
nodesCoordinates.resize(numNodes);
// Start from a clean state; both flags are re-derived below.
(itElem)->Set(BLOCKED, false);
(itElem)->Set(ISOLATED, false);
unsigned int freeSurfaceNodes = 0;
// NOTE(review): freeSurfaceRigidNodes is counted but never used — confirm.
unsigned int freeSurfaceRigidNodes = 0;
unsigned int rigidNodes = 0;
unsigned int isolatedNodes = 0;
// Classify the element's nodes and cache their coordinates.
for (unsigned int i = 0; i < numNodes; i++)
{
if (itElem->GetGeometry()[i].Is(FREE_SURFACE))
{
freeSurfaceNodes++;
if (itElem->GetGeometry()[i].Is(RIGID))
{
freeSurfaceRigidNodes++;
}
}
else if (itElem->GetGeometry()[i].Is(RIGID))
{
rigidNodes++;
}
nodesCoordinates[i] = itElem->GetGeometry()[i].Coordinates();
// A node attached to exactly one element counts as isolated.
ElementWeakPtrVectorType &neighb_elems = itElem->GetGeometry()[i].GetValue(NEIGHBOUR_ELEMENTS);
if (neighb_elems.size() == 1)
{
isolatedNodes++;
}
}
if (dimension == 3)
{
// Face normals of the tetrahedron as cross products of edge vectors;
// (ai, bi, ci) are the components of the normal of face i.
double a1 = 0; //slope x for plane on the first triangular face of the tetrahedra (nodes A,B,C)
double b1 = 0; //slope y for plane on the first triangular face of the tetrahedra (nodes A,B,C)
double c1 = 0; //slope z for plane on the first triangular face of the tetrahedra (nodes A,B,C)
a1 = (nodesCoordinates[1][1] - nodesCoordinates[0][1]) * (nodesCoordinates[2][2] - nodesCoordinates[0][2]) - (nodesCoordinates[2][1] - nodesCoordinates[0][1]) * (nodesCoordinates[1][2] - nodesCoordinates[0][2]);
b1 = (nodesCoordinates[1][2] - nodesCoordinates[0][2]) * (nodesCoordinates[2][0] - nodesCoordinates[0][0]) - (nodesCoordinates[2][2] - nodesCoordinates[0][2]) * (nodesCoordinates[1][0] - nodesCoordinates[0][0]);
c1 = (nodesCoordinates[1][0] - nodesCoordinates[0][0]) * (nodesCoordinates[2][1] - nodesCoordinates[0][1]) - (nodesCoordinates[2][0] - nodesCoordinates[0][0]) * (nodesCoordinates[1][1] - nodesCoordinates[0][1]);
double a2 = 0; //slope x for plane on the second triangular face of the tetrahedra (nodes A,B,D)
double b2 = 0; //slope y for plane on the second triangular face of the tetrahedra (nodes A,B,D)
double c2 = 0; //slope z for plane on the second triangular face of the tetrahedra (nodes A,B,D)
a2 = (nodesCoordinates[1][1] - nodesCoordinates[0][1]) * (nodesCoordinates[3][2] - nodesCoordinates[0][2]) - (nodesCoordinates[3][1] - nodesCoordinates[0][1]) * (nodesCoordinates[1][2] - nodesCoordinates[0][2]);
b2 = (nodesCoordinates[1][2] - nodesCoordinates[0][2]) * (nodesCoordinates[3][0] - nodesCoordinates[0][0]) - (nodesCoordinates[3][2] - nodesCoordinates[0][2]) * (nodesCoordinates[1][0] - nodesCoordinates[0][0]);
c2 = (nodesCoordinates[1][0] - nodesCoordinates[0][0]) * (nodesCoordinates[3][1] - nodesCoordinates[0][1]) - (nodesCoordinates[3][0] - nodesCoordinates[0][0]) * (nodesCoordinates[1][1] - nodesCoordinates[0][1]);
double a3 = 0; //slope x for plane on the third triangular face of the tetrahedra (nodes B,C,D)
double b3 = 0; //slope y for plane on the third triangular face of the tetrahedra (nodes B,C,D)
double c3 = 0; //slope z for plane on the third triangular face of the tetrahedra (nodes B,C,D)
a3 = (nodesCoordinates[1][1] - nodesCoordinates[2][1]) * (nodesCoordinates[3][2] - nodesCoordinates[2][2]) - (nodesCoordinates[3][1] - nodesCoordinates[2][1]) * (nodesCoordinates[1][2] - nodesCoordinates[2][2]);
b3 = (nodesCoordinates[1][2] - nodesCoordinates[2][2]) * (nodesCoordinates[3][0] - nodesCoordinates[2][0]) - (nodesCoordinates[3][2] - nodesCoordinates[2][2]) * (nodesCoordinates[1][0] - nodesCoordinates[2][0]);
c3 = (nodesCoordinates[1][0] - nodesCoordinates[2][0]) * (nodesCoordinates[3][1] - nodesCoordinates[2][1]) - (nodesCoordinates[3][0] - nodesCoordinates[2][0]) * (nodesCoordinates[1][1] - nodesCoordinates[2][1]);
double a4 = 0; //slope x for plane on the fourth triangular face of the tetrahedra (nodes A,C,D)
double b4 = 0; //slope y for plane on the fourth triangular face of the tetrahedra (nodes A,C,D)
double c4 = 0; //slope z for plane on the fourth triangular face of the tetrahedra (nodes A,C,D)
a4 = (nodesCoordinates[0][1] - nodesCoordinates[2][1]) * (nodesCoordinates[3][2] - nodesCoordinates[2][2]) - (nodesCoordinates[3][1] - nodesCoordinates[2][1]) * (nodesCoordinates[0][2] - nodesCoordinates[2][2]);
b4 = (nodesCoordinates[0][2] - nodesCoordinates[2][2]) * (nodesCoordinates[3][0] - nodesCoordinates[2][0]) - (nodesCoordinates[3][2] - nodesCoordinates[2][2]) * (nodesCoordinates[0][0] - nodesCoordinates[2][0]);
c4 = (nodesCoordinates[0][0] - nodesCoordinates[2][0]) * (nodesCoordinates[3][1] - nodesCoordinates[2][1]) - (nodesCoordinates[3][0] - nodesCoordinates[2][0]) * (nodesCoordinates[0][1] - nodesCoordinates[2][1]);
// Cosines of the angles between pairs of face normals; magnitudes
// near 1 mean nearly coplanar faces, i.e. a degenerate (sliver) shape.
double cosAngle12 = (a1 * a2 + b1 * b2 + c1 * c2) / (sqrt(pow(a1, 2) + pow(b1, 2) + pow(c1, 2)) * sqrt(pow(a2, 2) + pow(b2, 2) + pow(c2, 2)));
double cosAngle13 = (a1 * a3 + b1 * b3 + c1 * c3) / (sqrt(pow(a1, 2) + pow(b1, 2) + pow(c1, 2)) * sqrt(pow(a3, 2) + pow(b3, 2) + pow(c3, 2)));
double cosAngle14 = (a1 * a4 + b1 * b4 + c1 * c4) / (sqrt(pow(a1, 2) + pow(b1, 2) + pow(c1, 2)) * sqrt(pow(a4, 2) + pow(b4, 2) + pow(c4, 2)));
double cosAngle23 = (a3 * a2 + b3 * b2 + c3 * c2) / (sqrt(pow(a3, 2) + pow(b3, 2) + pow(c3, 2)) * sqrt(pow(a2, 2) + pow(b2, 2) + pow(c2, 2)));
double cosAngle24 = (a4 * a2 + b4 * b2 + c4 * c2) / (sqrt(pow(a4, 2) + pow(b4, 2) + pow(c4, 2)) * sqrt(pow(a2, 2) + pow(b2, 2) + pow(c2, 2)));
double cosAngle34 = (a4 * a3 + b4 * b3 + c4 * c3) / (sqrt(pow(a4, 2) + pow(b4, 2) + pow(c4, 2)) * sqrt(pow(a3, 2) + pow(b3, 2) + pow(c3, 2)));
// The blocking threshold tightens as the element has fewer isolated
// nodes: >1 isolated → 0.99, exactly 1 → 0.995, none → 0.999.
if ((fabs(cosAngle12) > 0.99 || fabs(cosAngle13) > 0.99 || fabs(cosAngle14) > 0.99 || fabs(cosAngle23) > 0.99 || fabs(cosAngle24) > 0.99 || fabs(cosAngle34) > 0.99) && (freeSurfaceNodes == numNodes) && isolatedNodes > 1)
{
(itElem)->Set(BLOCKED, true);
// std::cout << "in the strategy BLOCKED ELEMENT: " << (itElem)->Id() << std::endl;
}
else if ((fabs(cosAngle12) > 0.995 || fabs(cosAngle13) > 0.995 || fabs(cosAngle14) > 0.995 || fabs(cosAngle23) > 0.995 || fabs(cosAngle24) > 0.995 || fabs(cosAngle34) > 0.995) && (freeSurfaceNodes == numNodes) && isolatedNodes == 1)
{
(itElem)->Set(BLOCKED, true);
// std::cout << "in the strategy BLOCKED ELEMENT: " << (itElem)->Id() << std::endl;
}
else if ((fabs(cosAngle12) > 0.999 || fabs(cosAngle13) > 0.999 || fabs(cosAngle14) > 0.999 || fabs(cosAngle23) > 0.999 || fabs(cosAngle24) > 0.999 || fabs(cosAngle34) > 0.999) && (freeSurfaceNodes == numNodes))
{
(itElem)->Set(BLOCKED, true);
// std::cout << "in the strategy BLOCKED ELEMENT: " << (itElem)->Id() << std::endl;
}
}
// An all-free-surface element with no rigid nodes and (almost) all
// nodes single-element: ISOLATED takes precedence over BLOCKED.
if (freeSurfaceNodes == numNodes && rigidNodes == 0 && isolatedNodes >= (numNodes - 1))
{
(itElem)->Set(ISOLATED, true);
(itElem)->Set(BLOCKED, false);
}
}
}
KRATOS_CATCH("");
}
void CalculatePressureVelocity()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
if (timeStep == 1)
{
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0;
}
else
{
double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
}
}
}
void CalculatePressureAcceleration()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
if (timeStep == 1)
{
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
}
else
{
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
double &PreviousPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1);
double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
CurrentPressureAcceleration = (CurrentPressureVelocity - PreviousPressureVelocity) / timeInterval;
}
}
}
/// Updates nodal accelerations and pressure time derivatives for the new
/// time step. Per node:
///  - regular fluid/solid nodes: acceleration from the velocity history,
///  - rigid (non-solid, non-isolated) nodes: accelerations zeroed,
///  - remaining (isolated) nodes: pressure history wiped and the velocity
///    advanced with the external volume acceleration only (free fall).
/// Afterwards the pressure velocity/acceleration are refreshed in place.
virtual void CalculateTemporalVariables()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);
/* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */
if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID)))
{
// a_{n+1} = 2 (v_{n+1} - v_n) / dt - a_n (see UpdateAccelerations).
UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity);
}
else if ((i)->Is(RIGID))
{
// Rigid nodes carry no computed acceleration.
array_1d<double, 3> Zeros(3, 0.0);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
(i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
}
else
{
// Isolated node: clear its pressure history and let it fly
// ballistically under the external volume acceleration.
(i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
{
array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
(i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
}
}
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
if (timeStep == 1)
{
// First step: no history, zero the pressure time derivatives.
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
}
else
{
double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
// Order matters: the stored (old) pressure velocity is consumed
// before it is overwritten with the new backward difference.
CurrentPressureAcceleration = CurrentPressureVelocity / timeInterval;
CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
// NOTE(review): the net result is (old_pv - new_pv) / dt, i.e. the
// negated forward difference of the pressure velocity — confirm the
// sign is intentional.
CurrentPressureAcceleration += -CurrentPressureVelocity / timeInterval;
}
}
}
/// Updates nodal accelerations only (no pressure-derivative refresh — see
/// CalculateTemporalVariables for the variant that also does that). Per node:
///  - regular fluid/solid nodes: acceleration from the velocity history,
///  - rigid (non-solid, non-isolated) nodes: accelerations zeroed,
///  - remaining (isolated) nodes: pressure history wiped and the velocity
///    advanced with the external volume acceleration only (free fall).
void CalculateAccelerations()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);
/* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */
if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID)))
{
// a_{n+1} = 2 (v_{n+1} - v_n) / dt - a_n (see UpdateAccelerations).
UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity);
}
else if ((i)->Is(RIGID))
{
// Rigid nodes carry no computed acceleration.
array_1d<double, 3> Zeros(3, 0.0);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
(i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
}
else
{
// Isolated node: clear pressure history, free fall under the
// external volume acceleration.
(i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
{
array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
(i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
}
}
}
}
/// Second-order acceleration update from the velocity history:
/// a_{n+1} = 2 (v_{n+1} - v_n) / dt - a_n, written in place into
/// CurrentAcceleration with noalias() to avoid a temporary.
inline void UpdateAccelerations(array_1d<double, 3> &CurrentAcceleration,
                                const array_1d<double, 3> &CurrentVelocity,
                                array_1d<double, 3> &PreviousAcceleration,
                                const array_1d<double, 3> &PreviousVelocity)
{
    // DELTA_TIME is read from the process info of the strategy's model part.
    const double time_increment = BaseType::GetModelPart().GetProcessInfo()[DELTA_TIME];
    noalias(CurrentAcceleration) = 2.0 * (CurrentVelocity - PreviousVelocity) / time_increment - PreviousAcceleration;
}
virtual void CalculateDisplacementsAndPorosity()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double TimeStep = rCurrentProcessInfo[DELTA_TIME];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);
/* if( i->IsFixed(DISPLACEMENT_X) == false ) */
CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];
/* if( i->IsFixed(DISPLACEMENT_Y) == false ) */
CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];
/* if( i->IsFixed(DISPLACEMENT_Z) == false ) */
CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];
// currentFluidFractionRate = (currentFluidFraction - previousFluidFraction)/TimeStep;
}
}
/// No-op in the base class; derived strategies update constitutive state here.
virtual void UpdateStressStrain() {}
/// No-op in the base class; nothing to clear.
virtual void Clear() override {}
///@}
///@name Access
///@{
/// Forwards the verbosity level to the base solving strategy.
virtual void SetEchoLevel(int Level) override
{
BaseType::SetEchoLevel(Level);
}
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
std::stringstream buffer;
buffer << "VPStrategy";
return buffer.str();
}
/// Print information about this object.
void PrintInfo(std::ostream &rOStream) const override
{
rOStream << "VPStrategy";
}
/// Print object's data (nothing to print in the base class).
void PrintData(std::ostream &rOStream) const override
{
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected Life Cycle
///@{
///@}
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/// Solves one non-linear iteration of the momentum (velocity) problem.
/// Base implementation is a stub that reports "not converged"; two-step
/// derived strategies provide the real solve.
/**
* @param it current non-linear iteration index
* @param maxIt maximum number of non-linear iterations
* @param fixedTimeStep set by overrides when the time step had to be fixed
* @param velocityNorm norm of the velocity field, written by overrides
*/
virtual bool SolveMomentumIteration(unsigned int it, unsigned int maxIt, bool &fixedTimeStep, double &velocityNorm)
{
return false;
}
/// Solves one iteration of the continuity (pressure) problem; stub here.
virtual bool SolveContinuityIteration(unsigned int it, unsigned int maxIt, double &NormP)
{
return false;
}
/// Computes elemental L2-norm errors of velocity, pressure and deviatoric
/// stress against a manufactured analytical solution, and appends one
/// (time, error) line per call to a set of "errorL2*File.txt" files.
/// Fix: the long double accumulators were updated from inside the OpenMP
/// parallel region with no reduction or critical section — a data race that
/// made the reported norms non-deterministic. They are now combined with a
/// reduction(+) clause on the parallel construct.
void ComputeErrorL2Norm()
{
ModelPart &rModelPart = BaseType::GetModelPart();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double currentTime = rCurrentProcessInfo[TIME];
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
// NOTE(review): sumErrorL2Velocity is never accumulated below, so the
// reported errorL2Velocity is always 0 — confirm whether the combined
// velocity error was meant to be filled in.
long double sumErrorL2Velocity = 0;
long double sumErrorL2VelocityX = 0;
long double sumErrorL2VelocityY = 0;
long double sumErrorL2Pressure = 0;
long double sumErrorL2TauXX = 0;
long double sumErrorL2TauYY = 0;
long double sumErrorL2TauXY = 0;
// Each thread accumulates into private copies, summed on region exit.
#pragma omp parallel reduction(+ : sumErrorL2VelocityX, sumErrorL2VelocityY, sumErrorL2Pressure, sumErrorL2TauXX, sumErrorL2TauYY, sumErrorL2TauXY)
{
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
{
Element::GeometryType &geometry = itElem->GetGeometry();
// NOTE(review): nodalArea is computed but unused here.
long double nodalArea = 0;
if (dimension == 2)
{
nodalArea = geometry.Area() / 3.0;
}
else if (dimension == 3)
{
nodalArea = geometry.Volume() * 0.25;
}
long double bariPosX = 0;
long double bariPosY = 0;
long double eleErrorL2Velocity = 0;
long double eleErrorL2VelocityX = 0;
long double eleErrorL2VelocityY = 0;
long double eleErrorL2Pressure = 0;
//ShapeFunctionDerivativesArrayType DN_DX;
Matrix NContainer;
NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1);
const Vector &N = row(NContainer, 0);
const unsigned int NumNodes = geometry.size();
// Interpolate nodal fields at the single Gauss point.
double elementalPressure = N[0] * geometry(0)->FastGetSolutionStepValue(PRESSURE);
double elementalVelocityX = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_X);
double elementalVelocityY = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_Y);
for (unsigned int i = 1; i < NumNodes; i++)
{
elementalPressure += N[i] * geometry(i)->FastGetSolutionStepValue(PRESSURE);
elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
}
// Element barycenter (assumes triangles: each node weighted by 1/3).
for (unsigned int i = 0; i < geometry.size(); i++)
{
const long double nodalPosX = geometry(i)->X();
const long double nodalPosY = geometry(i)->Y();
bariPosX += nodalPosX / 3.0;
bariPosY += nodalPosY / 3.0;
}
const long double posX = bariPosX;
const long double posY = bariPosY;
// Manufactured analytical solution evaluated at the barycenter.
long double expectedVelocityX = pow(posX, 2) * (1.0 - posX) * (1.0 - posX) * (2.0 * posY - 6.0 * pow(posY, 2) + 4.0 * pow(posY, 3));
long double expectedVelocityY = -pow(posY, 2) * (1.0 - posY) * (1.0 - posY) * (2.0 * posX - 6.0 * pow(posX, 2) + 4.0 * pow(posX, 3));
long double expectedPressure = -posX * (1.0 - posX);
eleErrorL2VelocityX = elementalVelocityX - expectedVelocityX;
eleErrorL2VelocityY = elementalVelocityY - expectedVelocityY;
eleErrorL2Pressure = elementalPressure - expectedPressure;
sumErrorL2VelocityX += pow(eleErrorL2VelocityX, 2) * geometry.Area();
sumErrorL2VelocityY += pow(eleErrorL2VelocityY, 2) * geometry.Area();
sumErrorL2Pressure += pow(eleErrorL2Pressure, 2) * geometry.Area();
// Deviatoric stresses currently hard-wired to zero (variables disabled).
const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX);
const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY);
const long double tauXY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY);
long double expectedTauXX = 2.0 * (-4.0 * (1.0 - bariPosX) * bariPosX * (-1.0 + 2.0 * bariPosX) * bariPosY * (1.0 - 3.0 * bariPosY + 2.0 * pow(bariPosY, 2)));
long double expectedTauYY = 2.0 * (4.0 * bariPosX * (1.0 - 3.0 * bariPosX + 2.0 * pow(bariPosX, 2)) * (1.0 - bariPosY) * bariPosY * (-1.0 + 2.0 * bariPosY));
long double expectedTauXY = (2.0 * (1.0 - 6.0 * bariPosY + 6.0 * pow(bariPosY, 2)) * (1.0 - bariPosX) * (1.0 - bariPosX) * pow(bariPosX, 2) - 2.0 * (1.0 - 6.0 * bariPosX + 6.0 * pow(bariPosX, 2)) * (1.0 - bariPosY) * (1 - bariPosY) * pow(bariPosY, 2));
long double nodalErrorTauXX = tauXX - expectedTauXX;
long double nodalErrorTauYY = tauYY - expectedTauYY;
long double nodalErrorTauXY = tauXY - expectedTauXY;
sumErrorL2TauXX += pow(nodalErrorTauXX, 2) * geometry.Area();
sumErrorL2TauYY += pow(nodalErrorTauYY, 2) * geometry.Area();
sumErrorL2TauXY += pow(nodalErrorTauXY, 2) * geometry.Area();
}
}
long double errorL2Velocity = sqrt(sumErrorL2Velocity);
long double errorL2VelocityX = sqrt(sumErrorL2VelocityX);
long double errorL2VelocityY = sqrt(sumErrorL2VelocityY);
long double errorL2Pressure = sqrt(sumErrorL2Pressure);
long double errorL2TauXX = sqrt(sumErrorL2TauXX);
long double errorL2TauYY = sqrt(sumErrorL2TauYY);
long double errorL2TauXY = sqrt(sumErrorL2TauXY);
// Append one (time, error) record per quantity to its own file.
std::ofstream myfileVelocity;
myfileVelocity.open("errorL2VelocityFile.txt", std::ios::app);
myfileVelocity << currentTime << "\t" << errorL2Velocity << "\n";
myfileVelocity.close();
std::ofstream myfileVelocityX;
myfileVelocityX.open("errorL2VelocityXFile.txt", std::ios::app);
myfileVelocityX << currentTime << "\t" << errorL2VelocityX << "\n";
myfileVelocityX.close();
std::ofstream myfileVelocityY;
myfileVelocityY.open("errorL2VelocityYFile.txt", std::ios::app);
myfileVelocityY << currentTime << "\t" << errorL2VelocityY << "\n";
myfileVelocityY.close();
std::ofstream myfilePressure;
myfilePressure.open("errorL2PressureFile.txt", std::ios::app);
myfilePressure << currentTime << "\t" << errorL2Pressure << "\n";
myfilePressure.close();
std::ofstream myfileTauXX;
myfileTauXX.open("errorL2TauXXFile.txt", std::ios::app);
myfileTauXX << currentTime << "\t" << errorL2TauXX << "\n";
myfileTauXX.close();
std::ofstream myfileTauYY;
myfileTauYY.open("errorL2TauYYFile.txt", std::ios::app);
myfileTauYY << currentTime << "\t" << errorL2TauYY << "\n";
myfileTauYY.close();
std::ofstream myfileTauXY;
myfileTauXY.open("errorL2TauXYFile.txt", std::ios::app);
myfileTauXY << currentTime << "\t" << errorL2TauXY << "\n";
myfileTauXY.close();
}
/// Computes L2-norm errors of the tangential velocity and shear stress
/// against the analytical Taylor–Couette (rotating-cylinder) solution and
/// appends one line per call to "errorL2Poiseuille.txt".
/// Fix: the accumulators were updated from inside the OpenMP parallel
/// region with no reduction or critical section — a data race that made the
/// reported norms non-deterministic. They are now combined with a
/// reduction(+) clause on the parallel construct.
void ComputeErrorL2NormCasePoiseuille()
{
ModelPart &rModelPart = BaseType::GetModelPart();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double currentTime = rCurrentProcessInfo[TIME];
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
double sumErrorL2VelocityTheta = 0;
double sumErrorL2TauTheta = 0;
// Geometry/material constants of the analytical benchmark.
double r_in = 0.2;
double R_out = 0.5;
double kappa = r_in / R_out;
double omega = 0.5;
double viscosity = 100.0;
// Each thread accumulates into private copies, summed on region exit.
#pragma omp parallel reduction(+ : sumErrorL2VelocityTheta, sumErrorL2TauTheta)
{
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
{
Element::GeometryType &geometry = itElem->GetGeometry();
// NOTE(review): nodalArea and the eleError* locals are computed/declared
// but unused here.
long double nodalArea = 0;
if (dimension == 2)
{
nodalArea = geometry.Area() / 3.0;
}
else if (dimension == 3)
{
nodalArea = geometry.Volume() * 0.25;
}
long double bariPosX = 0;
long double bariPosY = 0;
long double eleErrorL2Velocity = 0;
long double eleErrorL2VelocityX = 0;
long double eleErrorL2VelocityY = 0;
long double eleErrorL2Pressure = 0;
//ShapeFunctionDerivativesArrayType DN_DX;
Matrix NContainer;
NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1);
//this->CalculateGeometryData(DN_DX,NContainer,GaussWeights);
const Vector &N = row(NContainer, 0);
// itElem->EvaluateInPoint(elementalPressure,PRESSURE,N);
const unsigned int NumNodes = geometry.size();
// Interpolate nodal fields at the single Gauss point.
double elementalPressure = N[0] * geometry(0)->FastGetSolutionStepValue(PRESSURE);
double elementalVelocityX = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_X);
double elementalVelocityY = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_Y);
for (unsigned int i = 1; i < NumNodes; i++)
{
elementalPressure += N[i] * geometry(i)->FastGetSolutionStepValue(PRESSURE);
elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
}
// Element barycenter (assumes triangles: each node weighted by 1/3).
for (unsigned int i = 0; i < geometry.size(); i++)
{
// index = i*dimension;
const long double nodalPosX = geometry(i)->X();
const long double nodalPosY = geometry(i)->Y();
bariPosX += nodalPosX / 3.0;
bariPosY += nodalPosY / 3.0;
}
const long double posX = bariPosX;
const long double posY = bariPosY;
// Polar coordinates of the barycenter and double-angle terms used to
// rotate the Cartesian stress components into (r, theta).
const double rPos = sqrt(pow(posX, 2) + pow(posY, 2));
const double cosalfa = posX / rPos;
const double sinalfa = posY / rPos;
const double sin2alfa = 2.0 * cosalfa * sinalfa;
const double cos2alfa = 1.0 - 2.0 * pow(sinalfa, 2);
double expectedVelocityTheta = pow(kappa, 2) * omega * R_out / (1.0 - pow(kappa, 2)) * (R_out / rPos - rPos / R_out);
double computedVelocityTheta = sqrt(pow(elementalVelocityX, 2) + pow(elementalVelocityY, 2));
double nodalErrorVelocityTheta = computedVelocityTheta - expectedVelocityTheta;
// Deviatoric stresses currently hard-wired to zero (variables disabled).
const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX);
const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY);
const long double tauXY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY);
double expectedTauTheta = (2.0 * viscosity * pow(kappa, 2) * omega * pow(R_out, 2)) / (1.0 - pow(kappa, 2)) / pow(rPos, 2);
double computedTauTheta = (tauXX - tauYY) * sin2alfa / 2.0 - tauXY * cos2alfa;
double nodalErrorTauTheta = computedTauTheta - expectedTauTheta;
sumErrorL2VelocityTheta += pow(nodalErrorVelocityTheta, 2) * geometry.Area();
sumErrorL2TauTheta += pow(nodalErrorTauTheta, 2) * geometry.Area();
}
}
double errorL2VelocityTheta = sqrt(sumErrorL2VelocityTheta);
double errorL2TauTheta = sqrt(sumErrorL2TauTheta);
// Append one (time, velocity error, stress error) record to the log file.
std::ofstream myfileVelocity;
myfileVelocity.open("errorL2Poiseuille.txt", std::ios::app);
myfileVelocity << currentTime << "\t" << errorL2VelocityTheta << "\t" << errorL2TauTheta << "\n";
myfileVelocity.close();
}
double ComputeVelocityNorm()
{
ModelPart &rModelPart = BaseType::GetModelPart();
const int n_nodes = rModelPart.NumberOfNodes();
double NormV = 0.00;
#pragma omp parallel for reduction(+ \
: NormV)
for (int i_node = 0; i_node < n_nodes; ++i_node)
{
const auto it_node = rModelPart.NodesBegin() + i_node;
const auto &r_vel = it_node->FastGetSolutionStepValue(VELOCITY);
for (unsigned int d = 0; d < 3; ++d)
{
NormV += r_vel[d] * r_vel[d];
}
}
NormV = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV);
NormV = sqrt(NormV);
const double zero_tol = 1.0e-12;
if (NormV < zero_tol)
NormV = 1.00;
return NormV;
}
double ComputePressureNorm()
{
ModelPart &rModelPart = BaseType::GetModelPart();
const int n_nodes = rModelPart.NumberOfNodes();
double NormP = 0.00;
#pragma omp parallel for reduction(+ \
: NormP)
for (int i_node = 0; i_node < n_nodes; ++i_node)
{
const auto it_node = rModelPart.NodesBegin() + i_node;
const double Pr = it_node->FastGetSolutionStepValue(PRESSURE);
NormP += Pr * Pr;
}
NormP = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP);
NormP = sqrt(NormP);
const double zero_tol = 1.0e-12;
if (NormP < zero_tol)
NormP = 1.00;
return NormP;
}
/// Convergence and time-step-control hooks. All base implementations are
/// stubs that return false ("not converged" / "no action taken"); the
/// two-step derived strategies override them with the real criteria.
virtual bool CheckVelocityConvergence(const double NormDv, double &errorNormDv)
{
return false;
}
virtual bool CheckPressureConvergence(const double NormDp, double &errorNormDp, double &NormP)
{
return false;
}
virtual bool FixTimeStepMomentum(const double DvErrorNorm, bool &fixedTimeStep)
{
return false;
}
virtual bool CheckMomentumConvergence(const double DvErrorNorm, bool &fixedTimeStep)
{
return false;
}
virtual bool FixTimeStepContinuity(const double DvErrorNorm, bool &fixedTimeStep)
{
return false;
}
virtual bool CheckContinuityConvergence(const double DvErrorNorm, bool &fixedTimeStep)
{
return false;
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
// Fractional step index.
/* 1 : Momentum step (calculate fractional step velocity)
* 2-3 : Unused (reserved for componentwise calculation of frac step velocity)
* 4 : Pressure step
* 5 : Computation of projections
* 6 : End of step velocity
*/
// unsigned int mStepId;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/// Hook invoked by the settings-based constructor; the base class performs
/// no initialization. Derived strategies configure their sub-solvers here.
virtual void InitializeStrategy(SolverSettingsType &rSolverConfig)
{
KRATOS_TRY;
KRATOS_CATCH("");
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator (intentionally copies nothing).
VPStrategy &operator=(VPStrategy const &rOther)
{
    // Fix: the previous body had no return statement; flowing off the end
    // of a value-returning function is undefined behavior if it is ever
    // called. Returning *this preserves the declared interface.
    return *this;
}
/// Copy constructor (intentionally copies nothing; kept as-is for
/// backward compatibility).
VPStrategy(VPStrategy const &rOther) {}
///@}
}; /// Class VPStrategy
///@}
///@name Type Definitions
///@{
///@}
///@} // addtogroup
} // namespace Kratos.
#endif // KRATOS_V_P_STRATEGY_H
|
Example_acquire_release.1.c | /*
* @@name: acquire_release.1.c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: success
* @@version: omp_5.0
*/
#include <stdio.h>
#include <omp.h>
/*
 * Demonstrates release/acquire-style synchronization built from OpenMP
 * critical sections: the implied flushes on entry to and exit from a
 * "critical" region guarantee that once thread 1 observes y == 1, the
 * earlier write x = 10 performed by thread 0 is also visible to it.
 */
int main()
{
int x = 0, y = 0;
#pragma omp parallel num_threads(2)
{
int thrd = omp_get_thread_num();
if (thrd == 0) {
/* Producer: write the payload, then set the flag inside a critical
 * region (the exit flush gives this release semantics). */
x = 10;
#pragma omp critical
{ y = 1; }
} else {
/* Consumer: busy-wait, re-reading the flag inside a critical region
 * (the entry flush gives each read acquire semantics). */
int tmp = 0;
while (tmp == 0) {
#pragma omp critical
{ tmp = y; }
}
printf("x = %d\n", x); // always "x = 10"
}
}
return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.