source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
test.c |
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define TRIALS (1)
#define N (992)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
/* Verify that every element of A equals TRIALS.  On the first mismatch,
   report the host (expected) and device (actual) values and return 0;
   return 1 when all N entries match.  */
int check_results(double* A){
  int idx = 0;
  while (idx < N) {
    if (A[idx] != TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", idx, (double) TRIALS, A[idx]);
      return 0;
    }
    ++idx;
  }
  return 1;
}
/* Verify the results of the private-clause tests: each A[i] must equal
   TRIALS*3 and each B[i] must equal TRIALS*7 (p is set to 3 and q to 7
   inside the target region).  Returns 1 on success, 0 on the first
   mismatch.  */
int check_results_priv(double *A, double *B){
  for (int i = 0 ; i < N ; i++) {
    if (A[i] != TRIALS*3) {
      /* Report the value actually compared against (was TRIALS*2).  */
      printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (TRIALS*3), A[i]);
      return 0;
    }
    if (B[i] != TRIALS*7) {
      /* Report the value actually compared against (was TRIALS*3).  */
      printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (TRIALS*7), B[i]);
      return 0;
    }
  }
  return 1;
}
/* Run TRIALS rounds of a "target teams distribute" vector add of C into
   A, using whatever distribute clauses are currently bound to the
   CLAUSES macro at the expansion site, then verify A and print
   "Succeeded" when the result matches.  Requires A, C, success and
   expected to be in scope where the macro is expanded.  */
#define CODE() \
ZERO(A); \
success = 0; \
for (int t = 0 ; t < TRIALS ; t++) { \
_Pragma("omp target") \
_Pragma("omp teams distribute CLAUSES") \
for (int i = 0 ; i < N ; i++){ \
A[i] += C[i]; \
} \
} \
success += check_results(A); \
if (success == expected) \
printf("Succeeded\n");
/* Variant of CODE() for data-sharing clause tests: p and q are assigned
   inside the distribute loop (3 and 7) and accumulated into A and B, so
   check_results_priv() expects A[i] == TRIALS*3 and B[i] == TRIALS*7.
   Requires A, B, p, q, success and expected in scope at the expansion
   site; the CLAUSES macro supplies the clauses under test.  */
#define CODE_PRIV() \
ZERO(A); \
ZERO(B); \
p = 2.0; \
q = 4.0; \
success = 0; \
for (int t = 0 ; t < TRIALS ; t++) { \
_Pragma("omp target") \
_Pragma("omp teams distribute CLAUSES") \
for (int i = 0 ; i < N ; i++){ \
p = 3; \
q = 7; \
A[i] += p; \
B[i] += q; \
} \
} \
success += check_results_priv(A, B); \
if (success == expected) \
printf("Succeeded\n");
/* Driver: exercises "omp target teams distribute" with a range of
   num_teams / dist_schedule / data-sharing / collapse clauses and
   checks the computed results on the host.  Each test prints
   "Succeeded" or "Failed" on stdout.  */
int main(void) {
  check_offloading();

  double A[N], B[N], C[N], D[N], E[N];
  int fail = 0;
  int expected = 1;
  int success = 0;
  int chunkSize;
  double p = 2.0, q = 4.0;
  int nte, tl, blockSize;
  INIT();

  // **************************
  // Series 1: no dist_schedule
  // **************************
  //
  // Test: #iterations == #teams
  //
  printf("iterations = teams\n");
  #define CLAUSES num_teams(992)
  CODE()
  #undef CLAUSES

  //
  // Test: more iterations than teams
  //
  printf("iterations > teams\n");
  #define CLAUSES num_teams(256)
  CODE()
  #undef CLAUSES

  //
  // Test: fewer iterations than teams
  //
  printf("iterations < teams\n");
  #define CLAUSES num_teams(1024)
  CODE()
  #undef CLAUSES

  //
  // Series 2: dist_schedule(static, <chunk>) variants
  //
  printf("num_teams(512) dist_schedule(static,1)\n");
  #define CLAUSES num_teams(512) dist_schedule(static, 1)
  CODE()
  #undef CLAUSES

  printf("num_teams(512) dist_schedule(static,512)\n");
  #define CLAUSES num_teams(512) dist_schedule(static, 512)
  CODE()
  #undef CLAUSES

  printf("num_teams(512) dist_schedule(static, chunkSize)\n");
  chunkSize = N / 10;
  #define CLAUSES num_teams(512) dist_schedule(static, chunkSize)
  CODE()
  #undef CLAUSES

  printf("num_teams(1024) dist_schedule(static, chunkSize)\n");
  chunkSize = N / 10;
  #define CLAUSES num_teams(1024) dist_schedule(static, chunkSize)
  CODE()
  #undef CLAUSES

  printf("num_teams(1024) dist_schedule(static, 1)\n");
  #define CLAUSES num_teams(1024) dist_schedule(static, 1)
  CODE()
  #undef CLAUSES

  printf("num_teams(3) dist_schedule(static, 1)\n");
  #define CLAUSES num_teams(3) dist_schedule(static, 1)
  CODE()
  #undef CLAUSES

  printf("num_teams(3) dist_schedule(static, 3)\n");
  #define CLAUSES num_teams(3) dist_schedule(static, 3)
  CODE()
  #undef CLAUSES

  printf("num_teams(10) dist_schedule(static, 99)\n");
  #define CLAUSES num_teams(10) dist_schedule(static, 99)
  CODE()
  #undef CLAUSES

  printf("num_teams(256) dist_schedule(static, 992)\n");
  #define CLAUSES num_teams(256) dist_schedule(static, 992)
  CODE()
  #undef CLAUSES

  //
  // Test: private(p,q) -- currently disabled.
  //
#if 0
  printf("num_teams(256) private(p,q)\n");
  #define CLAUSES num_teams(256) private(p,q)
  CODE_PRIV()
  #undef CLAUSES
#endif

  //
  // Test: firstprivate -- currently disabled.
  //
#if 0
  printf("num_teams(64) firstprivate(p, q)\n");
  ZERO(A); ZERO(B);
  p = 2.0, q = 4.0;
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target // implicit firstprivate for p and q, their initial values being 2 and 4 for each target invocation
    #pragma omp teams distribute num_teams(64) firstprivate(p, q)
    for (int i = 0 ; i < 128 ; i++) { // 2 iterations for each team
      p += 3.0; // p and q are firstprivate to the team, and as such incremented twice (2 iterations per team)
      q += 7.0;
      A[i] += p;
      B[i] += q;
    }
  }
  for (int i = 0 ; i < 128 ; i++) {
    if (i % 2 == 0) {
      if (A[i] != (2.0+3.0)*TRIALS) {
        printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]);
        fail = 1;
      }
      if (B[i] != (4.0+7.0)*TRIALS) {
        printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0)*TRIALS, B[i]);
        fail = 1;
      }
    } else {
      if (A[i] != (2.0+3.0*2)*TRIALS) {
        printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0*2)*TRIALS, A[i]);
        fail = 1;
      }
      if (B[i] != (4.0+7.0*2)*TRIALS) {
        printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0*2)*TRIALS, B[i]);
        fail = 1;
      }
    }
  }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");
#endif

  //
  // Test: lastprivate
  //
  printf("num_teams(10) lastprivate(lastpriv)\n");
  success = 0;
  int lastpriv = -1;
  #pragma omp target map(tofrom:lastpriv)
  #pragma omp teams distribute num_teams(10) lastprivate(lastpriv)
  for (int i = 0 ; i < omp_get_num_teams() ; i++)
    lastpriv = omp_get_team_num();
  // The sequentially last iteration is executed by the last of the
  // 10 teams, so the lastprivate copy written back must be 9.
  if (lastpriv != 9) {
    printf("lastpriv value is %d and should have been %d\n", lastpriv, 9);
    fail = 1;
  }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");

  // ***************************
  // Series 4: with parallel for
  // ***************************
  //
  // Test: simple blocking loop
  //
  printf("num_teams(nte) thread_limit(tl) with parallel for innermost\n");
  success = 0;
  ZERO(A); ZERO(B);
  nte = 32;
  tl = 64;
  blockSize = tl;
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target
    #pragma omp teams distribute num_teams(nte) thread_limit(tl)
    for (int j = 0 ; j < 256 ; j += blockSize) {
      #pragma omp parallel for
      for (int i = j ; i < j+blockSize; i++) {
        A[i] += B[i] + C[i];
      }
    }
  }
  for (int i = 0 ; i < 256 ; i++) {
    if (A[i] != TRIALS) {
      // Report the value actually compared against
      // (was (2.0+3.0)*TRIALS, left over from the firstprivate test).
      printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
      fail = 1;
    }
  }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: blocking loop where upper bound is not a multiple of tl*nte
  //
  printf("num_teams(nte) thread_limit(tl) with parallel for innermost\n");
  success = 0;
  ZERO(A); ZERO(B);
  nte = 32;
  tl = 64;
  blockSize = tl;
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target
    #pragma omp teams distribute num_teams(nte) thread_limit(tl)
    for (int j = 0 ; j < 510 ; j += blockSize) {
      // Clamp the last (partial) block to the loop bound 510; the
      // original clamped to 512, which also updated A[510] and A[511].
      int ub = (j+blockSize < 510) ? (j+blockSize) : 510;
      #pragma omp parallel for
      for (int i = j ; i < ub; i++) {
        A[i] += B[i] + C[i];
      }
    }
  }
  for (int i = 0 ; i < 256 ; i++) {
    if (A[i] != TRIALS) {
      // Report the value actually compared against (was (2.0+3.0)*TRIALS).
      printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
      fail = 1;
    }
  }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");

  // **************************
  // Series 5: collapse
  // **************************
  //
  // Test: 2 loops
  //
  printf("num_teams(512) collapse(2)\n");
  success = 0;
  double * S = malloc(N*N*sizeof(double));
  double * T = malloc(N*N*sizeof(double));
  double * U = malloc(N*N*sizeof(double));
  if (!S || !T || !U) {      // ~7.5 MB each; bail out cleanly on OOM
    free(S); free(T); free(U);
    printf("Failed\n");
    return 1;
  }
  for (int i = 0 ; i < N ; i++)
    for (int j = 0 ; j < N ; j++) {
      S[i*N+j] = 0.0;
      T[i*N+j] = 1.0;
      U[i*N+j] = 2.0;
    }
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target map(tofrom:S[:N*N]), map(to:T[:N*N],U[:N*N])
    #pragma omp teams distribute num_teams(512) collapse(2)
    for (int i = 0 ; i < N ; i++)
      for (int j = 0 ; j < N ; j++)
        S[i*N+j] += T[i*N+j] + U[i*N+j]; // += 3 at each t
  }
  for (int i = 0 ; i < N ; i++)
    for (int j = 0 ; j < N ; j++)
      if (S[i*N+j] != TRIALS*3.0) {
        printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, S[i*N+j]);
        fail = 1;
      }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");
  free(S); free(T); free(U);   // were leaked in the original

  //
  // Test: 3 loops
  //
  printf("num_teams(512) collapse(3)\n");
  success = 0;
  int M = N/8;
  double * V = malloc(M*M*M*sizeof(double));
  double * Z = malloc(M*M*M*sizeof(double));
  if (!V || !Z) {
    free(V); free(Z);
    printf("Failed\n");
    return 1;
  }
  for (int i = 0 ; i < M ; i++)
    for (int j = 0 ; j < M ; j++)
      for (int k = 0 ; k < M ; k++) {
        V[i*M*M+j*M+k] = 2.0;
        Z[i*M*M+j*M+k] = 3.0;
      }
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target map(tofrom:V[:M*M*M]), map(to:Z[:M*M*M])
    #pragma omp teams distribute num_teams(512) collapse(3)
    for (int i = 0 ; i < M ; i++)
      for (int j = 0 ; j < M ; j++)
        for (int k = 0 ; k < M ; k++)
          V[i*M*M+j*M+k] += Z[i*M*M+j*M+k]; // += 3 at each t
  }
  for (int i = 0 ; i < M ; i++)
    for (int j = 0 ; j < M ; j++)
      for (int k = 0 ; k < M ; k++)
        if (V[i*M*M+j*M+k] != 2.0+TRIALS*3.0) {
          // Report all three indices and the value actually compared
          // against (was missing k and printed TRIALS*3.0).
          printf("Error at (%d,%d,%d), h = %lf, d = %lf\n", i, j, k,
                 (double) (2.0+TRIALS*3.0), V[i*M*M+j*M+k]);
          fail = 1;
        }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");
  free(V); free(Z);   // were leaked in the original

  return 0;
}
|
gimple.h | /* Gimple IR definitions.
Copyright (C) 2007-2014 Free Software Foundation, Inc.
Contributed by Aldy Hernandez <aldyh@redhat.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_GIMPLE_H
#define GCC_GIMPLE_H
typedef gimple gimple_seq_node;
/* For each block, the PHI nodes that need to be rewritten are stored into
these vectors. */
typedef vec<gimple> gimple_vec;
enum gimple_code {
#define DEFGSCODE(SYM, STRING, STRUCT) SYM,
#include "gimple.def"
#undef DEFGSCODE
LAST_AND_UNUSED_GIMPLE_CODE
};
extern const char *const gimple_code_name[];
extern const unsigned char gimple_rhs_class_table[];
/* Error out if a gimple tuple is addressed incorrectly. */
#if defined ENABLE_GIMPLE_CHECKING
#define gcc_gimple_checking_assert(EXPR) gcc_assert (EXPR)
extern void gimple_check_failed (const_gimple, const char *, int, \
const char *, enum gimple_code, \
enum tree_code) ATTRIBUTE_NORETURN;
#define GIMPLE_CHECK(GS, CODE) \
do { \
const_gimple __gs = (GS); \
if (gimple_code (__gs) != (CODE)) \
gimple_check_failed (__gs, __FILE__, __LINE__, __FUNCTION__, \
(CODE), ERROR_MARK); \
} while (0)
#else /* not ENABLE_GIMPLE_CHECKING */
#define gcc_gimple_checking_assert(EXPR) ((void)(0 && (EXPR)))
#define GIMPLE_CHECK(GS, CODE) (void)0
#endif
/* Class of GIMPLE expressions suitable for the RHS of assignments. See
get_gimple_rhs_class. */
enum gimple_rhs_class
{
GIMPLE_INVALID_RHS, /* The expression cannot be used on the RHS. */
GIMPLE_TERNARY_RHS, /* The expression is a ternary operation. */
GIMPLE_BINARY_RHS, /* The expression is a binary operation. */
GIMPLE_UNARY_RHS, /* The expression is a unary operation. */
GIMPLE_SINGLE_RHS /* The expression is a single object (an SSA
name, a _DECL, a _REF, etc.). */
};
/* Specific flags for individual GIMPLE statements. These flags are
always stored in gimple_statement_base.subcode and they may only be
defined for statement codes that do not use subcodes.
Values for the masks can overlap as long as the overlapping values
are never used in the same statement class.
The maximum mask value that can be defined is 1 << 15 (i.e., each
statement code can hold up to 16 bitflags).
Keep this list sorted. */
enum gf_mask {
GF_ASM_INPUT = 1 << 0,
GF_ASM_VOLATILE = 1 << 1,
GF_CALL_FROM_THUNK = 1 << 0,
GF_CALL_RETURN_SLOT_OPT = 1 << 1,
GF_CALL_TAILCALL = 1 << 2,
GF_CALL_VA_ARG_PACK = 1 << 3,
GF_CALL_NOTHROW = 1 << 4,
GF_CALL_ALLOCA_FOR_VAR = 1 << 5,
GF_CALL_INTERNAL = 1 << 6,
GF_OMP_PARALLEL_COMBINED = 1 << 0,
GF_OMP_FOR_KIND_MASK = 3 << 0,
GF_OMP_FOR_KIND_FOR = 0 << 0,
GF_OMP_FOR_KIND_DISTRIBUTE = 1 << 0,
GF_OMP_FOR_KIND_SIMD = 2 << 0,
GF_OMP_FOR_KIND_CILKSIMD = 3 << 0,
GF_OMP_FOR_COMBINED = 1 << 2,
GF_OMP_FOR_COMBINED_INTO = 1 << 3,
GF_OMP_TARGET_KIND_MASK = 3 << 0,
GF_OMP_TARGET_KIND_REGION = 0 << 0,
GF_OMP_TARGET_KIND_DATA = 1 << 0,
GF_OMP_TARGET_KIND_UPDATE = 2 << 0,
/* True on an GIMPLE_OMP_RETURN statement if the return does not require
a thread synchronization via some sort of barrier. The exact barrier
that would otherwise be emitted is dependent on the OMP statement with
which this return is associated. */
GF_OMP_RETURN_NOWAIT = 1 << 0,
GF_OMP_SECTION_LAST = 1 << 0,
GF_OMP_ATOMIC_NEED_VALUE = 1 << 0,
GF_OMP_ATOMIC_SEQ_CST = 1 << 1,
GF_PREDICT_TAKEN = 1 << 15
};
/* Currently, there are only two types of gimple debug stmt. Others are
envisioned, for example, to enable the generation of is_stmt notes
in line number information, to mark sequence points, etc. This
subcode is to be used to tell them apart. */
enum gimple_debug_subcode {
GIMPLE_DEBUG_BIND = 0,
GIMPLE_DEBUG_SOURCE_BIND = 1
};
/* Masks for selecting a pass local flag (PLF) to work on. These
masks are used by gimple_set_plf and gimple_plf. */
enum plf_mask {
GF_PLF_1 = 1 << 0,
GF_PLF_2 = 1 << 1
};
/* Data structure definitions for GIMPLE tuples. NOTE: word markers
are for 64 bit hosts. */
struct GTY((desc ("gimple_statement_structure (&%h)"), tag ("GSS_BASE"),
chain_next ("%h.next"), variable_size))
gimple_statement_base
{
/* [ WORD 1 ]
Main identifying code for a tuple. */
ENUM_BITFIELD(gimple_code) code : 8;
/* Nonzero if a warning should not be emitted on this tuple. */
unsigned int no_warning : 1;
/* Nonzero if this tuple has been visited. Passes are responsible
for clearing this bit before using it. */
unsigned int visited : 1;
/* Nonzero if this tuple represents a non-temporal move. */
unsigned int nontemporal_move : 1;
/* Pass local flags. These flags are free for any pass to use as
they see fit. Passes should not assume that these flags contain
any useful value when the pass starts. Any initial state that
the pass requires should be set on entry to the pass. See
gimple_set_plf and gimple_plf for usage. */
unsigned int plf : 2;
/* Nonzero if this statement has been modified and needs to have its
operands rescanned. */
unsigned modified : 1;
/* Nonzero if this statement contains volatile operands. */
unsigned has_volatile_ops : 1;
/* Padding to get subcode to 16 bit alignment. */
unsigned pad : 1;
/* The SUBCODE field can be used for tuple-specific flags for tuples
that do not require subcodes. Note that SUBCODE should be at
least as wide as tree codes, as several tuples store tree codes
in there. */
unsigned int subcode : 16;
/* UID of this statement. This is used by passes that want to
assign IDs to statements. It must be assigned and used by each
pass. By default it should be assumed to contain garbage. */
unsigned uid;
/* [ WORD 2 ]
Locus information for debug info. */
location_t location;
/* Number of operands in this tuple. */
unsigned num_ops;
/* [ WORD 3 ]
Basic block holding this statement. */
basic_block bb;
/* [ WORD 4-5 ]
Linked lists of gimple statements. The next pointers form
a NULL terminated list, the prev pointers are a cyclic list.
A gimple statement is hence also a double-ended list of
statements, with the pointer itself being the first element,
and the prev pointer being the last. */
gimple next;
gimple GTY((skip)) prev;
};
/* Base structure for tuples with operands. */
/* This gimple subclass has no tag value. */
struct GTY(())
gimple_statement_with_ops_base : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ]
SSA operand vectors. NOTE: It should be possible to
amalgamate these vectors with the operand vector OP. However,
the SSA operand vectors are organized differently and contain
more information (like immediate use chaining). */
struct use_optype_d GTY((skip (""))) *use_ops;
};
/* Statements that take register operands. */
struct GTY((tag("GSS_WITH_OPS")))
gimple_statement_with_ops : public gimple_statement_with_ops_base
{
/* [ WORD 1-7 ] : base class */
/* [ WORD 8 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.num_ops"))) op[1];
};
/* Base for statements that take both memory and register operands. */
struct GTY((tag("GSS_WITH_MEM_OPS_BASE")))
gimple_statement_with_memory_ops_base : public gimple_statement_with_ops_base
{
/* [ WORD 1-7 ] : base class */
/* [ WORD 8-9 ]
Virtual operands for this statement. The GC will pick them
up via the ssa_names array. */
tree GTY((skip (""))) vdef;
tree GTY((skip (""))) vuse;
};
/* Statements that take both memory and register operands. */
struct GTY((tag("GSS_WITH_MEM_OPS")))
gimple_statement_with_memory_ops :
public gimple_statement_with_memory_ops_base
{
/* [ WORD 1-9 ] : base class */
/* [ WORD 10 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.num_ops"))) op[1];
};
/* Call statements that take both memory and register operands. */
struct GTY((tag("GSS_CALL")))
gimple_statement_call : public gimple_statement_with_memory_ops_base
{
/* [ WORD 1-9 ] : base class */
/* [ WORD 10-13 ] */
struct pt_solution call_used;
struct pt_solution call_clobbered;
/* [ WORD 14 ] */
union GTY ((desc ("%1.subcode & GF_CALL_INTERNAL"))) {
tree GTY ((tag ("0"))) fntype;
enum internal_fn GTY ((tag ("GF_CALL_INTERNAL"))) internal_fn;
} u;
/* [ WORD 15 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.num_ops"))) op[1];
};
/* OpenMP statements (#pragma omp). */
struct GTY((tag("GSS_OMP")))
gimple_statement_omp : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ] */
gimple_seq body;
};
/* GIMPLE_BIND */
struct GTY((tag("GSS_BIND")))
gimple_statement_bind : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ]
Variables declared in this scope. */
tree vars;
/* [ WORD 8 ]
This is different than the BLOCK field in gimple_statement_base,
which is analogous to TREE_BLOCK (i.e., the lexical block holding
this statement). This field is the equivalent of BIND_EXPR_BLOCK
in tree land (i.e., the lexical scope defined by this bind). See
gimple-low.c. */
tree block;
/* [ WORD 9 ] */
gimple_seq body;
};
/* GIMPLE_CATCH */
struct GTY((tag("GSS_CATCH")))
gimple_statement_catch : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ] */
tree types;
/* [ WORD 8 ] */
gimple_seq handler;
};
/* GIMPLE_EH_FILTER */
struct GTY((tag("GSS_EH_FILTER")))
gimple_statement_eh_filter : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ]
Filter types. */
tree types;
/* [ WORD 8 ]
Failure actions. */
gimple_seq failure;
};
/* GIMPLE_EH_ELSE */
struct GTY((tag("GSS_EH_ELSE")))
gimple_statement_eh_else : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7,8 ] */
gimple_seq n_body, e_body;
};
/* GIMPLE_EH_MUST_NOT_THROW */
struct GTY((tag("GSS_EH_MNT")))
gimple_statement_eh_mnt : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ] Abort function decl. */
tree fndecl;
};
/* GIMPLE_PHI */
struct GTY((tag("GSS_PHI")))
gimple_statement_phi : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ] */
unsigned capacity;
unsigned nargs;
/* [ WORD 8 ] */
tree result;
/* [ WORD 9 ] */
struct phi_arg_d GTY ((length ("%h.nargs"))) args[1];
};
/* GIMPLE_RESX, GIMPLE_EH_DISPATCH */
struct GTY((tag("GSS_EH_CTRL")))
gimple_statement_eh_ctrl : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ]
Exception region number. */
int region;
};
struct GTY((tag("GSS_EH_CTRL")))
gimple_statement_resx : public gimple_statement_eh_ctrl
{
/* No extra fields; adds invariant:
stmt->code == GIMPLE_RESX. */
};
struct GTY((tag("GSS_EH_CTRL")))
gimple_statement_eh_dispatch : public gimple_statement_eh_ctrl
{
/* No extra fields; adds invariant:
stmt->code == GIMPLE_EH_DISPATCH. */
};
/* GIMPLE_TRY */
struct GTY((tag("GSS_TRY")))
gimple_statement_try : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ]
Expression to evaluate. */
gimple_seq eval;
/* [ WORD 8 ]
Cleanup expression. */
gimple_seq cleanup;
};
/* Kind of GIMPLE_TRY statements. */
enum gimple_try_flags
{
/* A try/catch. */
GIMPLE_TRY_CATCH = 1 << 0,
/* A try/finally. */
GIMPLE_TRY_FINALLY = 1 << 1,
GIMPLE_TRY_KIND = GIMPLE_TRY_CATCH | GIMPLE_TRY_FINALLY,
/* Analogous to TRY_CATCH_IS_CLEANUP. */
GIMPLE_TRY_CATCH_IS_CLEANUP = 1 << 2
};
/* GIMPLE_WITH_CLEANUP_EXPR */
struct GTY((tag("GSS_WCE")))
gimple_statement_wce : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* Subcode: CLEANUP_EH_ONLY. True if the cleanup should only be
executed if an exception is thrown, not on normal exit of its
scope. This flag is analogous to the CLEANUP_EH_ONLY flag
in TARGET_EXPRs. */
/* [ WORD 7 ]
Cleanup expression. */
gimple_seq cleanup;
};
/* GIMPLE_ASM */
struct GTY((tag("GSS_ASM")))
gimple_statement_asm : public gimple_statement_with_memory_ops_base
{
/* [ WORD 1-9 ] : base class */
/* [ WORD 10 ]
__asm__ statement. */
const char *string;
/* [ WORD 11 ]
Number of inputs, outputs, clobbers, labels. */
unsigned char ni;
unsigned char no;
unsigned char nc;
unsigned char nl;
/* [ WORD 12 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.num_ops"))) op[1];
};
/* GIMPLE_OMP_CRITICAL */
struct GTY((tag("GSS_OMP_CRITICAL")))
gimple_statement_omp_critical : public gimple_statement_omp
{
/* [ WORD 1-7 ] : base class */
/* [ WORD 8 ]
Critical section name. */
tree name;
};
struct GTY(()) gimple_omp_for_iter {
/* Condition code. */
enum tree_code cond;
/* Index variable. */
tree index;
/* Initial value. */
tree initial;
/* Final value. */
tree final;
/* Increment. */
tree incr;
};
/* GIMPLE_OMP_FOR */
struct GTY((tag("GSS_OMP_FOR")))
gimple_statement_omp_for : public gimple_statement_omp
{
/* [ WORD 1-7 ] : base class */
/* [ WORD 8 ] */
tree clauses;
/* [ WORD 9 ]
Number of elements in iter array. */
size_t collapse;
/* [ WORD 10 ] */
struct gimple_omp_for_iter * GTY((length ("%h.collapse"))) iter;
/* [ WORD 11 ]
Pre-body evaluated before the loop body begins. */
gimple_seq pre_body;
};
/* GIMPLE_OMP_PARALLEL, GIMPLE_OMP_TARGET */
struct GTY((tag("GSS_OMP_PARALLEL_LAYOUT")))
gimple_statement_omp_parallel_layout : public gimple_statement_omp
{
/* [ WORD 1-7 ] : base class */
/* [ WORD 8 ]
Clauses. */
tree clauses;
/* [ WORD 9 ]
Child function holding the body of the parallel region. */
tree child_fn;
/* [ WORD 10 ]
Shared data argument. */
tree data_arg;
};
/* GIMPLE_OMP_PARALLEL or GIMPLE_TASK */
struct GTY((tag("GSS_OMP_PARALLEL_LAYOUT")))
gimple_statement_omp_taskreg : public gimple_statement_omp_parallel_layout
{
/* No extra fields; adds invariant:
stmt->code == GIMPLE_OMP_PARALLEL
|| stmt->code == GIMPLE_OMP_TASK. */
};
/* GIMPLE_OMP_PARALLEL */
struct GTY((tag("GSS_OMP_PARALLEL_LAYOUT")))
gimple_statement_omp_parallel : public gimple_statement_omp_taskreg
{
/* No extra fields; adds invariant:
stmt->code == GIMPLE_OMP_PARALLEL. */
};
struct GTY((tag("GSS_OMP_PARALLEL_LAYOUT")))
gimple_statement_omp_target : public gimple_statement_omp_parallel_layout
{
/* No extra fields; adds invariant:
stmt->code == GIMPLE_OMP_TARGET. */
};
/* GIMPLE_OMP_TASK */
struct GTY((tag("GSS_OMP_TASK")))
gimple_statement_omp_task : public gimple_statement_omp_taskreg
{
/* [ WORD 1-10 ] : base class */
/* [ WORD 11 ]
Child function holding firstprivate initialization if needed. */
tree copy_fn;
/* [ WORD 12-13 ]
Size and alignment in bytes of the argument data block. */
tree arg_size;
tree arg_align;
};
/* GIMPLE_OMP_SECTION */
/* Uses struct gimple_statement_omp. */
/* GIMPLE_OMP_SECTIONS */
struct GTY((tag("GSS_OMP_SECTIONS")))
gimple_statement_omp_sections : public gimple_statement_omp
{
/* [ WORD 1-7 ] : base class */
/* [ WORD 8 ] */
tree clauses;
/* [ WORD 9 ]
The control variable used for deciding which of the sections to
execute. */
tree control;
};
/* GIMPLE_OMP_CONTINUE.
Note: This does not inherit from gimple_statement_omp, because we
do not need the body field. */
struct GTY((tag("GSS_OMP_CONTINUE")))
gimple_statement_omp_continue : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ] */
tree control_def;
/* [ WORD 8 ] */
tree control_use;
};
/* GIMPLE_OMP_SINGLE, GIMPLE_OMP_TEAMS */
struct GTY((tag("GSS_OMP_SINGLE_LAYOUT")))
gimple_statement_omp_single_layout : public gimple_statement_omp
{
/* [ WORD 1-7 ] : base class */
/* [ WORD 8 ]
Clauses. */
tree clauses;
};
struct GTY((tag("GSS_OMP_SINGLE_LAYOUT")))
gimple_statement_omp_single : public gimple_statement_omp_single_layout
{
/* No extra fields; adds invariant:
stmt->code == GIMPLE_OMP_SINGLE. */
};
struct GTY((tag("GSS_OMP_SINGLE_LAYOUT")))
gimple_statement_omp_teams : public gimple_statement_omp_single_layout
{
/* No extra fields; adds invariant:
stmt->code == GIMPLE_OMP_TEAMS. */
};
/* GIMPLE_OMP_ATOMIC_LOAD.
Note: This is based on gimple_statement_base, not g_s_omp, because g_s_omp
contains a sequence, which we don't need here. */
struct GTY((tag("GSS_OMP_ATOMIC_LOAD")))
gimple_statement_omp_atomic_load : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7-8 ] */
tree rhs, lhs;
};
/* GIMPLE_OMP_ATOMIC_STORE.
See note on GIMPLE_OMP_ATOMIC_LOAD. */
struct GTY((tag("GSS_OMP_ATOMIC_STORE_LAYOUT")))
gimple_statement_omp_atomic_store_layout : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ] */
tree val;
};
struct GTY((tag("GSS_OMP_ATOMIC_STORE_LAYOUT")))
gimple_statement_omp_atomic_store :
public gimple_statement_omp_atomic_store_layout
{
/* No extra fields; adds invariant:
stmt->code == GIMPLE_OMP_ATOMIC_STORE. */
};
struct GTY((tag("GSS_OMP_ATOMIC_STORE_LAYOUT")))
gimple_statement_omp_return :
public gimple_statement_omp_atomic_store_layout
{
/* No extra fields; adds invariant:
stmt->code == GIMPLE_OMP_RETURN. */
};
/* GIMPLE_TRANSACTION. */
/* Bits to be stored in the GIMPLE_TRANSACTION subcode. */
/* The __transaction_atomic was declared [[outer]] or it is
__transaction_relaxed. */
#define GTMA_IS_OUTER (1u << 0)
#define GTMA_IS_RELAXED (1u << 1)
#define GTMA_DECLARATION_MASK (GTMA_IS_OUTER | GTMA_IS_RELAXED)
/* The transaction is seen to not have an abort. */
#define GTMA_HAVE_ABORT (1u << 2)
/* The transaction is seen to have loads or stores. */
#define GTMA_HAVE_LOAD (1u << 3)
#define GTMA_HAVE_STORE (1u << 4)
/* The transaction MAY enter serial irrevocable mode in its dynamic scope. */
#define GTMA_MAY_ENTER_IRREVOCABLE (1u << 5)
/* The transaction WILL enter serial irrevocable mode.
An irrevocable block post-dominates the entire transaction, such
that all invocations of the transaction will go serial-irrevocable.
In such case, we don't bother instrumenting the transaction, and
tell the runtime that it should begin the transaction in
serial-irrevocable mode. */
#define GTMA_DOES_GO_IRREVOCABLE (1u << 6)
/* The transaction contains no instrumentation code whatsover, most
likely because it is guaranteed to go irrevocable upon entry. */
#define GTMA_HAS_NO_INSTRUMENTATION (1u << 7)
struct GTY((tag("GSS_TRANSACTION")))
gimple_statement_transaction : public gimple_statement_with_memory_ops_base
{
/* [ WORD 1-9 ] : base class */
/* [ WORD 10 ] */
gimple_seq body;
/* [ WORD 11 ] */
tree label;
};
#define DEFGSSTRUCT(SYM, STRUCT, HAS_TREE_OP) SYM,
enum gimple_statement_structure_enum {
#include "gsstruct.def"
LAST_GSS_ENUM
};
#undef DEFGSSTRUCT
/* is_a_helper specializations: each enables is_a <T> / dyn_cast-style
   queries on a gimple statement by testing its code field.  The same
   pattern repeats below for every gimple statement subclass, in both
   gimple and const_gimple flavors.  */
template <>
template <>
inline bool
is_a_helper <gimple_statement_asm>::test (gimple gs)
{
return gs->code == GIMPLE_ASM;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_bind>::test (gimple gs)
{
return gs->code == GIMPLE_BIND;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_call>::test (gimple gs)
{
return gs->code == GIMPLE_CALL;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_catch>::test (gimple gs)
{
return gs->code == GIMPLE_CATCH;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_resx>::test (gimple gs)
{
return gs->code == GIMPLE_RESX;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_eh_dispatch>::test (gimple gs)
{
return gs->code == GIMPLE_EH_DISPATCH;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_eh_else>::test (gimple gs)
{
return gs->code == GIMPLE_EH_ELSE;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_eh_filter>::test (gimple gs)
{
return gs->code == GIMPLE_EH_FILTER;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_eh_mnt>::test (gimple gs)
{
return gs->code == GIMPLE_EH_MUST_NOT_THROW;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_atomic_load>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_ATOMIC_LOAD;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_atomic_store>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_ATOMIC_STORE;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_return>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_RETURN;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_continue>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_CONTINUE;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_critical>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_CRITICAL;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_for>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_FOR;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_taskreg>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_PARALLEL || gs->code == GIMPLE_OMP_TASK;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_parallel>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_PARALLEL;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_target>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_TARGET;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_sections>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_SECTIONS;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_single>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_SINGLE;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_teams>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_TEAMS;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_task>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_TASK;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_phi>::test (gimple gs)
{
return gs->code == GIMPLE_PHI;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_transaction>::test (gimple gs)
{
return gs->code == GIMPLE_TRANSACTION;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_try>::test (gimple gs)
{
return gs->code == GIMPLE_TRY;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_wce>::test (gimple gs)
{
return gs->code == GIMPLE_WITH_CLEANUP_EXPR;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_asm>::test (const_gimple gs)
{
return gs->code == GIMPLE_ASM;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_bind>::test (const_gimple gs)
{
return gs->code == GIMPLE_BIND;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_call>::test (const_gimple gs)
{
return gs->code == GIMPLE_CALL;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_catch>::test (const_gimple gs)
{
return gs->code == GIMPLE_CATCH;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_resx>::test (const_gimple gs)
{
return gs->code == GIMPLE_RESX;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_eh_dispatch>::test (const_gimple gs)
{
return gs->code == GIMPLE_EH_DISPATCH;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_eh_filter>::test (const_gimple gs)
{
return gs->code == GIMPLE_EH_FILTER;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_atomic_load>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_ATOMIC_LOAD;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_atomic_store>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_ATOMIC_STORE;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_return>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_RETURN;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_continue>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_CONTINUE;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_critical>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_CRITICAL;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_for>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_FOR;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_taskreg>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_PARALLEL || gs->code == GIMPLE_OMP_TASK;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_parallel>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_PARALLEL;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_target>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_TARGET;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_sections>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_SECTIONS;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_single>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_SINGLE;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_teams>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_TEAMS;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_task>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_TASK;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_phi>::test (const_gimple gs)
{
return gs->code == GIMPLE_PHI;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_transaction>::test (const_gimple gs)
{
return gs->code == GIMPLE_TRANSACTION;
}
/* Offset in bytes to the location of the operand vector.
Zero if there is no operand vector for this tuple structure. */
extern size_t const gimple_ops_offset_[];
/* Map GIMPLE codes to GSS codes. */
extern enum gimple_statement_structure_enum const gss_for_code_[];
/* This variable holds the currently expanded gimple statement for purposes
of comminucating the profile info to the builtin expanders. */
extern gimple currently_expanding_gimple_stmt;
#define gimple_alloc(c, n) gimple_alloc_stat (c, n MEM_STAT_INFO)
gimple gimple_alloc_stat (enum gimple_code, unsigned MEM_STAT_DECL);
gimple gimple_build_return (tree);
void gimple_call_reset_alias_info (gimple);
gimple gimple_build_call_vec (tree, vec<tree> );
gimple gimple_build_call (tree, unsigned, ...);
gimple gimple_build_call_valist (tree, unsigned, va_list);
gimple gimple_build_call_internal (enum internal_fn, unsigned, ...);
gimple gimple_build_call_internal_vec (enum internal_fn, vec<tree> );
gimple gimple_build_call_from_tree (tree);
gimple gimple_build_assign_stat (tree, tree MEM_STAT_DECL);
#define gimple_build_assign(l,r) gimple_build_assign_stat (l, r MEM_STAT_INFO)
gimple gimple_build_assign_with_ops (enum tree_code, tree,
tree, tree, tree CXX_MEM_STAT_INFO);
gimple gimple_build_assign_with_ops (enum tree_code, tree,
tree, tree CXX_MEM_STAT_INFO);
gimple gimple_build_cond (enum tree_code, tree, tree, tree, tree);
gimple gimple_build_cond_from_tree (tree, tree, tree);
void gimple_cond_set_condition_from_tree (gimple, tree);
gimple gimple_build_label (tree label);
gimple gimple_build_goto (tree dest);
gimple gimple_build_nop (void);
gimple gimple_build_bind (tree, gimple_seq, tree);
gimple gimple_build_asm_vec (const char *, vec<tree, va_gc> *,
vec<tree, va_gc> *, vec<tree, va_gc> *,
vec<tree, va_gc> *);
gimple gimple_build_catch (tree, gimple_seq);
gimple gimple_build_eh_filter (tree, gimple_seq);
gimple gimple_build_eh_must_not_throw (tree);
gimple gimple_build_eh_else (gimple_seq, gimple_seq);
gimple_statement_try *gimple_build_try (gimple_seq, gimple_seq,
enum gimple_try_flags);
gimple gimple_build_wce (gimple_seq);
gimple gimple_build_resx (int);
gimple gimple_build_switch_nlabels (unsigned, tree, tree);
gimple gimple_build_switch (tree, tree, vec<tree> );
gimple gimple_build_eh_dispatch (int);
gimple gimple_build_debug_bind_stat (tree, tree, gimple MEM_STAT_DECL);
#define gimple_build_debug_bind(var,val,stmt) \
gimple_build_debug_bind_stat ((var), (val), (stmt) MEM_STAT_INFO)
gimple gimple_build_debug_source_bind_stat (tree, tree, gimple MEM_STAT_DECL);
#define gimple_build_debug_source_bind(var,val,stmt) \
gimple_build_debug_source_bind_stat ((var), (val), (stmt) MEM_STAT_INFO)
gimple gimple_build_omp_critical (gimple_seq, tree);
gimple gimple_build_omp_for (gimple_seq, int, tree, size_t, gimple_seq);
gimple gimple_build_omp_parallel (gimple_seq, tree, tree, tree);
gimple gimple_build_omp_task (gimple_seq, tree, tree, tree, tree, tree, tree);
gimple gimple_build_omp_section (gimple_seq);
gimple gimple_build_omp_master (gimple_seq);
gimple gimple_build_omp_taskgroup (gimple_seq);
gimple gimple_build_omp_continue (tree, tree);
gimple gimple_build_omp_ordered (gimple_seq);
gimple gimple_build_omp_return (bool);
gimple gimple_build_omp_sections (gimple_seq, tree);
gimple gimple_build_omp_sections_switch (void);
gimple gimple_build_omp_single (gimple_seq, tree);
gimple gimple_build_omp_target (gimple_seq, int, tree);
gimple gimple_build_omp_teams (gimple_seq, tree);
gimple gimple_build_omp_atomic_load (tree, tree);
gimple gimple_build_omp_atomic_store (tree);
gimple gimple_build_transaction (gimple_seq, tree);
gimple gimple_build_predict (enum br_predictor, enum prediction);
extern void gimple_seq_add_stmt (gimple_seq *, gimple);
extern void gimple_seq_add_stmt_without_update (gimple_seq *, gimple);
void gimple_seq_add_seq (gimple_seq *, gimple_seq);
extern void annotate_all_with_location_after (gimple_seq, gimple_stmt_iterator,
location_t);
extern void annotate_all_with_location (gimple_seq, location_t);
bool empty_body_p (gimple_seq);
gimple_seq gimple_seq_copy (gimple_seq);
bool gimple_call_same_target_p (const_gimple, const_gimple);
int gimple_call_flags (const_gimple);
int gimple_call_arg_flags (const_gimple, unsigned);
int gimple_call_return_flags (const_gimple);
bool gimple_assign_copy_p (gimple);
bool gimple_assign_ssa_name_copy_p (gimple);
bool gimple_assign_unary_nop_p (gimple);
void gimple_set_bb (gimple, basic_block);
void gimple_assign_set_rhs_from_tree (gimple_stmt_iterator *, tree);
void gimple_assign_set_rhs_with_ops_1 (gimple_stmt_iterator *, enum tree_code,
tree, tree, tree);
tree gimple_get_lhs (const_gimple);
void gimple_set_lhs (gimple, tree);
gimple gimple_copy (gimple);
bool gimple_has_side_effects (const_gimple);
bool gimple_could_trap_p_1 (gimple, bool, bool);
bool gimple_could_trap_p (gimple);
bool gimple_assign_rhs_could_trap_p (gimple);
extern void dump_gimple_statistics (void);
unsigned get_gimple_rhs_num_ops (enum tree_code);
extern tree canonicalize_cond_expr_cond (tree);
gimple gimple_call_copy_skip_args (gimple, bitmap);
extern bool gimple_compare_field_offset (tree, tree);
extern tree gimple_unsigned_type (tree);
extern tree gimple_signed_type (tree);
extern alias_set_type gimple_get_alias_set (tree);
extern bool gimple_ior_addresses_taken (bitmap, gimple);
extern bool gimple_builtin_call_types_compatible_p (gimple, tree);
extern bool gimple_call_builtin_p (gimple);
extern bool gimple_call_builtin_p (gimple, enum built_in_class);
extern bool gimple_call_builtin_p (gimple, enum built_in_function);
extern bool gimple_asm_clobbers_memory_p (const_gimple);
extern void dump_decl_set (FILE *, bitmap);
extern bool nonfreeing_call_p (gimple);
extern bool infer_nonnull_range (gimple, tree, bool, bool);
extern void sort_case_labels (vec<tree> );
extern void preprocess_case_label_vec_for_gimple (vec<tree> , tree, tree *);
extern void gimple_seq_set_location (gimple_seq , location_t);
/* Formal (expression) temporary table handling: multiple occurrences of
   the same scalar expression are evaluated into the same temporary.
   Entries of the table pair an expression with the temporary created
   for it.  */
typedef struct gimple_temp_hash_elt
{
  tree val;   /* Key: the original scalar expression.  */
  tree temp;  /* Value: the temporary holding its value.  */
} elt_t;
/* Get the number of the next statement uid to be allocated.  */
static inline unsigned int
gimple_stmt_max_uid (struct function *fn)
{
  return fn->last_stmt_uid;
}
/* Set the number of the next statement uid to be allocated.  */
static inline void
set_gimple_stmt_max_uid (struct function *fn, unsigned int maxid)
{
  fn->last_stmt_uid = maxid;
}
/* Allocate and return the next statement uid of FN: returns the current
   counter value and post-increments it.  (The previous comment here said
   "Set the number ..." — a copy-paste error from the function above.)  */
static inline unsigned int
inc_gimple_stmt_max_uid (struct function *fn)
{
  return fn->last_stmt_uid++;
}
/* Return the first node in GIMPLE sequence S.  A sequence is represented
   by (a pointer to) its first statement, so this is S itself; NULL means
   the empty sequence.  */
static inline gimple_seq_node
gimple_seq_first (gimple_seq s)
{
  return s;
}
/* Return the first statement in GIMPLE sequence S (nodes and statements
   are the same object here).  */
static inline gimple
gimple_seq_first_stmt (gimple_seq s)
{
  gimple_seq_node n = gimple_seq_first (s);
  return n;
}
/* Return the last node in GIMPLE sequence S, or NULL if S is empty.
   The first node's PREV pointer links to the last node (see
   gimple_init_singleton, which sets g->prev = g).  */
static inline gimple_seq_node
gimple_seq_last (gimple_seq s)
{
  return s ? s->prev : NULL;
}
/* Return the last statement in GIMPLE sequence S.  */
static inline gimple
gimple_seq_last_stmt (gimple_seq s)
{
  gimple_seq_node n = gimple_seq_last (s);
  return n;
}
/* Set the last node in GIMPLE sequence *PS to LAST, by updating the
   first node's PREV link.  *PS must be non-empty.  */
static inline void
gimple_seq_set_last (gimple_seq *ps, gimple_seq_node last)
{
  (*ps)->prev = last;
}
/* Set the first node in GIMPLE sequence *PS to FIRST; since a sequence
   is its first node, this just rebinds *PS.  */
static inline void
gimple_seq_set_first (gimple_seq *ps, gimple_seq_node first)
{
  *ps = first;
}
/* Return true if GIMPLE sequence S is empty.  */
static inline bool
gimple_seq_empty_p (gimple_seq s)
{
  return s == NULL;
}
/* Allocate a new sequence and initialize its first element with STMT.  */
static inline gimple_seq
gimple_seq_alloc_with_stmt (gimple stmt)
{
  gimple_seq seq = NULL;
  gimple_seq_add_stmt (&seq, stmt);
  return seq;
}
/* Returns the sequence of statements in BB, or NULL once the block has
   been converted to RTL (BB_RTL flag set).  */
static inline gimple_seq
bb_seq (const_basic_block bb)
{
  return (!(bb->flags & BB_RTL)) ? bb->il.gimple.seq : NULL;
}
/* Return the address of BB's statement sequence, or NULL if BB is in
   RTL form.  */
static inline gimple_seq *
bb_seq_addr (basic_block bb)
{
  return (!(bb->flags & BB_RTL)) ? &bb->il.gimple.seq : NULL;
}
/* Sets the sequence of statements in BB to SEQ.  BB must still be in
   GIMPLE form (checking-asserted).  */
static inline void
set_bb_seq (basic_block bb, gimple_seq seq)
{
  gcc_checking_assert (!(bb->flags & BB_RTL));
  bb->il.gimple.seq = seq;
}
/* Return the code for GIMPLE statement G.  */
static inline enum gimple_code
gimple_code (const_gimple g)
{
  return g->code;
}
/* Return the GSS (statement structure) code used by a GIMPLE code, via
   the gss_for_code_ lookup table.  */
static inline enum gimple_statement_structure_enum
gss_for_code (enum gimple_code code)
{
  gcc_gimple_checking_assert ((unsigned int)code < LAST_AND_UNUSED_GIMPLE_CODE);
  return gss_for_code_[code];
}
/* Return which GSS code is used by statement GS.  */
static inline enum gimple_statement_structure_enum
gimple_statement_structure (gimple gs)
{
  return gss_for_code (gimple_code (gs));
}
/* Return true if statement G has sub-statements.  This is only true for
   High GIMPLE statements (bind/try/EH regions, OMP constructs,
   transactions) that carry nested sequences.  */
static inline bool
gimple_has_substatements (gimple g)
{
  switch (gimple_code (g))
    {
    case GIMPLE_BIND:
    case GIMPLE_CATCH:
    case GIMPLE_EH_FILTER:
    case GIMPLE_EH_ELSE:
    case GIMPLE_TRY:
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_TASKGROUP:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_TARGET:
    case GIMPLE_OMP_TEAMS:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_WITH_CLEANUP_EXPR:
    case GIMPLE_TRANSACTION:
      return true;
    default:
      return false;
    }
}
/* Return the basic block holding statement G.  */
static inline basic_block
gimple_bb (const_gimple g)
{
  return g->bb;
}
/* Return the lexical scope block holding statement G.  The block is
   packed into the location word.  */
static inline tree
gimple_block (const_gimple g)
{
  return LOCATION_BLOCK (g->location);
}
/* Set BLOCK to be the lexical scope block holding statement G.  A NULL
   BLOCK strips any block info, keeping only the bare locus.  */
static inline void
gimple_set_block (gimple g, tree block)
{
  if (block)
    g->location =
	COMBINE_LOCATION_DATA (line_table, g->location, block);
  else
    g->location = LOCATION_LOCUS (g->location);
}
/* Return location information for statement G.  */
static inline location_t
gimple_location (const_gimple g)
{
  return g->location;
}
/* Return pointer to location information for statement G.  */
static inline const location_t *
gimple_location_ptr (const_gimple g)
{
  return &g->location;
}
/* Set location information for statement G.  */
static inline void
gimple_set_location (gimple g, location_t location)
{
  g->location = location;
}
/* Return true if G contains location information (a locus other than
   UNKNOWN_LOCATION).  */
static inline bool
gimple_has_location (const_gimple g)
{
  return LOCATION_LOCUS (gimple_location (g)) != UNKNOWN_LOCATION;
}
/* Return the file name of the location of STMT.  */
static inline const char *
gimple_filename (const_gimple stmt)
{
  return LOCATION_FILE (gimple_location (stmt));
}
/* Return the line number of the location of STMT.  */
static inline int
gimple_lineno (const_gimple stmt)
{
  return LOCATION_LINE (gimple_location (stmt));
}
/* Determine whether SEQ is a singleton: non-empty and its first node is
   also its last.  */
static inline bool
gimple_seq_singleton_p (gimple_seq seq)
{
  return ((gimple_seq_first (seq) != NULL)
	  && (gimple_seq_first (seq) == gimple_seq_last (seq)));
}
/* Return true if no warnings should be emitted for statement STMT.  */
static inline bool
gimple_no_warning_p (const_gimple stmt)
{
  return stmt->no_warning;
}
/* Set the no_warning flag of STMT to NO_WARNING.  */
static inline void
gimple_set_no_warning (gimple stmt, bool no_warning)
{
  stmt->no_warning = (unsigned) no_warning;
}
/* Set the visited status on statement STMT to VISITED_P.  */
static inline void
gimple_set_visited (gimple stmt, bool visited_p)
{
  stmt->visited = (unsigned) visited_p;
}
/* Return the visited status for statement STMT.  */
static inline bool
gimple_visited_p (gimple stmt)
{
  return stmt->visited;
}
/* Set pass local flag PLF on statement STMT to VAL_P.  PLF is a bit
   mask or-ed into / masked out of the plf field.  */
static inline void
gimple_set_plf (gimple stmt, enum plf_mask plf, bool val_p)
{
  if (val_p)
    stmt->plf |= (unsigned int) plf;
  else
    stmt->plf &= ~((unsigned int) plf);
}
/* Return the value of pass local flag PLF on statement STMT (nonzero
   iff the flag is set).  */
static inline unsigned int
gimple_plf (gimple stmt, enum plf_mask plf)
{
  return stmt->plf & ((unsigned int) plf);
}
/* Set the UID of statement.  */
static inline void
gimple_set_uid (gimple g, unsigned uid)
{
  g->uid = uid;
}
/* Return the UID of statement.  */
static inline unsigned
gimple_uid (const_gimple g)
{
  return g->uid;
}
/* Make statement G a singleton sequence: no successor, and PREV pointing
   back at G itself (the first node's PREV holds the sequence's last
   node).  */
static inline void
gimple_init_singleton (gimple g)
{
  g->next = NULL;
  g->prev = g;
}
/* Return true if GIMPLE statement G has register or memory operands,
   i.e. its code lies in the [GIMPLE_COND, GIMPLE_RETURN] range of the
   gimple_code enum.  */
static inline bool
gimple_has_ops (const_gimple g)
{
  return gimple_code (g) >= GIMPLE_COND && gimple_code (g) <= GIMPLE_RETURN;
}
/* is_a_helper specializations for the with-ops statement classes: these
   test a code range (via the predicates below) instead of a single
   GIMPLE code.  */
template <>
template <>
inline bool
is_a_helper <const gimple_statement_with_ops>::test (const_gimple gs)
{
  return gimple_has_ops (gs);
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_with_ops>::test (gimple gs)
{
  return gimple_has_ops (gs);
}
/* Return true if GIMPLE statement G has memory operands, i.e. its code
   lies in the [GIMPLE_ASSIGN, GIMPLE_RETURN] range.  */
static inline bool
gimple_has_mem_ops (const_gimple g)
{
  return gimple_code (g) >= GIMPLE_ASSIGN && gimple_code (g) <= GIMPLE_RETURN;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_with_memory_ops>::test (const_gimple gs)
{
  return gimple_has_mem_ops (gs);
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_with_memory_ops>::test (gimple gs)
{
  return gimple_has_mem_ops (gs);
}
/* Return the set of USE operands for statement G, or NULL if G has no
   operands (dyn_cast fails for statements outside the with-ops range).  */
static inline struct use_optype_d *
gimple_use_ops (const_gimple g)
{
  const gimple_statement_with_ops *ops_stmt =
    dyn_cast <const gimple_statement_with_ops> (g);
  if (!ops_stmt)
    return NULL;
  return ops_stmt->use_ops;
}
/* Set USE to be the set of USE operands for statement G.  G must have
   operands (as_a asserts under checking).  */
static inline void
gimple_set_use_ops (gimple g, struct use_optype_d *use)
{
  gimple_statement_with_ops *ops_stmt =
    as_a <gimple_statement_with_ops> (g);
  ops_stmt->use_ops = use;
}
/* Return the single VUSE operand of the statement G, or NULL_TREE if G
   has no memory operands.  */
static inline tree
gimple_vuse (const_gimple g)
{
  const gimple_statement_with_memory_ops *mem_ops_stmt =
     dyn_cast <const gimple_statement_with_memory_ops> (g);
  if (!mem_ops_stmt)
    return NULL_TREE;
  return mem_ops_stmt->vuse;
}
/* Return the single VDEF operand of the statement G, or NULL_TREE if G
   has no memory operands.  */
static inline tree
gimple_vdef (const_gimple g)
{
  const gimple_statement_with_memory_ops *mem_ops_stmt =
     dyn_cast <const gimple_statement_with_memory_ops> (g);
  if (!mem_ops_stmt)
    return NULL_TREE;
  return mem_ops_stmt->vdef;
}
/* Return a pointer to the single VUSE operand of the statement G, or
   NULL if G has no memory operands.  */
static inline tree *
gimple_vuse_ptr (gimple g)
{
  gimple_statement_with_memory_ops *mem_ops_stmt =
     dyn_cast <gimple_statement_with_memory_ops> (g);
  if (!mem_ops_stmt)
    return NULL;
  return &mem_ops_stmt->vuse;
}
/* Return a pointer to the single VDEF operand of the statement G, or
   NULL if G has no memory operands.  */
static inline tree *
gimple_vdef_ptr (gimple g)
{
  gimple_statement_with_memory_ops *mem_ops_stmt =
     dyn_cast <gimple_statement_with_memory_ops> (g);
  if (!mem_ops_stmt)
    return NULL;
  return &mem_ops_stmt->vdef;
}
/* Set the single VUSE operand of the statement G.  G must have memory
   operands.  */
static inline void
gimple_set_vuse (gimple g, tree vuse)
{
  gimple_statement_with_memory_ops *mem_ops_stmt =
    as_a <gimple_statement_with_memory_ops> (g);
  mem_ops_stmt->vuse = vuse;
}
/* Set the single VDEF operand of the statement G.  G must have memory
   operands.  */
static inline void
gimple_set_vdef (gimple g, tree vdef)
{
  gimple_statement_with_memory_ops *mem_ops_stmt =
    as_a <gimple_statement_with_memory_ops> (g);
  mem_ops_stmt->vdef = vdef;
}
/* Return true if statement G has operands and the modified field has
   been set.  Statements without operands always report false.  */
static inline bool
gimple_modified_p (const_gimple g)
{
  return (gimple_has_ops (g)) ? (bool) g->modified : false;
}
/* Set the MODIFIED flag to MODIFIEDP, iff the gimple statement G has
   a MODIFIED field (no-op otherwise).  */
static inline void
gimple_set_modified (gimple s, bool modifiedp)
{
  if (gimple_has_ops (s))
    s->modified = (unsigned) modifiedp;
}
/* Return the tree code for the expression computed by STMT.  This is
   only valid for GIMPLE_COND, GIMPLE_CALL and GIMPLE_ASSIGN.  For
   GIMPLE_CALL, return CALL_EXPR as the expression code for
   consistency.  This is useful when the caller needs to deal with the
   three kinds of computation that GIMPLE supports.  */
static inline enum tree_code
gimple_expr_code (const_gimple stmt)
{
  enum gimple_code code = gimple_code (stmt);
  if (code == GIMPLE_ASSIGN || code == GIMPLE_COND)
    return (enum tree_code) stmt->subcode;
  else
    {
      gcc_gimple_checking_assert (code == GIMPLE_CALL);
      return CALL_EXPR;
    }
}
/* Return true if statement STMT contains volatile operands.  Statements
   without memory operands always report false.  */
static inline bool
gimple_has_volatile_ops (const_gimple stmt)
{
  if (gimple_has_mem_ops (stmt))
    return stmt->has_volatile_ops;
  else
    return false;
}
/* Set the HAS_VOLATILE_OPS flag to VOLATILEP (no-op for statements
   without memory operands).  */
static inline void
gimple_set_has_volatile_ops (gimple stmt, bool volatilep)
{
  if (gimple_has_mem_ops (stmt))
    stmt->has_volatile_ops = (unsigned) volatilep;
}
/* Return true if STMT is in a transaction, i.e. its basic block is.  */
static inline bool
gimple_in_transaction (gimple stmt)
{
  return bb_in_transaction (gimple_bb (stmt));
}
/* Return true if statement STMT may access memory (has memory operands
   and a VUSE).  */
static inline bool
gimple_references_memory_p (gimple stmt)
{
  return gimple_has_mem_ops (stmt) && gimple_vuse (stmt);
}
/* Return the subcode for OMP statement S.  S must be one of the OMP
   statement codes (checking-asserted range).  */
static inline unsigned
gimple_omp_subcode (const_gimple s)
{
  gcc_gimple_checking_assert (gimple_code (s) >= GIMPLE_OMP_ATOMIC_LOAD
	      && gimple_code (s) <= GIMPLE_OMP_TEAMS);
  return s->subcode;
}
/* Set the subcode for OMP statement S to SUBCODE.  */
static inline void
gimple_omp_set_subcode (gimple s, unsigned int subcode)
{
  /* We only have 16 bits for the subcode.  Assert that we are not
     overflowing it.  */
  gcc_gimple_checking_assert (subcode < (1 << 16));
  s->subcode = subcode;
}
/* Set the nowait flag on OMP_RETURN statement S.  */
static inline void
gimple_omp_return_set_nowait (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_OMP_RETURN);
  s->subcode |= GF_OMP_RETURN_NOWAIT;
}
/* Return true if OMP return statement G has the GF_OMP_RETURN_NOWAIT
   flag set.  */
static inline bool
gimple_omp_return_nowait_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_RETURN);
  return (gimple_omp_subcode (g) & GF_OMP_RETURN_NOWAIT) != 0;
}
/* Set the LHS of OMP return.  */
static inline void
gimple_omp_return_set_lhs (gimple g, tree lhs)
{
  gimple_statement_omp_return *omp_return_stmt =
    as_a <gimple_statement_omp_return> (g);
  omp_return_stmt->val = lhs;
}
/* Get the LHS of OMP return.  */
static inline tree
gimple_omp_return_lhs (const_gimple g)
{
  const gimple_statement_omp_return *omp_return_stmt =
    as_a <const gimple_statement_omp_return> (g);
  return omp_return_stmt->val;
}
/* Return a pointer to the LHS of OMP return.  */
static inline tree *
gimple_omp_return_lhs_ptr (gimple g)
{
  gimple_statement_omp_return *omp_return_stmt =
    as_a <gimple_statement_omp_return> (g);
  return &omp_return_stmt->val;
}
/* Return true if OMP section statement G has the GF_OMP_SECTION_LAST
   flag set.  */
static inline bool
gimple_omp_section_last_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
  return (gimple_omp_subcode (g) & GF_OMP_SECTION_LAST) != 0;
}
/* Set the GF_OMP_SECTION_LAST flag on G.  */
static inline void
gimple_omp_section_set_last (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
  g->subcode |= GF_OMP_SECTION_LAST;
}
/* Return true if OMP parallel statement G has the
   GF_OMP_PARALLEL_COMBINED flag set.  */
static inline bool
gimple_omp_parallel_combined_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
  return (gimple_omp_subcode (g) & GF_OMP_PARALLEL_COMBINED) != 0;
}
/* Set the GF_OMP_PARALLEL_COMBINED field in G depending on the boolean
   value of COMBINED_P.  */
static inline void
gimple_omp_parallel_set_combined_p (gimple g, bool combined_p)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
  if (combined_p)
    g->subcode |= GF_OMP_PARALLEL_COMBINED;
  else
    g->subcode &= ~GF_OMP_PARALLEL_COMBINED;
}
/* Return true if OMP atomic load/store statement G has the
   GF_OMP_ATOMIC_NEED_VALUE flag set.  G must be either kind of atomic
   statement (the two-step check accepts LOAD, then asserts STORE).  */
static inline bool
gimple_omp_atomic_need_value_p (const_gimple g)
{
  if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
    GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  return (gimple_omp_subcode (g) & GF_OMP_ATOMIC_NEED_VALUE) != 0;
}
/* Set the GF_OMP_ATOMIC_NEED_VALUE flag on G.  */
static inline void
gimple_omp_atomic_set_need_value (gimple g)
{
  if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
    GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  g->subcode |= GF_OMP_ATOMIC_NEED_VALUE;
}
/* Return true if OMP atomic load/store statement G has the
   GF_OMP_ATOMIC_SEQ_CST flag set.  */
static inline bool
gimple_omp_atomic_seq_cst_p (const_gimple g)
{
  if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
    GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  return (gimple_omp_subcode (g) & GF_OMP_ATOMIC_SEQ_CST) != 0;
}
/* Set the GF_OMP_ATOMIC_SEQ_CST flag on G.  */
static inline void
gimple_omp_atomic_set_seq_cst (gimple g)
{
  if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
    GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  g->subcode |= GF_OMP_ATOMIC_SEQ_CST;
}
/* Return the number of operands for statement GS.  */
static inline unsigned
gimple_num_ops (const_gimple gs)
{
  return gs->num_ops;
}
/* Set the number of operands for statement GS.  */
static inline void
gimple_set_num_ops (gimple gs, unsigned num_ops)
{
  gs->num_ops = num_ops;
}
/* Return the array of operands for statement GS, located at the fixed
   per-structure byte offset recorded in gimple_ops_offset_.  */
static inline tree *
gimple_ops (gimple gs)
{
  size_t off;
  /* All the tuples have their operand vector at the very bottom
     of the structure.  Note that those structures that do not
     have an operand vector have a zero offset.  */
  off = gimple_ops_offset_[gimple_statement_structure (gs)];
  gcc_gimple_checking_assert (off != 0);
  return (tree *) ((char *) gs + off);
}
/* Return operand I for statement GS, or NULL_TREE if GS has no
   operand vector.  */
static inline tree
gimple_op (const_gimple gs, unsigned i)
{
  if (gimple_has_ops (gs))
    {
      gcc_gimple_checking_assert (i < gimple_num_ops (gs));
      return gimple_ops (CONST_CAST_GIMPLE (gs))[i];
    }
  else
    return NULL_TREE;
}
/* Return a pointer to operand I for statement GS, or NULL if GS has no
   operand vector.  */
static inline tree *
gimple_op_ptr (const_gimple gs, unsigned i)
{
  if (gimple_has_ops (gs))
    {
      gcc_gimple_checking_assert (i < gimple_num_ops (gs));
      return gimple_ops (CONST_CAST_GIMPLE (gs)) + i;
    }
  else
    return NULL;
}
/* Set operand I of statement GS to OP.  */
static inline void
gimple_set_op (gimple gs, unsigned i, tree op)
{
  gcc_gimple_checking_assert (gimple_has_ops (gs) && i < gimple_num_ops (gs));
  /* Note.  It may be tempting to assert that OP matches
     is_gimple_operand, but that would be wrong.  Different tuples
     accept slightly different sets of tree operands.  Each caller
     should perform its own validation.  */
  gimple_ops (gs)[i] = op;
}
/* Return true if GS is a GIMPLE_ASSIGN.  */
static inline bool
is_gimple_assign (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_ASSIGN;
}
/* Determine if expression CODE is one of the valid expressions that can
   be used on the RHS of GIMPLE assignments (table lookup).  */
static inline enum gimple_rhs_class
get_gimple_rhs_class (enum tree_code code)
{
  return (enum gimple_rhs_class) gimple_rhs_class_table[(int) code];
}
/* Return the LHS of assignment statement GS (operand 0).  */
static inline tree
gimple_assign_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op (gs, 0);
}
/* Return a pointer to the LHS of assignment statement GS.  */
static inline tree *
gimple_assign_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 0);
}
/* Set LHS to be the LHS operand of assignment statement GS.  If LHS is
   an SSA name, also record GS as its defining statement.  */
static inline void
gimple_assign_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 0, lhs);
  if (lhs && TREE_CODE (lhs) == SSA_NAME)
    SSA_NAME_DEF_STMT (lhs) = gs;
}
/* Return the first operand on the RHS of assignment statement GS
   (operand 1).  */
static inline tree
gimple_assign_rhs1 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op (gs, 1);
}
/* Return a pointer to the first operand on the RHS of assignment
   statement GS.  */
static inline tree *
gimple_assign_rhs1_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 1);
}
/* Set RHS to be the first operand on the RHS of assignment statement GS.  */
static inline void
gimple_assign_set_rhs1 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 1, rhs);
}
/* Return the second operand on the RHS of assignment statement GS.
   If GS does not have two operands, NULL is returned instead.  */
static inline tree
gimple_assign_rhs2 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  if (gimple_num_ops (gs) >= 3)
    return gimple_op (gs, 2);
  else
    return NULL_TREE;
}
/* Return a pointer to the second operand on the RHS of assignment
   statement GS.  */
static inline tree *
gimple_assign_rhs2_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 2);
}
/* Set RHS to be the second operand on the RHS of assignment statement GS.  */
static inline void
gimple_assign_set_rhs2 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 2, rhs);
}
/* Return the third operand on the RHS of assignment statement GS.
   If GS does not have three RHS operands, NULL is returned instead.
   (The previous comment said "two operands" — a copy-paste error; the
   code checks for at least four operands total, i.e. lhs + three RHS.)  */
static inline tree
gimple_assign_rhs3 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  if (gimple_num_ops (gs) >= 4)
    return gimple_op (gs, 3);
  else
    return NULL_TREE;
}
/* Return a pointer to the third operand on the RHS of assignment
   statement GS.  */
static inline tree *
gimple_assign_rhs3_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 3);
}
/* Set RHS to be the third operand on the RHS of assignment statement GS.  */
static inline void
gimple_assign_set_rhs3 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 3, rhs);
}
/* A wrapper around gimple_assign_set_rhs_with_ops_1, for callers which expect
   to see only a maximum of two operands.  */
static inline void
gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *gsi, enum tree_code code,
				tree op1, tree op2)
{
  gimple_assign_set_rhs_with_ops_1 (gsi, code, op1, op2, NULL);
}
/* Returns true if GS is a nontemporal move.  */
static inline bool
gimple_assign_nontemporal_move_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gs->nontemporal_move;
}
/* Sets nontemporal move flag of GS to NONTEMPORAL.  */
static inline void
gimple_assign_set_nontemporal_move (gimple gs, bool nontemporal)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gs->nontemporal_move = nontemporal;
}
/* Return the code of the expression computed on the rhs of assignment
   statement GS.  In case that the RHS is a single object, returns the
   tree code of the object.  */
static inline enum tree_code
gimple_assign_rhs_code (const_gimple gs)
{
  enum tree_code code;
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  code = (enum tree_code) gs->subcode;
  /* While we initially set subcode to the TREE_CODE of the rhs for
     GIMPLE_SINGLE_RHS assigns we do not update that subcode to stay
     in sync when we rewrite stmts into SSA form or do SSA propagations.  */
  if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
    code = TREE_CODE (gimple_assign_rhs1 (gs));
  return code;
}
/* Set CODE to be the code for the expression computed on the RHS of
   assignment S.  */
static inline void
gimple_assign_set_rhs_code (gimple s, enum tree_code code)
{
  GIMPLE_CHECK (s, GIMPLE_ASSIGN);
  s->subcode = code;
}
/* Return the gimple rhs class of the code of the expression computed on
   the rhs of assignment statement GS.
   This will never return GIMPLE_INVALID_RHS.  */
static inline enum gimple_rhs_class
gimple_assign_rhs_class (const_gimple gs)
{
  return get_gimple_rhs_class (gimple_assign_rhs_code (gs));
}
/* Return true if GS is an assignment with a singleton RHS, i.e.,
   there is no operator associated with the assignment itself.
   Unlike gimple_assign_copy_p, this predicate returns true for
   any RHS operand, including those that perform an operation
   and do not have the semantics of a copy, such as COND_EXPR.  */
static inline bool
gimple_assign_single_p (gimple gs)
{
  return (is_gimple_assign (gs)
          && gimple_assign_rhs_class (gs) == GIMPLE_SINGLE_RHS);
}
/* Return true if GS performs a store to its lhs (the lhs exists and is
   not a register).  */
static inline bool
gimple_store_p (gimple gs)
{
  tree lhs = gimple_get_lhs (gs);
  return lhs && !is_gimple_reg (lhs);
}
/* Return true if GS is an assignment that loads from its rhs1: a
   single-rhs assign whose rhs (after stripping to the base address) is
   a declaration or a memory reference.  */
static inline bool
gimple_assign_load_p (gimple gs)
{
  tree rhs;
  if (!gimple_assign_single_p (gs))
    return false;
  rhs = gimple_assign_rhs1 (gs);
  if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
    return true;
  rhs = get_base_address (rhs);
  return (DECL_P (rhs)
	  || TREE_CODE (rhs) == MEM_REF || TREE_CODE (rhs) == TARGET_MEM_REF);
}
/* Return true if S is a type-cast assignment (conversion, view-convert
   or float-to-int truncation).  */
static inline bool
gimple_assign_cast_p (gimple s)
{
  if (is_gimple_assign (s))
    {
      enum tree_code sc = gimple_assign_rhs_code (s);
      return CONVERT_EXPR_CODE_P (sc)
	     || sc == VIEW_CONVERT_EXPR
	     || sc == FIX_TRUNC_EXPR;
    }
  return false;
}
/* Return true if S is a clobber statement.  */
static inline bool
gimple_clobber_p (gimple s)
{
  return gimple_assign_single_p (s)
         && TREE_CLOBBER_P (gimple_assign_rhs1 (s));
}
/* Return true if GS is a GIMPLE_CALL.  */
static inline bool
is_gimple_call (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_CALL;
}
/* Return the LHS of call statement GS (operand 0).  */
static inline tree
gimple_call_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 0);
}
/* Return a pointer to the LHS of call statement GS.  */
static inline tree *
gimple_call_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 0);
}
/* Set LHS to be the LHS operand of call statement GS.  If LHS is an SSA
   name, also record GS as its defining statement.  */
static inline void
gimple_call_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, 0, lhs);
  if (lhs && TREE_CODE (lhs) == SSA_NAME)
    SSA_NAME_DEF_STMT (lhs) = gs;
}
/* Return true if call GS calls an internal-only function, as enumerated
   by internal_fn.  */
static inline bool
gimple_call_internal_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return (gs->subcode & GF_CALL_INTERNAL) != 0;
}
/* Return the target of internal call GS.  Only valid when
   gimple_call_internal_p (checking-asserted).  */
static inline enum internal_fn
gimple_call_internal_fn (const_gimple gs)
{
  gcc_gimple_checking_assert (gimple_call_internal_p (gs));
  return static_cast <const gimple_statement_call *> (gs)->u.internal_fn;
}
/* Return the function type of the function called by GS, or NULL_TREE
   for internal calls (which store an internal_fn in the same union).  */
static inline tree
gimple_call_fntype (const_gimple gs)
{
  const gimple_statement_call *call_stmt =
    as_a <const gimple_statement_call> (gs);
  if (gimple_call_internal_p (gs))
    return NULL_TREE;
  return call_stmt->u.fntype;
}
/* Set the type of the function called by GS to FNTYPE.  GS must not be
   an internal call.  */
static inline void
gimple_call_set_fntype (gimple gs, tree fntype)
{
  gimple_statement_call *call_stmt = as_a <gimple_statement_call> (gs);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  call_stmt->u.fntype = fntype;
}
/* Return the tree node representing the function called by call
   statement GS (operand 1).  */
static inline tree
gimple_call_fn (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 1);
}
/* Return a pointer to the tree node representing the function called by call
   statement GS.  */
static inline tree *
gimple_call_fn_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 1);
}
/* Set FN to be the function called by call statement GS.  GS must not
   be an internal call.  */
static inline void
gimple_call_set_fn (gimple gs, tree fn)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  gimple_set_op (gs, 1, fn);
}
/* Set FNDECL to be the function called by call statement GS. */
static inline void
gimple_call_set_fndecl (gimple gs, tree decl)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
gimple_set_op (gs, 1, build_fold_addr_expr_loc (gimple_location (gs), decl));
}
/* Set internal function FN to be the function called by call statement GS. */
static inline void
gimple_call_set_internal_fn (gimple gs, enum internal_fn fn)
{
gimple_statement_call *call_stmt = as_a <gimple_statement_call> (gs);
gcc_gimple_checking_assert (gimple_call_internal_p (gs));
call_stmt->u.internal_fn = fn;
}
/* If a given GIMPLE_CALL's callee is a FUNCTION_DECL, return it.
Otherwise return NULL. This function is analogous to
get_callee_fndecl in tree land. */
static inline tree
gimple_call_fndecl (const_gimple gs)
{
return gimple_call_addr_fndecl (gimple_call_fn (gs));
}
/* Return the type returned by call statement GS. */
static inline tree
gimple_call_return_type (const_gimple gs)
{
tree type = gimple_call_fntype (gs);
if (type == NULL_TREE)
return TREE_TYPE (gimple_call_lhs (gs));
/* The type returned by a function is the type of its
function type. */
return TREE_TYPE (type);
}
/* Return the static chain for call statement GS. */
static inline tree
gimple_call_chain (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
return gimple_op (gs, 2);
}
/* Return a pointer to the static chain for call statement GS. */
static inline tree *
gimple_call_chain_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
return gimple_op_ptr (gs, 2);
}
/* Set CHAIN to be the static chain for call statement GS. */
static inline void
gimple_call_set_chain (gimple gs, tree chain)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
gimple_set_op (gs, 2, chain);
}
/* Return the number of arguments used by call statement GS.  Operands
0, 1 and 2 hold the LHS, callee and static chain, so the arguments
occupy the remaining num_ops - 3 slots. */
static inline unsigned
gimple_call_num_args (const_gimple gs)
{
unsigned num_ops;
GIMPLE_CHECK (gs, GIMPLE_CALL);
num_ops = gimple_num_ops (gs);
return num_ops - 3;
}
/* Return the argument at position INDEX for call statement GS
(stored at operand INDEX + 3). */
static inline tree
gimple_call_arg (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
return gimple_op (gs, index + 3);
}
/* Return a pointer to the argument at position INDEX for call
statement GS. */
static inline tree *
gimple_call_arg_ptr (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
return gimple_op_ptr (gs, index + 3);
}
/* Set ARG to be the argument at position INDEX for call statement GS. */
static inline void
gimple_call_set_arg (gimple gs, unsigned index, tree arg)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
gimple_set_op (gs, index + 3, arg);
}
/* If TAIL_P is true, mark call statement S as being a tail call
(i.e., a call just before the exit of a function). These calls are
candidate for tail call optimization. */
static inline void
gimple_call_set_tail (gimple s, bool tail_p)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (tail_p)
s->subcode |= GF_CALL_TAILCALL;
else
s->subcode &= ~GF_CALL_TAILCALL;
}
/* Return true if GIMPLE_CALL S is marked as a tail call. */
static inline bool
gimple_call_tail_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
return (s->subcode & GF_CALL_TAILCALL) != 0;
}
/* If RETURN_SLOT_OPT_P is true mark GIMPLE_CALL S as valid for return
slot optimization. This transformation uses the target of the call
expansion as the return slot for calls that return in memory. */
static inline void
gimple_call_set_return_slot_opt (gimple s, bool return_slot_opt_p)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (return_slot_opt_p)
s->subcode |= GF_CALL_RETURN_SLOT_OPT;
else
s->subcode &= ~GF_CALL_RETURN_SLOT_OPT;
}
/* Return true if S is marked for return slot optimization. */
static inline bool
gimple_call_return_slot_opt_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
return (s->subcode & GF_CALL_RETURN_SLOT_OPT) != 0;
}
/* If FROM_THUNK_P is true, mark GIMPLE_CALL S as being the jump from a
thunk to the thunked-to function. */
static inline void
gimple_call_set_from_thunk (gimple s, bool from_thunk_p)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (from_thunk_p)
s->subcode |= GF_CALL_FROM_THUNK;
else
s->subcode &= ~GF_CALL_FROM_THUNK;
}
/* Return true if GIMPLE_CALL S is a jump from a thunk. */
static inline bool
gimple_call_from_thunk_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
return (s->subcode & GF_CALL_FROM_THUNK) != 0;
}
/* If PASS_ARG_PACK_P is true, GIMPLE_CALL S is a stdarg call that needs the
argument pack in its argument list. */
static inline void
gimple_call_set_va_arg_pack (gimple s, bool pass_arg_pack_p)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (pass_arg_pack_p)
s->subcode |= GF_CALL_VA_ARG_PACK;
else
s->subcode &= ~GF_CALL_VA_ARG_PACK;
}
/* Return true if GIMPLE_CALL S is a stdarg call that needs the
argument pack in its argument list. */
static inline bool
gimple_call_va_arg_pack_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
return (s->subcode & GF_CALL_VA_ARG_PACK) != 0;
}
/* Return true if S is a noreturn call (per its call flags). */
static inline bool
gimple_call_noreturn_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
return (gimple_call_flags (s) & ECF_NORETURN) != 0;
}
/* If NOTHROW_P is true, GIMPLE_CALL S is a call that is known to not throw
even if the called function can throw in other cases. */
static inline void
gimple_call_set_nothrow (gimple s, bool nothrow_p)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (nothrow_p)
s->subcode |= GF_CALL_NOTHROW;
else
s->subcode &= ~GF_CALL_NOTHROW;
}
/* Return true if S is a nothrow call (per its call flags). */
static inline bool
gimple_call_nothrow_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
return (gimple_call_flags (s) & ECF_NOTHROW) != 0;
}
/* If FOR_VAR is true, GIMPLE_CALL S is a call to builtin_alloca that
is known to be emitted for VLA objects. Those are wrapped by
stack_save/stack_restore calls and hence can't lead to unbounded
stack growth even when they occur in loops. */
static inline void
gimple_call_set_alloca_for_var (gimple s, bool for_var)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (for_var)
s->subcode |= GF_CALL_ALLOCA_FOR_VAR;
else
s->subcode &= ~GF_CALL_ALLOCA_FOR_VAR;
}
/* Return true if S is a call to builtin_alloca emitted for VLA objects. */
static inline bool
gimple_call_alloca_for_var_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
return (s->subcode & GF_CALL_ALLOCA_FOR_VAR) != 0;
}
/* Copy all the GF_CALL_* flags from ORIG_CALL to DEST_CALL. */
static inline void
gimple_call_copy_flags (gimple dest_call, gimple orig_call)
{
GIMPLE_CHECK (dest_call, GIMPLE_CALL);
GIMPLE_CHECK (orig_call, GIMPLE_CALL);
dest_call->subcode = orig_call->subcode;
}
/* Return a pointer to the points-to solution for the set of call-used
variables of the call CALL. */
static inline struct pt_solution *
gimple_call_use_set (gimple call)
{
gimple_statement_call *call_stmt = as_a <gimple_statement_call> (call);
return &call_stmt->call_used;
}
/* Return a pointer to the points-to solution for the set of
call-clobbered variables of the call CALL. */
static inline struct pt_solution *
gimple_call_clobber_set (gimple call)
{
gimple_statement_call *call_stmt = as_a <gimple_statement_call> (call);
return &call_stmt->call_clobbered;
}
/* Returns true if this is a GIMPLE_ASSIGN or a GIMPLE_CALL with a
non-NULL lhs. */
static inline bool
gimple_has_lhs (gimple stmt)
{
return (is_gimple_assign (stmt)
|| (is_gimple_call (stmt)
&& gimple_call_lhs (stmt) != NULL_TREE));
}
/* Return the code of the predicate computed by conditional statement GS
(stored directly in the subcode). */
static inline enum tree_code
gimple_cond_code (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
return (enum tree_code) gs->subcode;
}
/* Set CODE to be the predicate code for the conditional statement GS. */
static inline void
gimple_cond_set_code (gimple gs, enum tree_code code)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
gs->subcode = code;
}
/* Return the LHS of the predicate computed by conditional statement GS
(operand 0). */
static inline tree
gimple_cond_lhs (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
return gimple_op (gs, 0);
}
/* Return the pointer to the LHS of the predicate computed by conditional
statement GS. */
static inline tree *
gimple_cond_lhs_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
return gimple_op_ptr (gs, 0);
}
/* Set LHS to be the LHS operand of the predicate computed by
conditional statement GS. */
static inline void
gimple_cond_set_lhs (gimple gs, tree lhs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
gimple_set_op (gs, 0, lhs);
}
/* Return the RHS operand of the predicate computed by conditional GS
(operand 1). */
static inline tree
gimple_cond_rhs (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
return gimple_op (gs, 1);
}
/* Return the pointer to the RHS operand of the predicate computed by
conditional GS. */
static inline tree *
gimple_cond_rhs_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
return gimple_op_ptr (gs, 1);
}
/* Set RHS to be the RHS operand of the predicate computed by
conditional statement GS. */
static inline void
gimple_cond_set_rhs (gimple gs, tree rhs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
gimple_set_op (gs, 1, rhs);
}
/* Return the label used by conditional statement GS when its
predicate evaluates to true (operand 2). */
static inline tree
gimple_cond_true_label (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
return gimple_op (gs, 2);
}
/* Set LABEL to be the label used by conditional statement GS when its
predicate evaluates to true. */
static inline void
gimple_cond_set_true_label (gimple gs, tree label)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
gimple_set_op (gs, 2, label);
}
/* Set LABEL to be the label used by conditional statement GS when its
predicate evaluates to false (operand 3). */
static inline void
gimple_cond_set_false_label (gimple gs, tree label)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
gimple_set_op (gs, 3, label);
}
/* Return the label used by conditional statement GS when its
predicate evaluates to false. */
static inline tree
gimple_cond_false_label (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
return gimple_op (gs, 3);
}
/* Set the conditional statement GS to be of the form 'if (1 == 0)',
i.e. an always-false condition. */
static inline void
gimple_cond_make_false (gimple gs)
{
gimple_cond_set_lhs (gs, boolean_true_node);
gimple_cond_set_rhs (gs, boolean_false_node);
gs->subcode = EQ_EXPR;
}
/* Set the conditional statement GS to be of the form 'if (1 == 1)',
i.e. an always-true condition. */
static inline void
gimple_cond_make_true (gimple gs)
{
gimple_cond_set_lhs (gs, boolean_true_node);
gimple_cond_set_rhs (gs, boolean_true_node);
gs->subcode = EQ_EXPR;
}
/* Check if conditional statement GS is of the form 'if (1 == 1)',
'if (0 == 0)', 'if (1 != 0)' or 'if (0 != 1)' */
static inline bool
gimple_cond_true_p (const_gimple gs)
{
tree lhs = gimple_cond_lhs (gs);
tree rhs = gimple_cond_rhs (gs);
enum tree_code code = gimple_cond_code (gs);
/* Only constant boolean operands can be decided statically.  */
if (lhs != boolean_true_node && lhs != boolean_false_node)
return false;
if (rhs != boolean_true_node && rhs != boolean_false_node)
return false;
if (code == NE_EXPR && lhs != rhs)
return true;
if (code == EQ_EXPR && lhs == rhs)
return true;
return false;
}
/* Check if conditional statement GS is of the form 'if (1 != 1)',
'if (0 != 0)', 'if (1 == 0)' or 'if (0 == 1)' */
static inline bool
gimple_cond_false_p (const_gimple gs)
{
tree lhs = gimple_cond_lhs (gs);
tree rhs = gimple_cond_rhs (gs);
enum tree_code code = gimple_cond_code (gs);
/* Only constant boolean operands can be decided statically.  */
if (lhs != boolean_true_node && lhs != boolean_false_node)
return false;
if (rhs != boolean_true_node && rhs != boolean_false_node)
return false;
if (code == NE_EXPR && lhs == rhs)
return true;
if (code == EQ_EXPR && lhs != rhs)
return true;
return false;
}
/* Set the code, LHS and RHS of GIMPLE_COND STMT from CODE, LHS and RHS. */
static inline void
gimple_cond_set_condition (gimple stmt, enum tree_code code, tree lhs, tree rhs)
{
gimple_cond_set_code (stmt, code);
gimple_cond_set_lhs (stmt, lhs);
gimple_cond_set_rhs (stmt, rhs);
}
/* Return the LABEL_DECL node used by GIMPLE_LABEL statement GS. */
static inline tree
gimple_label_label (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_LABEL);
return gimple_op (gs, 0);
}
/* Set LABEL to be the LABEL_DECL node used by GIMPLE_LABEL statement
GS. */
static inline void
gimple_label_set_label (gimple gs, tree label)
{
GIMPLE_CHECK (gs, GIMPLE_LABEL);
gimple_set_op (gs, 0, label);
}
/* Return the destination of the unconditional jump GS. */
static inline tree
gimple_goto_dest (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_GOTO);
return gimple_op (gs, 0);
}
/* Set DEST to be the destination of the unconditional jump GS. */
static inline void
gimple_goto_set_dest (gimple gs, tree dest)
{
GIMPLE_CHECK (gs, GIMPLE_GOTO);
gimple_set_op (gs, 0, dest);
}
/* Return the variables declared in the GIMPLE_BIND statement GS. */
static inline tree
gimple_bind_vars (const_gimple gs)
{
const gimple_statement_bind *bind_stmt =
as_a <const gimple_statement_bind> (gs);
return bind_stmt->vars;
}
/* Set VARS to be the set of variables declared in the GIMPLE_BIND
statement GS. */
static inline void
gimple_bind_set_vars (gimple gs, tree vars)
{
gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
bind_stmt->vars = vars;
}
/* Append VARS to the set of variables declared in the GIMPLE_BIND
statement GS. */
static inline void
gimple_bind_append_vars (gimple gs, tree vars)
{
gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
bind_stmt->vars = chainon (bind_stmt->vars, vars);
}
/* Return a pointer to the GIMPLE sequence contained in the GIMPLE_BIND
statement GS. */
static inline gimple_seq *
gimple_bind_body_ptr (gimple gs)
{
gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
return &bind_stmt->body;
}
/* Return the GIMPLE sequence contained in the GIMPLE_BIND statement GS. */
static inline gimple_seq
gimple_bind_body (gimple gs)
{
return *gimple_bind_body_ptr (gs);
}
/* Set SEQ to be the GIMPLE sequence contained in the GIMPLE_BIND
statement GS. */
static inline void
gimple_bind_set_body (gimple gs, gimple_seq seq)
{
gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
bind_stmt->body = seq;
}
/* Append a statement to the end of a GIMPLE_BIND's body. */
static inline void
gimple_bind_add_stmt (gimple gs, gimple stmt)
{
gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
gimple_seq_add_stmt (&bind_stmt->body, stmt);
}
/* Append a sequence of statements to the end of a GIMPLE_BIND's body. */
static inline void
gimple_bind_add_seq (gimple gs, gimple_seq seq)
{
gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
gimple_seq_add_seq (&bind_stmt->body, seq);
}
/* Return the TREE_BLOCK node associated with GIMPLE_BIND statement
GS. This is analogous to the BIND_EXPR_BLOCK field in trees. */
static inline tree
gimple_bind_block (const_gimple gs)
{
const gimple_statement_bind *bind_stmt =
as_a <const gimple_statement_bind> (gs);
return bind_stmt->block;
}
/* Set BLOCK to be the TREE_BLOCK node associated with GIMPLE_BIND
statement GS.  BLOCK must be NULL_TREE or a BLOCK node. */
static inline void
gimple_bind_set_block (gimple gs, tree block)
{
gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
gcc_gimple_checking_assert (block == NULL_TREE
|| TREE_CODE (block) == BLOCK);
bind_stmt->block = block;
}
/* Return the number of input operands for GIMPLE_ASM GS. */
static inline unsigned
gimple_asm_ninputs (const_gimple gs)
{
const gimple_statement_asm *asm_stmt =
as_a <const gimple_statement_asm> (gs);
return asm_stmt->ni;
}
/* Return the number of output operands for GIMPLE_ASM GS. */
static inline unsigned
gimple_asm_noutputs (const_gimple gs)
{
const gimple_statement_asm *asm_stmt =
as_a <const gimple_statement_asm> (gs);
return asm_stmt->no;
}
/* Return the number of clobber operands for GIMPLE_ASM GS. */
static inline unsigned
gimple_asm_nclobbers (const_gimple gs)
{
const gimple_statement_asm *asm_stmt =
as_a <const gimple_statement_asm> (gs);
return asm_stmt->nc;
}
/* Return the number of label operands for GIMPLE_ASM GS. */
static inline unsigned
gimple_asm_nlabels (const_gimple gs)
{
const gimple_statement_asm *asm_stmt =
as_a <const gimple_statement_asm> (gs);
return asm_stmt->nl;
}
/* Return input operand INDEX of GIMPLE_ASM GS.  Inputs are stored
after the NO output operands. */
static inline tree
gimple_asm_input_op (const_gimple gs, unsigned index)
{
const gimple_statement_asm *asm_stmt =
as_a <const gimple_statement_asm> (gs);
gcc_gimple_checking_assert (index < asm_stmt->ni);
return gimple_op (gs, index + asm_stmt->no);
}
/* Return a pointer to input operand INDEX of GIMPLE_ASM GS. */
static inline tree *
gimple_asm_input_op_ptr (const_gimple gs, unsigned index)
{
const gimple_statement_asm *asm_stmt =
as_a <const gimple_statement_asm> (gs);
gcc_gimple_checking_assert (index < asm_stmt->ni);
return gimple_op_ptr (gs, index + asm_stmt->no);
}
/* Set IN_OP to be input operand INDEX in GIMPLE_ASM GS.  IN_OP must
be a TREE_LIST. */
static inline void
gimple_asm_set_input_op (gimple gs, unsigned index, tree in_op)
{
gimple_statement_asm *asm_stmt = as_a <gimple_statement_asm> (gs);
gcc_gimple_checking_assert (index < asm_stmt->ni
&& TREE_CODE (in_op) == TREE_LIST);
gimple_set_op (gs, index + asm_stmt->no, in_op);
}
/* Return output operand INDEX of GIMPLE_ASM GS.  Outputs occupy the
first NO operand slots. */
static inline tree
gimple_asm_output_op (const_gimple gs, unsigned index)
{
const gimple_statement_asm *asm_stmt =
as_a <const gimple_statement_asm> (gs);
gcc_gimple_checking_assert (index < asm_stmt->no);
return gimple_op (gs, index);
}
/* Return a pointer to output operand INDEX of GIMPLE_ASM GS. */
static inline tree *
gimple_asm_output_op_ptr (const_gimple gs, unsigned index)
{
const gimple_statement_asm *asm_stmt =
as_a <const gimple_statement_asm> (gs);
gcc_gimple_checking_assert (index < asm_stmt->no);
return gimple_op_ptr (gs, index);
}
/* Set OUT_OP to be output operand INDEX in GIMPLE_ASM GS.  OUT_OP must
be a TREE_LIST. */
static inline void
gimple_asm_set_output_op (gimple gs, unsigned index, tree out_op)
{
gimple_statement_asm *asm_stmt = as_a <gimple_statement_asm> (gs);
gcc_gimple_checking_assert (index < asm_stmt->no
&& TREE_CODE (out_op) == TREE_LIST);
gimple_set_op (gs, index, out_op);
}
/* Return clobber operand INDEX of GIMPLE_ASM GS.  Clobbers follow the
output and input operands. */
static inline tree
gimple_asm_clobber_op (const_gimple gs, unsigned index)
{
const gimple_statement_asm *asm_stmt =
as_a <const gimple_statement_asm> (gs);
gcc_gimple_checking_assert (index < asm_stmt->nc);
return gimple_op (gs, index + asm_stmt->ni + asm_stmt->no);
}
/* Set CLOBBER_OP to be clobber operand INDEX in GIMPLE_ASM GS.
CLOBBER_OP must be a TREE_LIST. */
static inline void
gimple_asm_set_clobber_op (gimple gs, unsigned index, tree clobber_op)
{
gimple_statement_asm *asm_stmt = as_a <gimple_statement_asm> (gs);
gcc_gimple_checking_assert (index < asm_stmt->nc
&& TREE_CODE (clobber_op) == TREE_LIST);
gimple_set_op (gs, index + asm_stmt->ni + asm_stmt->no, clobber_op);
}
/* Return label operand INDEX of GIMPLE_ASM GS.  The operand vector is
laid out as outputs [0, no), inputs [no, no+ni), clobbers
[no+ni, no+ni+nc), labels last — so a label lives at
NO + NI + NC + INDEX. */
static inline tree
gimple_asm_label_op (const_gimple gs, unsigned index)
{
const gimple_statement_asm *asm_stmt =
as_a <const gimple_statement_asm> (gs);
gcc_gimple_checking_assert (index < asm_stmt->nl);
/* Include the output count: omitting ASM_STMT->no made this index
into the clobber partition whenever the asm had outputs. */
return gimple_op (gs, index + asm_stmt->no + asm_stmt->ni + asm_stmt->nc);
}
/* Set LABEL_OP to be label operand INDEX in GIMPLE_ASM GS.  LABEL_OP
must be a TREE_LIST.  Labels are stored after the outputs, inputs
and clobbers, at slot NO + NI + NC + INDEX. */
static inline void
gimple_asm_set_label_op (gimple gs, unsigned index, tree label_op)
{
gimple_statement_asm *asm_stmt = as_a <gimple_statement_asm> (gs);
gcc_gimple_checking_assert (index < asm_stmt->nl
&& TREE_CODE (label_op) == TREE_LIST);
/* Include the output count: omitting ASM_STMT->no made this write
into the clobber partition whenever the asm had outputs. */
gimple_set_op (gs, index + asm_stmt->no + asm_stmt->ni + asm_stmt->nc,
label_op);
}
/* Return the string representing the assembly instruction in
GIMPLE_ASM GS. */
static inline const char *
gimple_asm_string (const_gimple gs)
{
const gimple_statement_asm *asm_stmt =
as_a <const gimple_statement_asm> (gs);
return asm_stmt->string;
}
/* Return true if GS is an asm statement marked volatile. */
static inline bool
gimple_asm_volatile_p (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return (gs->subcode & GF_ASM_VOLATILE) != 0;
}
/* If VOLATILE_P is true, mark asm statement GS as volatile. */
static inline void
gimple_asm_set_volatile (gimple gs, bool volatile_p)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
if (volatile_p)
gs->subcode |= GF_ASM_VOLATILE;
else
gs->subcode &= ~GF_ASM_VOLATILE;
}
/* If INPUT_P is true, mark asm GS as an ASM_INPUT. */
static inline void
gimple_asm_set_input (gimple gs, bool input_p)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
if (input_p)
gs->subcode |= GF_ASM_INPUT;
else
gs->subcode &= ~GF_ASM_INPUT;
}
/* Return true if asm GS is an ASM_INPUT. */
static inline bool
gimple_asm_input_p (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return (gs->subcode & GF_ASM_INPUT) != 0;
}
/* Return the types handled by GIMPLE_CATCH statement GS. */
static inline tree
gimple_catch_types (const_gimple gs)
{
const gimple_statement_catch *catch_stmt =
as_a <const gimple_statement_catch> (gs);
return catch_stmt->types;
}
/* Return a pointer to the types handled by GIMPLE_CATCH statement GS. */
static inline tree *
gimple_catch_types_ptr (gimple gs)
{
gimple_statement_catch *catch_stmt = as_a <gimple_statement_catch> (gs);
return &catch_stmt->types;
}
/* Return a pointer to the GIMPLE sequence representing the body of
the handler of GIMPLE_CATCH statement GS. */
static inline gimple_seq *
gimple_catch_handler_ptr (gimple gs)
{
gimple_statement_catch *catch_stmt = as_a <gimple_statement_catch> (gs);
return &catch_stmt->handler;
}
/* Return the GIMPLE sequence representing the body of the handler of
GIMPLE_CATCH statement GS. */
static inline gimple_seq
gimple_catch_handler (gimple gs)
{
return *gimple_catch_handler_ptr (gs);
}
/* Set T to be the set of types handled by GIMPLE_CATCH GS. */
static inline void
gimple_catch_set_types (gimple gs, tree t)
{
gimple_statement_catch *catch_stmt = as_a <gimple_statement_catch> (gs);
catch_stmt->types = t;
}
/* Set HANDLER to be the body of GIMPLE_CATCH GS. */
static inline void
gimple_catch_set_handler (gimple gs, gimple_seq handler)
{
gimple_statement_catch *catch_stmt = as_a <gimple_statement_catch> (gs);
catch_stmt->handler = handler;
}
/* Return the types handled by GIMPLE_EH_FILTER statement GS. */
static inline tree
gimple_eh_filter_types (const_gimple gs)
{
const gimple_statement_eh_filter *eh_filter_stmt =
as_a <const gimple_statement_eh_filter> (gs);
return eh_filter_stmt->types;
}
/* Return a pointer to the types handled by GIMPLE_EH_FILTER statement
GS. */
static inline tree *
gimple_eh_filter_types_ptr (gimple gs)
{
gimple_statement_eh_filter *eh_filter_stmt =
as_a <gimple_statement_eh_filter> (gs);
return &eh_filter_stmt->types;
}
/* Return a pointer to the sequence of statements to execute when the
GIMPLE_EH_FILTER statement fails. */
static inline gimple_seq *
gimple_eh_filter_failure_ptr (gimple gs)
{
gimple_statement_eh_filter *eh_filter_stmt =
as_a <gimple_statement_eh_filter> (gs);
return &eh_filter_stmt->failure;
}
/* Return the sequence of statements to execute when the GIMPLE_EH_FILTER
statement fails. */
static inline gimple_seq
gimple_eh_filter_failure (gimple gs)
{
return *gimple_eh_filter_failure_ptr (gs);
}
/* Set TYPES to be the set of types handled by GIMPLE_EH_FILTER GS. */
static inline void
gimple_eh_filter_set_types (gimple gs, tree types)
{
gimple_statement_eh_filter *eh_filter_stmt =
as_a <gimple_statement_eh_filter> (gs);
eh_filter_stmt->types = types;
}
/* Set FAILURE to be the sequence of statements to execute on failure
for GIMPLE_EH_FILTER GS. */
static inline void
gimple_eh_filter_set_failure (gimple gs, gimple_seq failure)
{
gimple_statement_eh_filter *eh_filter_stmt =
as_a <gimple_statement_eh_filter> (gs);
eh_filter_stmt->failure = failure;
}
/* Get the function decl to be called by the MUST_NOT_THROW region. */
static inline tree
gimple_eh_must_not_throw_fndecl (gimple gs)
{
gimple_statement_eh_mnt *eh_mnt_stmt = as_a <gimple_statement_eh_mnt> (gs);
return eh_mnt_stmt->fndecl;
}
/* Set the function decl to be called by GS to DECL. */
static inline void
gimple_eh_must_not_throw_set_fndecl (gimple gs, tree decl)
{
gimple_statement_eh_mnt *eh_mnt_stmt = as_a <gimple_statement_eh_mnt> (gs);
eh_mnt_stmt->fndecl = decl;
}
/* GIMPLE_EH_ELSE accessors. */
/* Return a pointer to the N_BODY sequence of GIMPLE_EH_ELSE GS. */
static inline gimple_seq *
gimple_eh_else_n_body_ptr (gimple gs)
{
gimple_statement_eh_else *eh_else_stmt =
as_a <gimple_statement_eh_else> (gs);
return &eh_else_stmt->n_body;
}
/* Return the N_BODY sequence of GIMPLE_EH_ELSE GS. */
static inline gimple_seq
gimple_eh_else_n_body (gimple gs)
{
return *gimple_eh_else_n_body_ptr (gs);
}
/* Return a pointer to the E_BODY sequence of GIMPLE_EH_ELSE GS. */
static inline gimple_seq *
gimple_eh_else_e_body_ptr (gimple gs)
{
gimple_statement_eh_else *eh_else_stmt =
as_a <gimple_statement_eh_else> (gs);
return &eh_else_stmt->e_body;
}
/* Return the E_BODY sequence of GIMPLE_EH_ELSE GS. */
static inline gimple_seq
gimple_eh_else_e_body (gimple gs)
{
return *gimple_eh_else_e_body_ptr (gs);
}
/* Set SEQ to be the N_BODY sequence of GIMPLE_EH_ELSE GS. */
static inline void
gimple_eh_else_set_n_body (gimple gs, gimple_seq seq)
{
gimple_statement_eh_else *eh_else_stmt =
as_a <gimple_statement_eh_else> (gs);
eh_else_stmt->n_body = seq;
}
/* Set SEQ to be the E_BODY sequence of GIMPLE_EH_ELSE GS. */
static inline void
gimple_eh_else_set_e_body (gimple gs, gimple_seq seq)
{
gimple_statement_eh_else *eh_else_stmt =
as_a <gimple_statement_eh_else> (gs);
eh_else_stmt->e_body = seq;
}
/* GIMPLE_TRY accessors. */
/* Return the kind of try block represented by GIMPLE_TRY GS. This is
either GIMPLE_TRY_CATCH or GIMPLE_TRY_FINALLY. */
static inline enum gimple_try_flags
gimple_try_kind (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
return (enum gimple_try_flags) (gs->subcode & GIMPLE_TRY_KIND);
}
/* Set the kind of try block represented by GIMPLE_TRY GS.  KIND must
be GIMPLE_TRY_CATCH or GIMPLE_TRY_FINALLY.  Note that changing the
kind overwrites the whole subcode, clearing any other flag bits. */
static inline void
gimple_try_set_kind (gimple gs, enum gimple_try_flags kind)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
gcc_gimple_checking_assert (kind == GIMPLE_TRY_CATCH
|| kind == GIMPLE_TRY_FINALLY);
if (gimple_try_kind (gs) != kind)
gs->subcode = (unsigned int) kind;
}
/* Return the GIMPLE_TRY_CATCH_IS_CLEANUP flag.  Only valid on
GIMPLE_TRY_CATCH statements. */
static inline bool
gimple_try_catch_is_cleanup (const_gimple gs)
{
gcc_gimple_checking_assert (gimple_try_kind (gs) == GIMPLE_TRY_CATCH);
return (gs->subcode & GIMPLE_TRY_CATCH_IS_CLEANUP) != 0;
}
/* Return a pointer to the sequence of statements used as the
body for GIMPLE_TRY GS. */
static inline gimple_seq *
gimple_try_eval_ptr (gimple gs)
{
gimple_statement_try *try_stmt = as_a <gimple_statement_try> (gs);
return &try_stmt->eval;
}
/* Return the sequence of statements used as the body for GIMPLE_TRY GS. */
static inline gimple_seq
gimple_try_eval (gimple gs)
{
return *gimple_try_eval_ptr (gs);
}
/* Return a pointer to the sequence of statements used as the cleanup body for
GIMPLE_TRY GS. */
static inline gimple_seq *
gimple_try_cleanup_ptr (gimple gs)
{
gimple_statement_try *try_stmt = as_a <gimple_statement_try> (gs);
return &try_stmt->cleanup;
}
/* Return the sequence of statements used as the cleanup body for
GIMPLE_TRY GS. */
static inline gimple_seq
gimple_try_cleanup (gimple gs)
{
return *gimple_try_cleanup_ptr (gs);
}
/* Set the GIMPLE_TRY_CATCH_IS_CLEANUP flag.  Only valid on
GIMPLE_TRY_CATCH statements. */
static inline void
gimple_try_set_catch_is_cleanup (gimple g, bool catch_is_cleanup)
{
gcc_gimple_checking_assert (gimple_try_kind (g) == GIMPLE_TRY_CATCH);
if (catch_is_cleanup)
g->subcode |= GIMPLE_TRY_CATCH_IS_CLEANUP;
else
g->subcode &= ~GIMPLE_TRY_CATCH_IS_CLEANUP;
}
/* Set EVAL to be the sequence of statements to use as the body for
GIMPLE_TRY GS. */
static inline void
gimple_try_set_eval (gimple gs, gimple_seq eval)
{
gimple_statement_try *try_stmt = as_a <gimple_statement_try> (gs);
try_stmt->eval = eval;
}
/* Set CLEANUP to be the sequence of statements to use as the cleanup
body for GIMPLE_TRY GS. */
static inline void
gimple_try_set_cleanup (gimple gs, gimple_seq cleanup)
{
gimple_statement_try *try_stmt = as_a <gimple_statement_try> (gs);
try_stmt->cleanup = cleanup;
}
/* Return a pointer to the cleanup sequence for cleanup statement GS. */
static inline gimple_seq *
gimple_wce_cleanup_ptr (gimple gs)
{
gimple_statement_wce *wce_stmt = as_a <gimple_statement_wce> (gs);
return &wce_stmt->cleanup;
}
/* Return the cleanup sequence for cleanup statement GS. */
static inline gimple_seq
gimple_wce_cleanup (gimple gs)
{
return *gimple_wce_cleanup_ptr (gs);
}
/* Set CLEANUP to be the cleanup sequence for GS. */
static inline void
gimple_wce_set_cleanup (gimple gs, gimple_seq cleanup)
{
gimple_statement_wce *wce_stmt = as_a <gimple_statement_wce> (gs);
wce_stmt->cleanup = cleanup;
}
/* Return the CLEANUP_EH_ONLY flag for a WCE tuple (stored in the
subcode). */
static inline bool
gimple_wce_cleanup_eh_only (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
return gs->subcode != 0;
}
/* Set the CLEANUP_EH_ONLY flag for a WCE tuple. */
static inline void
gimple_wce_set_cleanup_eh_only (gimple gs, bool eh_only_p)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
gs->subcode = (unsigned int) eh_only_p;
}
/* Return the maximum number of arguments supported by GIMPLE_PHI GS. */
static inline unsigned
gimple_phi_capacity (const_gimple gs)
{
const gimple_statement_phi *phi_stmt =
as_a <const gimple_statement_phi> (gs);
return phi_stmt->capacity;
}
/* Return the number of arguments in GIMPLE_PHI GS. This must always
be exactly the number of incoming edges for the basic block holding
GS. */
static inline unsigned
gimple_phi_num_args (const_gimple gs)
{
const gimple_statement_phi *phi_stmt =
as_a <const gimple_statement_phi> (gs);
return phi_stmt->nargs;
}
/* Return the SSA name created by GIMPLE_PHI GS. */
static inline tree
gimple_phi_result (const_gimple gs)
{
const gimple_statement_phi *phi_stmt =
as_a <const gimple_statement_phi> (gs);
return phi_stmt->result;
}
/* Return a pointer to the SSA name created by GIMPLE_PHI GS. */
static inline tree *
gimple_phi_result_ptr (gimple gs)
{
gimple_statement_phi *phi_stmt = as_a <gimple_statement_phi> (gs);
return &phi_stmt->result;
}
/* Set RESULT to be the SSA name created by GIMPLE_PHI GS.  When RESULT
is an SSA_NAME its defining statement is updated to GS. */
static inline void
gimple_phi_set_result (gimple gs, tree result)
{
gimple_statement_phi *phi_stmt = as_a <gimple_statement_phi> (gs);
phi_stmt->result = result;
if (result && TREE_CODE (result) == SSA_NAME)
SSA_NAME_DEF_STMT (result) = gs;
}
/* Return the PHI argument corresponding to incoming edge INDEX for
GIMPLE_PHI GS. */
static inline struct phi_arg_d *
gimple_phi_arg (gimple gs, unsigned index)
{
gimple_statement_phi *phi_stmt = as_a <gimple_statement_phi> (gs);
/* Valid slots are 0 .. capacity-1; the previous "<=" allowed forming
a reference one element past the end of the argument array. */
gcc_gimple_checking_assert (index < phi_stmt->capacity);
return &(phi_stmt->args[index]);
}
/* Set PHIARG to be the argument corresponding to incoming edge INDEX
for GIMPLE_PHI GS. */
static inline void
gimple_phi_set_arg (gimple gs, unsigned index, struct phi_arg_d * phiarg)
{
gimple_statement_phi *phi_stmt = as_a <gimple_statement_phi> (gs);
/* Valid argument slots are 0 .. nargs-1; the previous "<=" permitted
an out-of-bounds write one past the last argument. */
gcc_gimple_checking_assert (index < phi_stmt->nargs);
phi_stmt->args[index] = *phiarg;
}
/* Return the PHI nodes for basic block BB, or NULL if there are no
   PHI nodes.  BB must still be in GIMPLE form (asserted via the
   BB_RTL flag), since the il union is interpreted as gimple here.  */
static inline gimple_seq
phi_nodes (const_basic_block bb)
{
  gcc_checking_assert (!(bb->flags & BB_RTL));
  return bb->il.gimple.phi_nodes;
}
/* Return a pointer to the PHI nodes for basic block BB, so callers
   can splice into the sequence in place.  BB must be in GIMPLE form.  */
static inline gimple_seq *
phi_nodes_ptr (basic_block bb)
{
  gcc_checking_assert (!(bb->flags & BB_RTL));
  return &bb->il.gimple.phi_nodes;
}
/* Return the tree operand for argument INDEX of PHI node GS
   (the value flowing in along incoming edge INDEX).  */
static inline tree
gimple_phi_arg_def (gimple gs, size_t index)
{
  return gimple_phi_arg (gs, index)->def;
}
/* Return a pointer to the tree operand for argument INDEX of PHI node
   GS, for in-place operand replacement.  */
static inline tree *
gimple_phi_arg_def_ptr (gimple gs, size_t index)
{
  return &gimple_phi_arg (gs, index)->def;
}
/* Return the edge associated with argument I of phi node GS.  PHI
   arguments are ordered to match the predecessor edges of the block
   holding GS, so this is simply predecessor edge I.  */
static inline edge
gimple_phi_arg_edge (gimple gs, size_t i)
{
  return EDGE_PRED (gimple_bb (gs), i);
}
/* Return the source location of gimple argument I of phi node GS.  */
static inline source_location
gimple_phi_arg_location (gimple gs, size_t i)
{
  return gimple_phi_arg (gs, i)->locus;
}
/* Return the source location of the argument on edge E of phi node
   GS.  E->dest_idx maps the edge to its argument slot.  */
static inline source_location
gimple_phi_arg_location_from_edge (gimple gs, edge e)
{
  return gimple_phi_arg (gs, e->dest_idx)->locus;
}
/* Set the source location of gimple argument I of phi node GS to LOC.  */
static inline void
gimple_phi_arg_set_location (gimple gs, size_t i, source_location loc)
{
  gimple_phi_arg (gs, i)->locus = loc;
}
/* Return TRUE if argument I of phi node GS has a location record,
   i.e. its locus is not UNKNOWN_LOCATION.  */
static inline bool
gimple_phi_arg_has_location (gimple gs, size_t i)
{
  return gimple_phi_arg_location (gs, i) != UNKNOWN_LOCATION;
}
/* Return the region number for GIMPLE_RESX GS. */
static inline int
gimple_resx_region (const_gimple gs)
{
const gimple_statement_resx *resx_stmt =
as_a <const gimple_statement_resx> (gs);
return resx_stmt->region;
}
/* Set REGION to be the region number for GIMPLE_RESX GS. */
static inline void
gimple_resx_set_region (gimple gs, int region)
{
gimple_statement_resx *resx_stmt = as_a <gimple_statement_resx> (gs);
resx_stmt->region = region;
}
/* Return the region number for GIMPLE_EH_DISPATCH GS. */
static inline int
gimple_eh_dispatch_region (const_gimple gs)
{
const gimple_statement_eh_dispatch *eh_dispatch_stmt =
as_a <const gimple_statement_eh_dispatch> (gs);
return eh_dispatch_stmt->region;
}
/* Set REGION to be the region number for GIMPLE_EH_DISPATCH GS. */
static inline void
gimple_eh_dispatch_set_region (gimple gs, int region)
{
gimple_statement_eh_dispatch *eh_dispatch_stmt =
as_a <gimple_statement_eh_dispatch> (gs);
eh_dispatch_stmt->region = region;
}
/* Return the number of labels associated with the switch statement GS.
   Operand 0 of a GIMPLE_SWITCH is the index expression, so the label
   count is the total operand count minus one; the assert guarantees at
   least the default label is present.  */
static inline unsigned
gimple_switch_num_labels (const_gimple gs)
{
  unsigned num_ops;
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  num_ops = gimple_num_ops (gs);
  gcc_gimple_checking_assert (num_ops > 1);
  return num_ops - 1;
}
/* Set NLABELS to be the number of labels for the switch statement GS. */
static inline void
gimple_switch_set_num_labels (gimple g, unsigned nlabels)
{
GIMPLE_CHECK (g, GIMPLE_SWITCH);
gimple_set_num_ops (g, nlabels + 1);
}
/* Return the index variable used by the switch statement GS. */
static inline tree
gimple_switch_index (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
return gimple_op (gs, 0);
}
/* Return a pointer to the index variable for the switch statement GS. */
static inline tree *
gimple_switch_index_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
return gimple_op_ptr (gs, 0);
}
/* Set INDEX to be the index variable for switch statement GS. */
static inline void
gimple_switch_set_index (gimple gs, tree index)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
gcc_gimple_checking_assert (SSA_VAR_P (index) || CONSTANT_CLASS_P (index));
gimple_set_op (gs, 0, index);
}
/* Return the label numbered INDEX.  The default label is 0, followed
   by any labels in a switch statement.  The +1 skips operand 0, which
   holds the index expression rather than a label.  */
static inline tree
gimple_switch_label (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1);
  return gimple_op (gs, index + 1);
}
/* Set the label number INDEX to LABEL. 0 is always the default label. */
static inline void
gimple_switch_set_label (gimple gs, unsigned index, tree label)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1
&& (label == NULL_TREE
|| TREE_CODE (label) == CASE_LABEL_EXPR));
gimple_set_op (gs, index + 1, label);
}
/* Return the default label for a switch statement.  The default label
   is always stored at index 0 and, being a catch-all, must have
   neither a CASE_LOW nor a CASE_HIGH bound (asserted).  */
static inline tree
gimple_switch_default_label (const_gimple gs)
{
  tree label = gimple_switch_label (gs, 0);
  gcc_checking_assert (!CASE_LOW (label) && !CASE_HIGH (label));
  return label;
}
/* Set the default label for a switch statement.  LABEL must be a
   catch-all case (no CASE_LOW/CASE_HIGH bounds); it is stored at
   label index 0.  */
static inline void
gimple_switch_set_default_label (gimple gs, tree label)
{
  gcc_checking_assert (!CASE_LOW (label) && !CASE_HIGH (label));
  gimple_switch_set_label (gs, 0, label);
}
/* Return true if GS is a GIMPLE_DEBUG statement. */
static inline bool
is_gimple_debug (const_gimple gs)
{
return gimple_code (gs) == GIMPLE_DEBUG;
}
/* Return true if S is a GIMPLE_DEBUG BIND statement. */
static inline bool
gimple_debug_bind_p (const_gimple s)
{
if (is_gimple_debug (s))
return s->subcode == GIMPLE_DEBUG_BIND;
return false;
}
/* Return the variable bound in a GIMPLE_DEBUG bind statement. */
static inline tree
gimple_debug_bind_get_var (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
return gimple_op (dbg, 0);
}
/* Return the value bound to the variable in a GIMPLE_DEBUG bind
statement. */
static inline tree
gimple_debug_bind_get_value (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
return gimple_op (dbg, 1);
}
/* Return a pointer to the value bound to the variable in a
GIMPLE_DEBUG bind statement. */
static inline tree *
gimple_debug_bind_get_value_ptr (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
return gimple_op_ptr (dbg, 1);
}
/* Set the variable bound in a GIMPLE_DEBUG bind statement. */
static inline void
gimple_debug_bind_set_var (gimple dbg, tree var)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
gimple_set_op (dbg, 0, var);
}
/* Set the value bound to the variable in a GIMPLE_DEBUG bind
statement. */
static inline void
gimple_debug_bind_set_value (gimple dbg, tree value)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
gimple_set_op (dbg, 1, value);
}
/* The second operand of a GIMPLE_DEBUG_BIND, when the value was
optimized away. */
#define GIMPLE_DEBUG_BIND_NOVALUE NULL_TREE /* error_mark_node */
/* Remove the value bound to the variable in a GIMPLE_DEBUG bind
   statement, marking the value as optimized away by storing the
   GIMPLE_DEBUG_BIND_NOVALUE sentinel in operand 1.  */
static inline void
gimple_debug_bind_reset_value (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  gimple_set_op (dbg, 1, GIMPLE_DEBUG_BIND_NOVALUE);
}
/* Return true if the GIMPLE_DEBUG bind statement is bound to a
value. */
static inline bool
gimple_debug_bind_has_value_p (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
return gimple_op (dbg, 1) != GIMPLE_DEBUG_BIND_NOVALUE;
}
#undef GIMPLE_DEBUG_BIND_NOVALUE
/* Return true if S is a GIMPLE_DEBUG SOURCE BIND statement. */
static inline bool
gimple_debug_source_bind_p (const_gimple s)
{
if (is_gimple_debug (s))
return s->subcode == GIMPLE_DEBUG_SOURCE_BIND;
return false;
}
/* Return the variable bound in a GIMPLE_DEBUG source bind statement. */
static inline tree
gimple_debug_source_bind_get_var (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
return gimple_op (dbg, 0);
}
/* Return the value bound to the variable in a GIMPLE_DEBUG source bind
statement. */
static inline tree
gimple_debug_source_bind_get_value (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
return gimple_op (dbg, 1);
}
/* Return a pointer to the value bound to the variable in a
GIMPLE_DEBUG source bind statement. */
static inline tree *
gimple_debug_source_bind_get_value_ptr (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
return gimple_op_ptr (dbg, 1);
}
/* Set the variable bound in a GIMPLE_DEBUG source bind statement. */
static inline void
gimple_debug_source_bind_set_var (gimple dbg, tree var)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
gimple_set_op (dbg, 0, var);
}
/* Set the value bound to the variable in a GIMPLE_DEBUG source bind
statement. */
static inline void
gimple_debug_source_bind_set_value (gimple dbg, tree value)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
gimple_set_op (dbg, 1, value);
}
/* Return the source line number for statement STMT, or -1 if no
   location information is available for it.  */
static inline int
get_lineno (const_gimple stmt)
{
  if (!stmt)
    return -1;

  location_t loc = gimple_location (stmt);
  return loc != UNKNOWN_LOCATION ? LOCATION_LINE (loc) : -1;
}
/* Return a pointer to the body for the OMP statement GS. */
static inline gimple_seq *
gimple_omp_body_ptr (gimple gs)
{
return &static_cast <gimple_statement_omp *> (gs)->body;
}
/* Return the body for the OMP statement GS. */
static inline gimple_seq
gimple_omp_body (gimple gs)
{
return *gimple_omp_body_ptr (gs);
}
/* Set BODY to be the body for the OMP statement GS. */
static inline void
gimple_omp_set_body (gimple gs, gimple_seq body)
{
static_cast <gimple_statement_omp *> (gs)->body = body;
}
/* Return the name associated with OMP_CRITICAL statement GS. */
static inline tree
gimple_omp_critical_name (const_gimple gs)
{
const gimple_statement_omp_critical *omp_critical_stmt =
as_a <const gimple_statement_omp_critical> (gs);
return omp_critical_stmt->name;
}
/* Return a pointer to the name associated with OMP critical statement GS. */
static inline tree *
gimple_omp_critical_name_ptr (gimple gs)
{
gimple_statement_omp_critical *omp_critical_stmt =
as_a <gimple_statement_omp_critical> (gs);
return &omp_critical_stmt->name;
}
/* Set NAME to be the name associated with OMP critical statement GS. */
static inline void
gimple_omp_critical_set_name (gimple gs, tree name)
{
gimple_statement_omp_critical *omp_critical_stmt =
as_a <gimple_statement_omp_critical> (gs);
omp_critical_stmt->name = name;
}
/* Return the kind of OMP for statement GS, extracted from the
   GF_OMP_FOR_KIND_MASK bits of the statement subcode.  */
static inline int
gimple_omp_for_kind (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
  return (gimple_omp_subcode (g) & GF_OMP_FOR_KIND_MASK);
}
/* Set the OMP for kind of G to KIND.  Only the GF_OMP_FOR_KIND_MASK
   bits of the subcode are replaced; all other flag bits are kept.  */
static inline void
gimple_omp_for_set_kind (gimple g, int kind)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
  g->subcode = (g->subcode & ~GF_OMP_FOR_KIND_MASK)
    | (kind & GF_OMP_FOR_KIND_MASK);
}
/* Return true if OMP for statement G has the
GF_OMP_FOR_COMBINED flag set. */
static inline bool
gimple_omp_for_combined_p (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
return (gimple_omp_subcode (g) & GF_OMP_FOR_COMBINED) != 0;
}
/* Set the GF_OMP_FOR_COMBINED field in G depending on the boolean
value of COMBINED_P. */
static inline void
gimple_omp_for_set_combined_p (gimple g, bool combined_p)
{
GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
if (combined_p)
g->subcode |= GF_OMP_FOR_COMBINED;
else
g->subcode &= ~GF_OMP_FOR_COMBINED;
}
/* Return true if OMP for statement G has the
GF_OMP_FOR_COMBINED_INTO flag set. */
static inline bool
gimple_omp_for_combined_into_p (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
return (gimple_omp_subcode (g) & GF_OMP_FOR_COMBINED_INTO) != 0;
}
/* Set the GF_OMP_FOR_COMBINED_INTO field in G depending on the boolean
value of COMBINED_P. */
static inline void
gimple_omp_for_set_combined_into_p (gimple g, bool combined_p)
{
GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
if (combined_p)
g->subcode |= GF_OMP_FOR_COMBINED_INTO;
else
g->subcode &= ~GF_OMP_FOR_COMBINED_INTO;
}
/* Return the clauses associated with OMP_FOR GS. */
static inline tree
gimple_omp_for_clauses (const_gimple gs)
{
const gimple_statement_omp_for *omp_for_stmt =
as_a <const gimple_statement_omp_for> (gs);
return omp_for_stmt->clauses;
}
/* Return a pointer to the OMP_FOR GS. */
static inline tree *
gimple_omp_for_clauses_ptr (gimple gs)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
return &omp_for_stmt->clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_FOR GS. */
static inline void
gimple_omp_for_set_clauses (gimple gs, tree clauses)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
omp_for_stmt->clauses = clauses;
}
/* Get the collapse count of OMP_FOR GS. */
static inline size_t
gimple_omp_for_collapse (gimple gs)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
return omp_for_stmt->collapse;
}
/* Return the index variable for OMP_FOR GS. */
static inline tree
gimple_omp_for_index (const_gimple gs, size_t i)
{
const gimple_statement_omp_for *omp_for_stmt =
as_a <const gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
return omp_for_stmt->iter[i].index;
}
/* Return a pointer to the index variable for OMP_FOR GS. */
static inline tree *
gimple_omp_for_index_ptr (gimple gs, size_t i)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
return &omp_for_stmt->iter[i].index;
}
/* Set INDEX to be the index variable for OMP_FOR GS. */
static inline void
gimple_omp_for_set_index (gimple gs, size_t i, tree index)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
omp_for_stmt->iter[i].index = index;
}
/* Return the initial value for OMP_FOR GS. */
static inline tree
gimple_omp_for_initial (const_gimple gs, size_t i)
{
const gimple_statement_omp_for *omp_for_stmt =
as_a <const gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
return omp_for_stmt->iter[i].initial;
}
/* Return a pointer to the initial value for OMP_FOR GS. */
static inline tree *
gimple_omp_for_initial_ptr (gimple gs, size_t i)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
return &omp_for_stmt->iter[i].initial;
}
/* Set INITIAL to be the initial value for OMP_FOR GS. */
static inline void
gimple_omp_for_set_initial (gimple gs, size_t i, tree initial)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
omp_for_stmt->iter[i].initial = initial;
}
/* Return the final value for OMP_FOR GS. */
static inline tree
gimple_omp_for_final (const_gimple gs, size_t i)
{
const gimple_statement_omp_for *omp_for_stmt =
as_a <const gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
return omp_for_stmt->iter[i].final;
}
/* Return a pointer to the final value for OMP_FOR GS. */
static inline tree *
gimple_omp_for_final_ptr (gimple gs, size_t i)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
return &omp_for_stmt->iter[i].final;
}
/* Set FINAL to be the final value for OMP_FOR GS. */
static inline void
gimple_omp_for_set_final (gimple gs, size_t i, tree final)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
omp_for_stmt->iter[i].final = final;
}
/* Return the increment value for OMP_FOR GS. */
static inline tree
gimple_omp_for_incr (const_gimple gs, size_t i)
{
const gimple_statement_omp_for *omp_for_stmt =
as_a <const gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
return omp_for_stmt->iter[i].incr;
}
/* Return a pointer to the increment value for OMP_FOR GS. */
static inline tree *
gimple_omp_for_incr_ptr (gimple gs, size_t i)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
return &omp_for_stmt->iter[i].incr;
}
/* Set INCR to be the increment value for OMP_FOR GS. */
static inline void
gimple_omp_for_set_incr (gimple gs, size_t i, tree incr)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
omp_for_stmt->iter[i].incr = incr;
}
/* Return a pointer to the sequence of statements to execute before the OMP_FOR
statement GS starts. */
static inline gimple_seq *
gimple_omp_for_pre_body_ptr (gimple gs)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
return &omp_for_stmt->pre_body;
}
/* Return the sequence of statements to execute before the OMP_FOR
statement GS starts. */
static inline gimple_seq
gimple_omp_for_pre_body (gimple gs)
{
return *gimple_omp_for_pre_body_ptr (gs);
}
/* Set PRE_BODY to be the sequence of statements to execute before the
OMP_FOR statement GS starts. */
static inline void
gimple_omp_for_set_pre_body (gimple gs, gimple_seq pre_body)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
omp_for_stmt->pre_body = pre_body;
}
/* Return the clauses associated with OMP_PARALLEL GS. */
static inline tree
gimple_omp_parallel_clauses (const_gimple gs)
{
const gimple_statement_omp_parallel *omp_parallel_stmt =
as_a <const gimple_statement_omp_parallel> (gs);
return omp_parallel_stmt->clauses;
}
/* Return a pointer to the clauses associated with OMP_PARALLEL GS. */
static inline tree *
gimple_omp_parallel_clauses_ptr (gimple gs)
{
gimple_statement_omp_parallel *omp_parallel_stmt =
as_a <gimple_statement_omp_parallel> (gs);
return &omp_parallel_stmt->clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL
GS. */
static inline void
gimple_omp_parallel_set_clauses (gimple gs, tree clauses)
{
gimple_statement_omp_parallel *omp_parallel_stmt =
as_a <gimple_statement_omp_parallel> (gs);
omp_parallel_stmt->clauses = clauses;
}
/* Return the child function used to hold the body of OMP_PARALLEL GS. */
static inline tree
gimple_omp_parallel_child_fn (const_gimple gs)
{
const gimple_statement_omp_parallel *omp_parallel_stmt =
as_a <const gimple_statement_omp_parallel> (gs);
return omp_parallel_stmt->child_fn;
}
/* Return a pointer to the child function used to hold the body of
OMP_PARALLEL GS. */
static inline tree *
gimple_omp_parallel_child_fn_ptr (gimple gs)
{
gimple_statement_omp_parallel *omp_parallel_stmt =
as_a <gimple_statement_omp_parallel> (gs);
return &omp_parallel_stmt->child_fn;
}
/* Set CHILD_FN to be the child function for OMP_PARALLEL GS. */
static inline void
gimple_omp_parallel_set_child_fn (gimple gs, tree child_fn)
{
gimple_statement_omp_parallel *omp_parallel_stmt =
as_a <gimple_statement_omp_parallel> (gs);
omp_parallel_stmt->child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_PARALLEL GS. */
static inline tree
gimple_omp_parallel_data_arg (const_gimple gs)
{
const gimple_statement_omp_parallel *omp_parallel_stmt =
as_a <const gimple_statement_omp_parallel> (gs);
return omp_parallel_stmt->data_arg;
}
/* Return a pointer to the data argument for OMP_PARALLEL GS. */
static inline tree *
gimple_omp_parallel_data_arg_ptr (gimple gs)
{
gimple_statement_omp_parallel *omp_parallel_stmt =
as_a <gimple_statement_omp_parallel> (gs);
return &omp_parallel_stmt->data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_PARALLEL GS. */
static inline void
gimple_omp_parallel_set_data_arg (gimple gs, tree data_arg)
{
gimple_statement_omp_parallel *omp_parallel_stmt =
as_a <gimple_statement_omp_parallel> (gs);
omp_parallel_stmt->data_arg = data_arg;
}
/* Return the clauses associated with OMP_TASK GS. */
static inline tree
gimple_omp_task_clauses (const_gimple gs)
{
const gimple_statement_omp_task *omp_task_stmt =
as_a <const gimple_statement_omp_task> (gs);
return omp_task_stmt->clauses;
}
/* Return a pointer to the clauses associated with OMP_TASK GS. */
static inline tree *
gimple_omp_task_clauses_ptr (gimple gs)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
return &omp_task_stmt->clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_TASK
GS. */
static inline void
gimple_omp_task_set_clauses (gimple gs, tree clauses)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
omp_task_stmt->clauses = clauses;
}
/* Return the child function used to hold the body of OMP_TASK GS. */
static inline tree
gimple_omp_task_child_fn (const_gimple gs)
{
const gimple_statement_omp_task *omp_task_stmt =
as_a <const gimple_statement_omp_task> (gs);
return omp_task_stmt->child_fn;
}
/* Return a pointer to the child function used to hold the body of
OMP_TASK GS. */
static inline tree *
gimple_omp_task_child_fn_ptr (gimple gs)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
return &omp_task_stmt->child_fn;
}
/* Set CHILD_FN to be the child function for OMP_TASK GS. */
static inline void
gimple_omp_task_set_child_fn (gimple gs, tree child_fn)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
omp_task_stmt->child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_TASK GS. */
static inline tree
gimple_omp_task_data_arg (const_gimple gs)
{
const gimple_statement_omp_task *omp_task_stmt =
as_a <const gimple_statement_omp_task> (gs);
return omp_task_stmt->data_arg;
}
/* Return a pointer to the data argument for OMP_TASK GS. */
static inline tree *
gimple_omp_task_data_arg_ptr (gimple gs)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
return &omp_task_stmt->data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_TASK GS. */
static inline void
gimple_omp_task_set_data_arg (gimple gs, tree data_arg)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
omp_task_stmt->data_arg = data_arg;
}
/* Return the clauses associated with OMP_TASK GS. */
static inline tree
gimple_omp_taskreg_clauses (const_gimple gs)
{
const gimple_statement_omp_taskreg *omp_taskreg_stmt =
as_a <const gimple_statement_omp_taskreg> (gs);
return omp_taskreg_stmt->clauses;
}
/* Return a pointer to the clauses associated with OMP_TASK GS. */
static inline tree *
gimple_omp_taskreg_clauses_ptr (gimple gs)
{
gimple_statement_omp_taskreg *omp_taskreg_stmt =
as_a <gimple_statement_omp_taskreg> (gs);
return &omp_taskreg_stmt->clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_TASK
GS. */
static inline void
gimple_omp_taskreg_set_clauses (gimple gs, tree clauses)
{
gimple_statement_omp_taskreg *omp_taskreg_stmt =
as_a <gimple_statement_omp_taskreg> (gs);
omp_taskreg_stmt->clauses = clauses;
}
/* Return the child function used to hold the body of OMP_TASK GS. */
static inline tree
gimple_omp_taskreg_child_fn (const_gimple gs)
{
const gimple_statement_omp_taskreg *omp_taskreg_stmt =
as_a <const gimple_statement_omp_taskreg> (gs);
return omp_taskreg_stmt->child_fn;
}
/* Return a pointer to the child function used to hold the body of
OMP_TASK GS. */
static inline tree *
gimple_omp_taskreg_child_fn_ptr (gimple gs)
{
gimple_statement_omp_taskreg *omp_taskreg_stmt =
as_a <gimple_statement_omp_taskreg> (gs);
return &omp_taskreg_stmt->child_fn;
}
/* Set CHILD_FN to be the child function for OMP_TASK GS. */
static inline void
gimple_omp_taskreg_set_child_fn (gimple gs, tree child_fn)
{
gimple_statement_omp_taskreg *omp_taskreg_stmt =
as_a <gimple_statement_omp_taskreg> (gs);
omp_taskreg_stmt->child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_TASK GS. */
static inline tree
gimple_omp_taskreg_data_arg (const_gimple gs)
{
const gimple_statement_omp_taskreg *omp_taskreg_stmt =
as_a <const gimple_statement_omp_taskreg> (gs);
return omp_taskreg_stmt->data_arg;
}
/* Return a pointer to the data argument for OMP_TASK GS. */
static inline tree *
gimple_omp_taskreg_data_arg_ptr (gimple gs)
{
gimple_statement_omp_taskreg *omp_taskreg_stmt =
as_a <gimple_statement_omp_taskreg> (gs);
return &omp_taskreg_stmt->data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_TASK GS. */
static inline void
gimple_omp_taskreg_set_data_arg (gimple gs, tree data_arg)
{
gimple_statement_omp_taskreg *omp_taskreg_stmt =
as_a <gimple_statement_omp_taskreg> (gs);
omp_taskreg_stmt->data_arg = data_arg;
}
/* Return the copy function used to hold the body of OMP_TASK GS. */
static inline tree
gimple_omp_task_copy_fn (const_gimple gs)
{
const gimple_statement_omp_task *omp_task_stmt =
as_a <const gimple_statement_omp_task> (gs);
return omp_task_stmt->copy_fn;
}
/* Return a pointer to the copy function used to hold the body of
OMP_TASK GS. */
static inline tree *
gimple_omp_task_copy_fn_ptr (gimple gs)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
return &omp_task_stmt->copy_fn;
}
/* Set CHILD_FN to be the copy function for OMP_TASK GS. */
static inline void
gimple_omp_task_set_copy_fn (gimple gs, tree copy_fn)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
omp_task_stmt->copy_fn = copy_fn;
}
/* Return size of the data block in bytes in OMP_TASK GS. */
static inline tree
gimple_omp_task_arg_size (const_gimple gs)
{
const gimple_statement_omp_task *omp_task_stmt =
as_a <const gimple_statement_omp_task> (gs);
return omp_task_stmt->arg_size;
}
/* Return a pointer to the data block size for OMP_TASK GS. */
static inline tree *
gimple_omp_task_arg_size_ptr (gimple gs)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
return &omp_task_stmt->arg_size;
}
/* Set ARG_SIZE to be the data block size for OMP_TASK GS. */
static inline void
gimple_omp_task_set_arg_size (gimple gs, tree arg_size)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
omp_task_stmt->arg_size = arg_size;
}
/* Return align of the data block in bytes in OMP_TASK GS. */
static inline tree
gimple_omp_task_arg_align (const_gimple gs)
{
const gimple_statement_omp_task *omp_task_stmt =
as_a <const gimple_statement_omp_task> (gs);
return omp_task_stmt->arg_align;
}
/* Return a pointer to the data block align for OMP_TASK GS. */
static inline tree *
gimple_omp_task_arg_align_ptr (gimple gs)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
return &omp_task_stmt->arg_align;
}
/* Set ARG_SIZE to be the data block align for OMP_TASK GS. */
static inline void
gimple_omp_task_set_arg_align (gimple gs, tree arg_align)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
omp_task_stmt->arg_align = arg_align;
}
/* Return the clauses associated with OMP_SINGLE GS. */
static inline tree
gimple_omp_single_clauses (const_gimple gs)
{
const gimple_statement_omp_single *omp_single_stmt =
as_a <const gimple_statement_omp_single> (gs);
return omp_single_stmt->clauses;
}
/* Return a pointer to the clauses associated with OMP_SINGLE GS. */
static inline tree *
gimple_omp_single_clauses_ptr (gimple gs)
{
gimple_statement_omp_single *omp_single_stmt =
as_a <gimple_statement_omp_single> (gs);
return &omp_single_stmt->clauses;
}
/* Set CLAUSES to be the clauses associated with OMP_SINGLE GS. */
static inline void
gimple_omp_single_set_clauses (gimple gs, tree clauses)
{
gimple_statement_omp_single *omp_single_stmt =
as_a <gimple_statement_omp_single> (gs);
omp_single_stmt->clauses = clauses;
}
/* Return the clauses associated with OMP_TARGET GS. */
static inline tree
gimple_omp_target_clauses (const_gimple gs)
{
const gimple_statement_omp_target *omp_target_stmt =
as_a <const gimple_statement_omp_target> (gs);
return omp_target_stmt->clauses;
}
/* Return a pointer to the clauses associated with OMP_TARGET GS. */
static inline tree *
gimple_omp_target_clauses_ptr (gimple gs)
{
gimple_statement_omp_target *omp_target_stmt =
as_a <gimple_statement_omp_target> (gs);
return &omp_target_stmt->clauses;
}
/* Set CLAUSES to be the clauses associated with OMP_TARGET GS. */
static inline void
gimple_omp_target_set_clauses (gimple gs, tree clauses)
{
gimple_statement_omp_target *omp_target_stmt =
as_a <gimple_statement_omp_target> (gs);
omp_target_stmt->clauses = clauses;
}
/* Return the kind of OMP target statement G, extracted from the
   GF_OMP_TARGET_KIND_MASK bits of the statement subcode.  */
static inline int
gimple_omp_target_kind (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_TARGET);
  return (gimple_omp_subcode (g) & GF_OMP_TARGET_KIND_MASK);
}
/* Set the OMP target kind of G to KIND.  Only the
   GF_OMP_TARGET_KIND_MASK bits of the subcode are replaced; all other
   flag bits are kept.  */
static inline void
gimple_omp_target_set_kind (gimple g, int kind)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_TARGET);
  g->subcode = (g->subcode & ~GF_OMP_TARGET_KIND_MASK)
    | (kind & GF_OMP_TARGET_KIND_MASK);
}
/* Return the child function used to hold the body of OMP_TARGET GS. */
static inline tree
gimple_omp_target_child_fn (const_gimple gs)
{
const gimple_statement_omp_target *omp_target_stmt =
as_a <const gimple_statement_omp_target> (gs);
return omp_target_stmt->child_fn;
}
/* Return a pointer to the child function used to hold the body of
OMP_TARGET GS. */
static inline tree *
gimple_omp_target_child_fn_ptr (gimple gs)
{
gimple_statement_omp_target *omp_target_stmt =
as_a <gimple_statement_omp_target> (gs);
return &omp_target_stmt->child_fn;
}
/* Set CHILD_FN to be the child function for OMP_TARGET GS. */
static inline void
gimple_omp_target_set_child_fn (gimple gs, tree child_fn)
{
gimple_statement_omp_target *omp_target_stmt =
as_a <gimple_statement_omp_target> (gs);
omp_target_stmt->child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_TARGET GS. */
static inline tree
gimple_omp_target_data_arg (const_gimple gs)
{
const gimple_statement_omp_target *omp_target_stmt =
as_a <const gimple_statement_omp_target> (gs);
return omp_target_stmt->data_arg;
}
/* Return a pointer to the data argument for OMP_TARGET GS. */
static inline tree *
gimple_omp_target_data_arg_ptr (gimple gs)
{
gimple_statement_omp_target *omp_target_stmt =
as_a <gimple_statement_omp_target> (gs);
return &omp_target_stmt->data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_TARGET GS. */
static inline void
gimple_omp_target_set_data_arg (gimple gs, tree data_arg)
{
gimple_statement_omp_target *omp_target_stmt =
as_a <gimple_statement_omp_target> (gs);
omp_target_stmt->data_arg = data_arg;
}
/* Return the clauses associated with OMP_TEAMS GS. */
static inline tree
gimple_omp_teams_clauses (const_gimple gs)
{
const gimple_statement_omp_teams *omp_teams_stmt =
as_a <const gimple_statement_omp_teams> (gs);
return omp_teams_stmt->clauses;
}
/* Return a pointer to the clauses associated with OMP_TEAMS GS. */
static inline tree *
gimple_omp_teams_clauses_ptr (gimple gs)
{
gimple_statement_omp_teams *omp_teams_stmt =
as_a <gimple_statement_omp_teams> (gs);
return &omp_teams_stmt->clauses;
}
/* Set CLAUSES to be the clauses associated with OMP_TEAMS GS. */
static inline void
gimple_omp_teams_set_clauses (gimple gs, tree clauses)
{
gimple_statement_omp_teams *omp_teams_stmt =
as_a <gimple_statement_omp_teams> (gs);
omp_teams_stmt->clauses = clauses;
}
/* Return the clauses associated with OMP_SECTIONS GS.  */
static inline tree
gimple_omp_sections_clauses (const_gimple gs)
{
  return as_a <const gimple_statement_omp_sections> (gs)->clauses;
}

/* Return a pointer to the clauses associated with OMP_SECTIONS GS.  */
static inline tree *
gimple_omp_sections_clauses_ptr (gimple gs)
{
  return &as_a <gimple_statement_omp_sections> (gs)->clauses;
}

/* Set CLAUSES to be the set of clauses associated with OMP_SECTIONS
   GS.  */
static inline void
gimple_omp_sections_set_clauses (gimple gs, tree clauses)
{
  as_a <gimple_statement_omp_sections> (gs)->clauses = clauses;
}

/* Return the control variable associated with the GIMPLE_OMP_SECTIONS
   in GS.  */
static inline tree
gimple_omp_sections_control (const_gimple gs)
{
  return as_a <const gimple_statement_omp_sections> (gs)->control;
}

/* Return a pointer to the control variable associated with the
   GIMPLE_OMP_SECTIONS in GS.  */
static inline tree *
gimple_omp_sections_control_ptr (gimple gs)
{
  return &as_a <gimple_statement_omp_sections> (gs)->control;
}

/* Set CONTROL to be the control variable associated with the
   GIMPLE_OMP_SECTIONS in GS.  */
static inline void
gimple_omp_sections_set_control (gimple gs, tree control)
{
  as_a <gimple_statement_omp_sections> (gs)->control = control;
}
/* Set COND to be the condition code for collapse level I of OMP_FOR GS.
   COND must be a comparison tree code.  */
static inline void
gimple_omp_for_set_cond (gimple gs, size_t i, enum tree_code cond)
{
  gimple_statement_omp_for *stmt = as_a <gimple_statement_omp_for> (gs);
  gcc_gimple_checking_assert (TREE_CODE_CLASS (cond) == tcc_comparison
			      && i < stmt->collapse);
  stmt->iter[i].cond = cond;
}

/* Return the condition code for collapse level I of OMP_FOR GS.  */
static inline enum tree_code
gimple_omp_for_cond (const_gimple gs, size_t i)
{
  const gimple_statement_omp_for *stmt =
    as_a <const gimple_statement_omp_for> (gs);
  gcc_gimple_checking_assert (i < stmt->collapse);
  return stmt->iter[i].cond;
}
/* Set the value being stored in an atomic store.  */
static inline void
gimple_omp_atomic_store_set_val (gimple g, tree val)
{
  as_a <gimple_statement_omp_atomic_store> (g)->val = val;
}

/* Return the value being stored in an atomic store.  */
static inline tree
gimple_omp_atomic_store_val (const_gimple g)
{
  return as_a <const gimple_statement_omp_atomic_store> (g)->val;
}

/* Return a pointer to the value being stored in an atomic store.  */
static inline tree *
gimple_omp_atomic_store_val_ptr (gimple g)
{
  return &as_a <gimple_statement_omp_atomic_store> (g)->val;
}

/* Set the LHS of an atomic load.  */
static inline void
gimple_omp_atomic_load_set_lhs (gimple g, tree lhs)
{
  as_a <gimple_statement_omp_atomic_load> (g)->lhs = lhs;
}

/* Get the LHS of an atomic load.  */
static inline tree
gimple_omp_atomic_load_lhs (const_gimple g)
{
  return as_a <const gimple_statement_omp_atomic_load> (g)->lhs;
}

/* Return a pointer to the LHS of an atomic load.  */
static inline tree *
gimple_omp_atomic_load_lhs_ptr (gimple g)
{
  return &as_a <gimple_statement_omp_atomic_load> (g)->lhs;
}

/* Set the RHS of an atomic load.  */
static inline void
gimple_omp_atomic_load_set_rhs (gimple g, tree rhs)
{
  as_a <gimple_statement_omp_atomic_load> (g)->rhs = rhs;
}

/* Get the RHS of an atomic load.  */
static inline tree
gimple_omp_atomic_load_rhs (const_gimple g)
{
  return as_a <const gimple_statement_omp_atomic_load> (g)->rhs;
}

/* Return a pointer to the RHS of an atomic load.  */
static inline tree *
gimple_omp_atomic_load_rhs_ptr (gimple g)
{
  return &as_a <gimple_statement_omp_atomic_load> (g)->rhs;
}
/* Get the definition of the control variable in a GIMPLE_OMP_CONTINUE.  */
static inline tree
gimple_omp_continue_control_def (const_gimple g)
{
  return as_a <const gimple_statement_omp_continue> (g)->control_def;
}

/* The same as above, but return the address.  */
static inline tree *
gimple_omp_continue_control_def_ptr (gimple g)
{
  return &as_a <gimple_statement_omp_continue> (g)->control_def;
}

/* Set the definition of the control variable in a GIMPLE_OMP_CONTINUE.  */
static inline void
gimple_omp_continue_set_control_def (gimple g, tree def)
{
  as_a <gimple_statement_omp_continue> (g)->control_def = def;
}

/* Get the use of the control variable in a GIMPLE_OMP_CONTINUE.  */
static inline tree
gimple_omp_continue_control_use (const_gimple g)
{
  return as_a <const gimple_statement_omp_continue> (g)->control_use;
}

/* The same as above, but return the address.  */
static inline tree *
gimple_omp_continue_control_use_ptr (gimple g)
{
  return &as_a <gimple_statement_omp_continue> (g)->control_use;
}

/* Set the use of the control variable in a GIMPLE_OMP_CONTINUE.  */
static inline void
gimple_omp_continue_set_control_use (gimple g, tree use)
{
  as_a <gimple_statement_omp_continue> (g)->control_use = use;
}
/* Return a pointer to the body for the GIMPLE_TRANSACTION statement GS.  */
static inline gimple_seq *
gimple_transaction_body_ptr (gimple gs)
{
  return &as_a <gimple_statement_transaction> (gs)->body;
}

/* Return the body for the GIMPLE_TRANSACTION statement GS.  */
static inline gimple_seq
gimple_transaction_body (gimple gs)
{
  return *gimple_transaction_body_ptr (gs);
}

/* Return the label associated with a GIMPLE_TRANSACTION.  */
static inline tree
gimple_transaction_label (const_gimple gs)
{
  return as_a <const gimple_statement_transaction> (gs)->label;
}

/* Return a pointer to the label associated with a GIMPLE_TRANSACTION.  */
static inline tree *
gimple_transaction_label_ptr (gimple gs)
{
  return &as_a <gimple_statement_transaction> (gs)->label;
}

/* Return the subcode associated with a GIMPLE_TRANSACTION.  */
static inline unsigned int
gimple_transaction_subcode (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
  return gs->subcode;
}

/* Set BODY to be the body for the GIMPLE_TRANSACTION statement GS.  */
static inline void
gimple_transaction_set_body (gimple gs, gimple_seq body)
{
  as_a <gimple_statement_transaction> (gs)->body = body;
}

/* Set the label associated with a GIMPLE_TRANSACTION.  */
static inline void
gimple_transaction_set_label (gimple gs, tree label)
{
  as_a <gimple_statement_transaction> (gs)->label = label;
}

/* Set the subcode associated with a GIMPLE_TRANSACTION.  */
static inline void
gimple_transaction_set_subcode (gimple gs, unsigned int subcode)
{
  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
  gs->subcode = subcode;
}
/* Return a pointer to the return value for GIMPLE_RETURN GS.
   The return value is operand 0 of the statement.  */
static inline tree *
gimple_return_retval_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_RETURN);
  return gimple_op_ptr (gs, 0);
}

/* Return the return value for GIMPLE_RETURN GS.  */
static inline tree
gimple_return_retval (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_RETURN);
  return gimple_op (gs, 0);
}

/* Set RETVAL to be the return value for GIMPLE_RETURN GS.  */
static inline void
gimple_return_set_retval (gimple gs, tree retval)
{
  GIMPLE_CHECK (gs, GIMPLE_RETURN);
  gimple_set_op (gs, 0, retval);
}
/* Returns true when the gimple statement STMT is any of the OpenMP types.
   CASE_GIMPLE_OMP expands to the case labels for every OpenMP statement
   code, so it can also be reused in other switches over gimple codes.  */
#define CASE_GIMPLE_OMP \
case GIMPLE_OMP_PARALLEL: \
case GIMPLE_OMP_TASK: \
case GIMPLE_OMP_FOR: \
case GIMPLE_OMP_SECTIONS: \
case GIMPLE_OMP_SECTIONS_SWITCH: \
case GIMPLE_OMP_SINGLE: \
case GIMPLE_OMP_TARGET: \
case GIMPLE_OMP_TEAMS: \
case GIMPLE_OMP_SECTION: \
case GIMPLE_OMP_MASTER: \
case GIMPLE_OMP_TASKGROUP: \
case GIMPLE_OMP_ORDERED: \
case GIMPLE_OMP_CRITICAL: \
case GIMPLE_OMP_RETURN: \
case GIMPLE_OMP_ATOMIC_LOAD: \
case GIMPLE_OMP_ATOMIC_STORE: \
case GIMPLE_OMP_CONTINUE

/* Return true if STMT's code is one of the OpenMP codes listed above.  */
static inline bool
is_gimple_omp (const_gimple stmt)
{
  switch (gimple_code (stmt))
    {
    CASE_GIMPLE_OMP:
      return true;
    default:
      return false;
    }
}
/* Returns TRUE if statement G is a GIMPLE_NOP (an empty statement).  */
static inline bool
gimple_nop_p (const_gimple g)
{
  return gimple_code (g) == GIMPLE_NOP;
}

/* Return true if GS is a GIMPLE_RESX (resumption of exception
   propagation).  */
static inline bool
is_gimple_resx (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_RESX;
}
/* Return the predictor of GIMPLE_PREDICT statement GS.  The predictor is
   stored in the subcode with the GF_PREDICT_TAKEN bit masked out.  */
static inline enum br_predictor
gimple_predict_predictor (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PREDICT);
  return (enum br_predictor) (gs->subcode & ~GF_PREDICT_TAKEN);
}

/* Set the predictor of GIMPLE_PREDICT statement GS to PREDICTOR,
   preserving the GF_PREDICT_TAKEN outcome bit.  */
static inline void
gimple_predict_set_predictor (gimple gs, enum br_predictor predictor)
{
  GIMPLE_CHECK (gs, GIMPLE_PREDICT);
  gs->subcode = (unsigned) predictor | (gs->subcode & GF_PREDICT_TAKEN);
}

/* Return the outcome of GIMPLE_PREDICT statement GS.  */
static inline enum prediction
gimple_predict_outcome (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PREDICT);
  return (gs->subcode & GF_PREDICT_TAKEN) ? TAKEN : NOT_TAKEN;
}

/* Set the outcome of GIMPLE_PREDICT statement GS to OUTCOME by flipping
   the GF_PREDICT_TAKEN bit.  */
static inline void
gimple_predict_set_outcome (gimple gs, enum prediction outcome)
{
  GIMPLE_CHECK (gs, GIMPLE_PREDICT);
  gs->subcode = (outcome == TAKEN)
		? (gs->subcode | GF_PREDICT_TAKEN)
		: (gs->subcode & ~GF_PREDICT_TAKEN);
}
/* Return the type of the main expression computed by STMT.  Return
   void_type_node if the statement computes nothing.  */
static inline tree
gimple_expr_type (const_gimple stmt)
{
  enum gimple_code code = gimple_code (stmt);
  if (code == GIMPLE_ASSIGN || code == GIMPLE_CALL)
    {
      tree type;
      /* In general we want to pass out a type that can be substituted
	 for both the RHS and the LHS types if there is a possibly
	 useless conversion involved.  That means returning the
	 original RHS type as far as we can reconstruct it.  */
      if (code == GIMPLE_CALL)
	{
	  /* Internal MASK_STORE calls return nothing; use the type of
	     the value argument (operand 3) instead.  */
	  if (gimple_call_internal_p (stmt)
	      && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
	    type = TREE_TYPE (gimple_call_arg (stmt, 3));
	  else
	    type = gimple_call_return_type (stmt);
	}
      else
	switch (gimple_assign_rhs_code (stmt))
	  {
	  case POINTER_PLUS_EXPR:
	    /* For pointer arithmetic the pointer operand carries the
	       interesting type, not the offset.  */
	    type = TREE_TYPE (gimple_assign_rhs1 (stmt));
	    break;
	  default:
	    /* As fallback use the type of the LHS.  */
	    type = TREE_TYPE (gimple_get_lhs (stmt));
	    break;
	  }
      return type;
    }
  else if (code == GIMPLE_COND)
    return boolean_type_node;
  else
    return void_type_node;
}
/* Enum and arrays used for allocation stats.  Keep in sync with
   gimple.c:gimple_alloc_kind_names.  */
enum gimple_alloc_kind
{
  gimple_alloc_kind_assign,	/* Assignments.  */
  gimple_alloc_kind_phi,	/* PHI nodes.  */
  gimple_alloc_kind_cond,	/* Conditionals.  */
  gimple_alloc_kind_rest,	/* Everything else.  */
  gimple_alloc_kind_all
};

/* Per-kind allocation counters and byte totals, defined elsewhere.  */
extern int gimple_alloc_counts[];
extern int gimple_alloc_sizes[];

/* Return the allocation kind for a given stmt CODE.  */
static inline enum gimple_alloc_kind
gimple_alloc_kind (enum gimple_code code)
{
  switch (code)
    {
    case GIMPLE_ASSIGN:
      return gimple_alloc_kind_assign;
    case GIMPLE_PHI:
      return gimple_alloc_kind_phi;
    case GIMPLE_COND:
      return gimple_alloc_kind_cond;
    default:
      return gimple_alloc_kind_rest;
    }
}
/* Return true if a location should not be emitted for this statement
   by annotate_all_with_location.  The flag is kept in pass-local
   flag GF_PLF_1.  */
static inline bool
gimple_do_not_emit_location_p (gimple g)
{
  return gimple_plf (g, GF_PLF_1);
}

/* Mark statement G so a location will not be emitted by
   annotate_one_with_location.  */
static inline void
gimple_set_do_not_emit_location (gimple g)
{
  /* The PLF flags are initialized to 0 when a new tuple is created,
     so no need to initialize it anywhere.  */
  gimple_set_plf (g, GF_PLF_1, true);
}
/* Macros for showing usage statistics. */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
? (x) \
: ((x) < 1024*1024*10 \
? (x) / 1024 \
: (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? 'b' : ((x) < 1024*1024*10 ? 'k' : 'M'))
#endif /* GCC_GIMPLE_H */
|
XT_ICD_update.c | /* ============================================================================
* Copyright (c) 2013 K. Aditya Mohan (Purdue University)
* Copyright (c) 2013 Singanallur Venkatakrishnan (Purdue University)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* Neither the name of K. Aditya Mohan, Purdue
* University, nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
#include "XT_Constants.h"
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "allocate.h"
#include "randlib.h"
#include <time.h>
#include "XT_AMatrix.h"
#include "XT_Profile.h"
#include "XT_Structures.h"
#include "XT_IOMisc.h"
#include "XT_NHICD.h"
#include "omp.h"
#include "XT_MPI.h"
#include <mpi.h>
#include "XT_VoxUpdate.h"
#include "XT_ForwardProject.h"
#include "XT_MPIIO.h"
#include "XT_Debug.h"
#include "XT_OffsetError.h"
/*Returns the flattened 1-D offset of element (i,j,k) in a row-major array
  whose last two dimensions have sizes N_j and N_k.*/
int32_t array_loc_1D (int32_t i, int32_t j, int32_t k, int32_t N_j, int32_t N_k)
{
	return ((i*N_j + j)*N_k + k);
}
/*Returns the largest value among the first 'num' elements of 'array_in'.
  NOTE: assumes num >= 1; array_in[0] is read unconditionally.*/
int32_t find_max(int32_t* array_in, int32_t num)
{
	int32_t idx, best;

	best = array_in[0];
	for (idx = 1; idx < num; idx++)
	{
		if (array_in[idx] > best)
			best = array_in[idx];
	}
	return (best);
}
/*Converts the attenuation value 'val' to Hounsfield units using the linear
  map fixed by the water/air calibration constants from XT_Constants.h.*/
Real_t convert2Hounsfield (Real_t val)
{
	Real_t scale, offset;

	scale = (HOUNSFIELD_WATER_MAP-HOUNSFIELD_AIR_MAP)/(WATER_MASS_ATT_COEFF*WATER_DENSITY-AIR_MASS_ATT_COEFF*AIR_DENSITY)/HFIELD_UNIT_CONV_CONST;
	offset = -scale*(AIR_MASS_ATT_COEFF*AIR_DENSITY*HFIELD_UNIT_CONV_CONST);
	return (scale*val + offset);
}
/*Computes the qGGMRF spatial prior cost value at delta = x_i - x_j. i & j being the voxel and its neighbor.
  Value is |delta|^Q / Sigma_S_Q damped by the denominator term controlled by C_S and Sigma_S_Q_P.*/
Real_t CE_QGGMRF_Spatial_Value(Real_t delta, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr)
{
  return ((pow(fabs(delta),MRF_Q)/TomoInputsPtr->Sigma_S_Q)/(ScannedObjectPtr->C_S + pow(fabs(delta),MRF_Q - MRF_P)/TomoInputsPtr->Sigma_S_Q_P));
}
/*Computes the qGGMRF temporal prior cost value at delta = x_i - x_j. i & j being the voxel and its neighbor.
  Same form as the spatial potential but with the temporal regularization parameters (Sigma_T_*, C_T).*/
Real_t CE_QGGMRF_Temporal_Value(Real_t delta, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr)
{
  return ((pow(fabs(delta),MRF_Q)/TomoInputsPtr->Sigma_T_Q)/(ScannedObjectPtr->C_T + pow(fabs(delta),MRF_Q - MRF_P)/TomoInputsPtr->Sigma_T_Q_P));
}
/*Computes the qGGMRF spatial prior derivative at delta = x_i - x_j, i & j
  being the voxel and its neighbor.  The derivative is odd in delta, so the
  magnitude is computed from |delta| and the sign of delta applied last.*/
Real_t CE_QGGMRF_Spatial_Derivative(Real_t delta, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr)
{
	Real_t ratio, mag, denom, deriv;

	ratio = pow(fabs(delta),MRF_Q - MRF_P)/(TomoInputsPtr->Sigma_S_Q_P);
	mag = pow(fabs(delta),MRF_Q - 1);
	denom = ScannedObjectPtr->C_S + ratio;
	deriv = (mag/(denom*TomoInputsPtr->Sigma_S_Q))*(MRF_Q - ((MRF_Q-MRF_P)*ratio)/(denom));
	return ((delta < 0) ? -deriv : deriv);
}
/*Computes the qGGMRF temporal prior derivative at delta = x_i - x_j, i & j
  being the voxel and its temporal neighbor.  Mirrors the spatial derivative
  with the temporal parameters (Sigma_T_*, C_T); odd in delta.*/
Real_t CE_QGGMRF_Temporal_Derivative(Real_t delta, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr)
{
	Real_t ratio, mag, denom, deriv;

	ratio = pow(fabs(delta),MRF_Q - MRF_P)/(TomoInputsPtr->Sigma_T_Q_P);
	mag = pow(fabs(delta),MRF_Q - 1);
	denom = ScannedObjectPtr->C_T + ratio;
	deriv = (mag/(denom*TomoInputsPtr->Sigma_T_Q))*(MRF_Q - ((MRF_Q-MRF_P)*ratio)/(denom));
	return ((delta < 0) ? -deriv : deriv);
}
/*Computes the qGGMRF spatial prior second derivative at delta = 0.
  Used as the surrogate curvature when a neighbor difference is exactly zero.*/
Real_t CE_QGGMRF_Spatial_SecondDerivative(ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr)
{
  return MRF_Q/(TomoInputsPtr->Sigma_S_Q*ScannedObjectPtr->C_S);
}
/*Computes the qGGMRF temporal prior second derivative at delta = 0.
  (Comment previously said "spatial"; this is the temporal counterpart.)*/
Real_t CE_QGGMRF_Temporal_SecondDerivative(ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr)
{
  return MRF_Q/(TomoInputsPtr->Sigma_T_Q*ScannedObjectPtr->C_T);
}
/*Computes the voxel update and returns it. V is the present value of voxel.
THETA1 and THETA2 are the values used in voxel update. Spatial_Nhood and Time_Nhood gives the
values of voxels in the neighborhood of V. Time_BDFlag and Spatial_BDFlag are masks which determine
whether a neighbor should be included in the neighborhood or not.
Implements one step of the functional substitution (surrogate) update: each
valid neighbor contributes a weight (temp2) and a weighted value (temp1)
derived from the qGGMRF derivative, and the update is relaxed by alpha.*/
Real_t CE_FunctionalSubstitution(Real_t V, Real_t THETA1, Real_t THETA2, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr, Real_t Spatial_Nhood[NHOOD_Y_MAXDIM][NHOOD_X_MAXDIM][NHOOD_Z_MAXDIM], Real_t Time_Nhood[NHOOD_TIME_MAXDIM-1], bool Spatial_BDFlag[NHOOD_Y_MAXDIM][NHOOD_X_MAXDIM][NHOOD_Z_MAXDIM], bool Time_BDFlag[NHOOD_TIME_MAXDIM-1])
{
  Real_t u,temp1=0,temp2=0,temp_const,RefValue=0,Delta0;
  Real_t QGGMRF_Params;
  int32_t i,j,k;
  RefValue = V;
  /*Need to Loop this for multiple iterations of substitute function*/
  /*Spatial neighbors: skip the center voxel itself and any neighbor whose
    boundary flag is false.*/
  for (i=0; i < NHOOD_Y_MAXDIM; i++)
    for (j=0; j < NHOOD_X_MAXDIM; j++)
      for (k=0; k < NHOOD_Z_MAXDIM; k++)
      {
        if(Spatial_BDFlag[i][j][k] == true && (i != (NHOOD_Y_MAXDIM-1)/2 || j != (NHOOD_X_MAXDIM-1)/2 || k != (NHOOD_Z_MAXDIM-1)/2))
        {
          Delta0 = (RefValue - Spatial_Nhood[i][j][k]);
          /*Surrogate coefficient rho'(delta)/delta; at delta = 0 use the
            second derivative (its limit) instead to avoid 0/0.*/
          if(Delta0 != 0)
            QGGMRF_Params = CE_QGGMRF_Spatial_Derivative(Delta0,ScannedObjectPtr,TomoInputsPtr)/(Delta0);
          else {
            QGGMRF_Params = CE_QGGMRF_Spatial_SecondDerivative(ScannedObjectPtr,TomoInputsPtr);
          }
          temp_const = TomoInputsPtr->Spatial_Filter[i][j][k]*QGGMRF_Params;
          temp1 += temp_const*Spatial_Nhood[i][j][k];
          temp2 += temp_const;
        }
      }
  /*Temporal neighbors (previous/next time frames), same surrogate logic.*/
  for (i=0; i < NHOOD_TIME_MAXDIM - 1; i++)
  {
    if(Time_BDFlag[i] == true)
    {
      Delta0 = (RefValue - Time_Nhood[i]);
      if(Delta0 != 0)
        QGGMRF_Params = CE_QGGMRF_Temporal_Derivative(Delta0,ScannedObjectPtr,TomoInputsPtr)/(Delta0);
      else {
        QGGMRF_Params = CE_QGGMRF_Temporal_SecondDerivative(ScannedObjectPtr,TomoInputsPtr);
      }
      temp_const = TomoInputsPtr->Time_Filter[0]*QGGMRF_Params;
      temp1 += temp_const*Time_Nhood[i];
      temp2 += temp_const;
    }
  }
  /*Closed-form minimizer of the quadratic surrogate, then over/under-relaxed
    update with step size alpha.*/
  u=(temp1+ (THETA2*V) - THETA1)/(temp2 + THETA2);
  RefValue = RefValue + TomoInputsPtr->alpha*(u-RefValue);
#ifdef POSITIVITY_CONSTRAINT
  /*Clamp to zero if the object is known to be non-negative.*/
  if (RefValue <= 0)
    RefValue = 0;
#endif
  return RefValue;
}
/*Computes the value of the cost function. 'ErrorSino' is the error sinogram.
  The cost is the sum of a data-fidelity (forward) term computed from the
  weighted error sinogram and a qGGMRF prior term summed over unique neighbor
  pairs; partial sums are combined across MPI nodes.*/
Real_t computeCost(Sinogram* SinogramPtr, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr, Real_arr_t*** ErrorSino)
{
  Real_t cost=0,temp=0, forward=0, prior=0;
  Real_t delta;
  int32_t i,j,k,p,N_z;
  bool j_minus, k_minus, i_plus, j_plus, k_plus, p_plus;
  /*Forward term: quadratic in the weighted error where ProjSelect is set,
    otherwise a linear (thresholded) penalty of the same value at the
    threshold — presumably a generalized-Huber rejection of outlier
    measurements; TODO confirm against XT_NHICD.h.*/
#pragma omp parallel for private(j, k, temp) reduction(+:cost)
  for (i = 0; i < SinogramPtr->N_p; i++)
  for (j = 0; j < SinogramPtr->N_r; j++)
  for (k = 0; k < SinogramPtr->N_t; k++)
  {
    temp = ErrorSino[i][j][k] * sqrt(TomoInputsPtr->Weight[i][j][k]);
    if (SinogramPtr->ProjSelect[i][j][k] == true)
      temp = temp*temp;
    else
      temp = 2.0*TomoInputsPtr->ErrorSinoDelta*TomoInputsPtr->ErrorSinoThresh*fabs(temp) + TomoInputsPtr->ErrorSinoThresh*TomoInputsPtr->ErrorSinoThresh*(1.0-2.0*TomoInputsPtr->ErrorSinoDelta);
    cost += temp;
  }
  cost /= 2.0;
  /*When computing the cost of the prior term it is important to make sure that you don't include the cost of any pair of neighbors more than once. In this code, a certain sense of causality is used to compute the cost. We also assume that the weighting kernel given by 'Filter' is symmetric. Let i, j and k correspond to the three dimensions. If we go forward to i+1, then all neighbors at j-1, j, j+1, k+1, k, k-1 are to be considered. However, if for the same i, if we go forward to j+1, then all k-1, k, and k+1 should be considered. For same i and j, only the neighbor at k+1 is considered.*/
  temp = 0;
  /*N_z includes ghost slices; the last node has one fewer — presumably
    because it has no neighbor node above it; verify against XT_MPI.c.*/
  N_z = ScannedObjectPtr->N_z + 2;
  if (TomoInputsPtr->node_rank == TomoInputsPtr->node_num-1)
    N_z = ScannedObjectPtr->N_z + 1;
#pragma omp parallel for private(delta, p, j, k, j_minus, k_minus, p_plus, i_plus, j_plus, k_plus) reduction(+:temp)
  for (i = 0; i < ScannedObjectPtr->N_time; i++)
  for (p = 1; p < ScannedObjectPtr->N_z + 1; p++)
  for (j = 0; j < ScannedObjectPtr->N_y; j++)
  {
    for (k = 0; k < ScannedObjectPtr->N_x; k++)
    {
      /*Boundary flags: which forward neighbors exist for this voxel.*/
      j_minus = (j - 1 >= 0)? true : false;
      k_minus = (k - 1 >= 0)? true : false;
      p_plus = (p + 1 < N_z)? true : false;
      i_plus = (i + 1 < ScannedObjectPtr->N_time)? true : false;
      j_plus = (j + 1 < ScannedObjectPtr->N_y)? true : false;
      k_plus = (k + 1 < ScannedObjectPtr->N_x)? true : false;
      /*Same-slice neighbors (k+1, and the j+1 row).*/
      if(k_plus == true) {
        delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p][j][k + 1]);
        temp += TomoInputsPtr->Spatial_Filter[1][1][2] * CE_QGGMRF_Spatial_Value(delta,ScannedObjectPtr,TomoInputsPtr);
      }
      if(j_plus == true) {
        if(k_minus == true) {
          delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p][j + 1][k - 1]);
          temp += TomoInputsPtr->Spatial_Filter[1][2][0] * CE_QGGMRF_Spatial_Value(delta,ScannedObjectPtr,TomoInputsPtr);
        }
        delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p][j + 1][k]);
        temp += TomoInputsPtr->Spatial_Filter[1][2][1] * CE_QGGMRF_Spatial_Value(delta,ScannedObjectPtr,TomoInputsPtr);
        if(k_plus == true) {
          delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p][j + 1][k + 1]);
          temp += TomoInputsPtr->Spatial_Filter[1][2][2] * CE_QGGMRF_Spatial_Value(delta,ScannedObjectPtr,TomoInputsPtr);
        }
      }
      /*All nine neighbors in the p+1 slice.*/
      if (p_plus == true)
      {
        if(j_minus == true)
        {
          delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j - 1][k];
          temp += TomoInputsPtr->Spatial_Filter[2][0][1] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr);
        }
        delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p+1][j][k];
        temp += TomoInputsPtr->Spatial_Filter[2][1][1] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr);
        if(j_plus == true)
        {
          delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p+1][j + 1][k];
          temp += TomoInputsPtr->Spatial_Filter[2][2][1] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr);
        }
        if(j_minus == true)
        {
          if(k_minus == true)
          {
            delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j - 1][k - 1];
            temp += TomoInputsPtr->Spatial_Filter[2][0][0] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr);
          }
          if(k_plus == true)
          {
            delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j - 1][k + 1];
            temp += TomoInputsPtr->Spatial_Filter[2][0][2] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr);
          }
        }
        if(k_minus == true)
        {
          delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j][k - 1];
          temp += TomoInputsPtr->Spatial_Filter[2][1][0] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr);
        }
        if(j_plus == true)
        {
          if(k_minus == true)
          {
            delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j + 1][k - 1];
            temp += TomoInputsPtr->Spatial_Filter[2][2][0] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr);
          }
          if(k_plus == true)
          {
            delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j + 1][k + 1];
            temp += TomoInputsPtr->Spatial_Filter[2][2][2] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr);
          }
        }
        if(k_plus == true)
        {
          delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j][k + 1];
          temp += TomoInputsPtr->Spatial_Filter[2][1][2] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr);
        }
      }
      /*Temporal neighbor at the next time frame.*/
      if(i_plus == true) {
        delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i+1][p][j][k]);
        temp += TomoInputsPtr->Time_Filter[0] * CE_QGGMRF_Temporal_Value(delta,ScannedObjectPtr,TomoInputsPtr);
      }
    }
  }
  /*Use MPI reduction operation to add the forward and prior costs from all nodes*/
  MPI_Reduce(&cost, &forward, 1, MPI_REAL_DATATYPE, MPI_SUM, 0, MPI_COMM_WORLD);
  MPI_Reduce(&temp, &prior, 1, MPI_REAL_DATATYPE, MPI_SUM, 0, MPI_COMM_WORLD);
  if (TomoInputsPtr->node_rank == 0)
  {
    check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Scaled error sino cost = %f\n",forward);
    check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Decrease in scaled error sino cost = %f\n",TomoInputsPtr->ErrorSino_Cost-forward);
    TomoInputsPtr->ErrorSino_Cost = forward;
    /*Add the log-variance normalization term of the Gaussian likelihood.*/
    forward += (Real_t)TomoInputsPtr->node_num*(Real_t)SinogramPtr->N_p*(Real_t)SinogramPtr->N_r*(Real_t)SinogramPtr->N_t*log(TomoInputsPtr->var_est)/2;
    check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Forward cost = %f\n",forward);
    check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Prior cost = %f\n",prior);
    TomoInputsPtr->Forward_Cost = forward;
    TomoInputsPtr->Prior_Cost = prior;
    cost = forward + prior;
  }
  /*Broadcast the value of cost to all nodes*/
  MPI_Bcast(&cost, 1, MPI_REAL_DATATYPE, 0, MPI_COMM_WORLD);
  return cost;
}
/*Upsamples the (N_time x N_z x N_y x N_x) size 'Init' by a factor of 2 along the x-y plane and stores it in 'Object'.
  Bilinear interpolation is separable: each slice is first upsampled along x
  into 'buffer' (N_y x 2*N_x), then along y into Object.  Interior samples use
  (1/4, 3/4) weights; edge samples are replicated/averaged.  Each (time, slice)
  pair is processed independently, so the outer loop is OpenMP-parallel with a
  per-iteration scratch buffer.*/
void upsample_bilinear_2D (Real_arr_t**** Object, Real_arr_t**** Init, int32_t N_time, int32_t N_z, int32_t N_y, int32_t N_x)
{
  int32_t i, j, k, m;
  Real_arr_t **buffer;
#pragma omp parallel for private(buffer, m, j, k)
  for (i=0; i < N_time; i++)
  for (m=0; m < N_z; m++)
  {
    /*Scratch buffer allocated per iteration to keep threads independent.*/
    buffer = (Real_arr_t**)multialloc(sizeof(Real_arr_t), 2, N_y, 2*N_x);
    /*Pass 1: upsample along x into buffer.*/
    for (j=0; j < N_y; j++){
      buffer[j][0] = Init[i][m][j][0];
      buffer[j][1] = (3.0*Init[i][m][j][0] + Init[i][m][j][1])/4.0;
      buffer[j][2*N_x - 1] = Init[i][m][j][N_x - 1];
      buffer[j][2*N_x - 2] = (Init[i][m][j][N_x - 2] + 3.0*Init[i][m][j][N_x - 1])/4.0;
      for (k=1; k < N_x - 1; k++){
        buffer[j][2*k] = (Init[i][m][j][k-1] + 3.0*Init[i][m][j][k])/4.0;
        buffer[j][2*k + 1] = (3.0*Init[i][m][j][k] + Init[i][m][j][k+1])/4.0;
      }
    }
    /*Pass 2: upsample along y from buffer into Object (edges first).*/
    for (k=0; k < 2*N_x; k++){
      Object[i][m][0][k] = buffer[0][k];
      Object[i][m][1][k] = (3.0*buffer[0][k] + buffer[1][k])/4.0;
      Object[i][m][2*N_y-1][k] = buffer[N_y-1][k];
      Object[i][m][2*N_y-2][k] = (buffer[N_y-2][k] + 3.0*buffer[N_y-1][k])/4.0;
    }
    for (j=1; j<N_y-1; j++){
      for (k=0; k<2*N_x; k++){
        Object[i][m][2*j][k] = (buffer[j-1][k] + 3.0*buffer[j][k])/4.0;
        Object[i][m][2*j + 1][k] = (3*buffer[j][k] + buffer[j+1][k])/4.0;
      }
    }
    multifree(buffer,2);
  }
}
/*Upsamples the (N_z x N_y x N_x) size 'Init' by a factor of 2 along the x-y plane and stores it in 'Object'.
  Same separable bilinear scheme as upsample_bilinear_2D, but for a single
  time frame and sequential.  Output slices are written at index slice+1 —
  presumably Object carries a ghost slice at index 0; confirm with caller.*/
void upsample_object_bilinear_2D (Real_arr_t*** Object, Real_arr_t*** Init, int32_t N_z, int32_t N_y, int32_t N_x)
{
  int32_t j, k, slice;
  Real_arr_t **buffer;
  buffer = (Real_arr_t**)multialloc(sizeof(Real_arr_t), 2, N_y, 2*N_x);
  for (slice=0; slice < N_z; slice++){
    /*Pass 1: upsample along x into buffer.*/
    for (j=0; j < N_y; j++){
      buffer[j][0] = Init[slice][j][0];
      buffer[j][1] = (3.0*Init[slice][j][0] + Init[slice][j][1])/4.0;
      buffer[j][2*N_x - 1] = Init[slice][j][N_x - 1];
      buffer[j][2*N_x - 2] = (Init[slice][j][N_x - 2] + 3.0*Init[slice][j][N_x - 1])/4.0;
      for (k=1; k < N_x - 1; k++){
        buffer[j][2*k] = (Init[slice][j][k-1] + 3.0*Init[slice][j][k])/4.0;
        buffer[j][2*k + 1] = (3.0*Init[slice][j][k] + Init[slice][j][k+1])/4.0;
      }
    }
    /*Pass 2: upsample along y into Object (edges first, then interior).*/
    for (k=0; k < 2*N_x; k++){
      Object[slice+1][0][k] = buffer[0][k];
      Object[slice+1][1][k] = (3.0*buffer[0][k] + buffer[1][k])/4.0;
      Object[slice+1][2*N_y-1][k] = buffer[N_y-1][k];
      Object[slice+1][2*N_y-2][k] = (buffer[N_y-2][k] + 3.0*buffer[N_y-1][k])/4.0;
    }
    for (j=1; j<N_y-1; j++){
      for (k=0; k<2*N_x; k++){
        Object[slice+1][2*j][k] = (buffer[j-1][k] + 3.0*buffer[j][k])/4.0;
        Object[slice+1][2*j + 1][k] = (3*buffer[j][k] + buffer[j+1][k])/4.0;
      }
    }
  }
  multifree(buffer,2);
}
/*Upsamples the (N_time x N_z x N_y x N_x) size 'Init' by a factor of 2 in all
  three spatial dimensions (x, y, z) and stores it in 'Object'.  Separable
  bilinear passes: x into buffer2D, y into buffer3D, then z into Object.
  Time frames are independent, so the outer loop is OpenMP-parallel with
  per-thread scratch buffers.*/
void upsample_bilinear_3D (Real_arr_t**** Object, Real_arr_t**** Init, int32_t N_time, int32_t N_z, int32_t N_y, int32_t N_x)
{
  int32_t i, j, k, slice;
  Real_t ***buffer2D, ***buffer3D;
#pragma omp parallel for private(buffer2D, buffer3D, slice, j, k)
  for (i=0; i < N_time; i++)
  {
    buffer2D = (Real_t***)multialloc(sizeof(Real_t), 3, N_z, N_y, 2*N_x);
    buffer3D = (Real_t***)multialloc(sizeof(Real_t), 3, N_z, 2*N_y, 2*N_x);
    for (slice=0; slice < N_z; slice++){
      /*Pass 1: upsample along x.*/
      for (j=0; j < N_y; j++){
        buffer2D[slice][j][0] = Init[i][slice][j][0];
        buffer2D[slice][j][1] = (3.0*Init[i][slice][j][0] + Init[i][slice][j][1])/4.0;
        buffer2D[slice][j][2*N_x - 1] = Init[i][slice][j][N_x - 1];
        buffer2D[slice][j][2*N_x - 2] = (Init[i][slice][j][N_x - 2] + 3.0*Init[i][slice][j][N_x - 1])/4.0;
        for (k=1; k < N_x - 1; k++){
          buffer2D[slice][j][2*k] = (Init[i][slice][j][k-1] + 3.0*Init[i][slice][j][k])/4.0;
          buffer2D[slice][j][2*k + 1] = (3.0*Init[i][slice][j][k] + Init[i][slice][j][k+1])/4.0;
        }
      }
      /*Pass 2: upsample along y.*/
      for (k=0; k < 2*N_x; k++){
        buffer3D[slice][0][k] = buffer2D[slice][0][k];
        buffer3D[slice][1][k] = (3.0*buffer2D[slice][0][k] + buffer2D[slice][1][k])/4.0;
        buffer3D[slice][2*N_y-1][k] = buffer2D[slice][N_y-1][k];
        buffer3D[slice][2*N_y-2][k] = (buffer2D[slice][N_y-2][k] + 3.0*buffer2D[slice][N_y-1][k])/4.0;
      }
      for (j=1; j<N_y-1; j++)
      for (k=0; k<2*N_x; k++){
        buffer3D[slice][2*j][k] = (buffer2D[slice][j-1][k] + 3.0*buffer2D[slice][j][k])/4.0;
        buffer3D[slice][2*j + 1][k] = (3*buffer2D[slice][j][k] + buffer2D[slice][j+1][k])/4.0;
      }
    }
    /*Pass 3: upsample along z into Object (edge slices, then interior).*/
    for (j=0; j<2*N_y; j++)
    for (k=0; k<2*N_x; k++){
      Object[i][0][j][k] = buffer3D[0][j][k];
      Object[i][1][j][k] = (3.0*buffer3D[0][j][k] + buffer3D[1][j][k])/4.0;
      Object[i][2*N_z-1][j][k] = buffer3D[N_z-1][j][k];
      Object[i][2*N_z-2][j][k] = (3.0*buffer3D[N_z-1][j][k] + buffer3D[N_z-2][j][k])/4.0;
    }
    for (slice=1; slice < N_z-1; slice++)
    for (j=0; j<2*N_y; j++)
    for (k=0; k<2*N_x; k++){
      Object[i][2*slice][j][k] = (buffer3D[slice-1][j][k] + 3.0*buffer3D[slice][j][k])/4.0;
      Object[i][2*slice+1][j][k] = (3.0*buffer3D[slice][j][k] + buffer3D[slice+1][j][k])/4.0;
    }
    multifree(buffer2D,3);
    multifree(buffer3D,3);
  }
}
/*'InitObject' intializes the Object to be reconstructed to either 0 or an interpolated version of the previous reconstruction. It is used in multi resolution reconstruction in which after every coarse resolution reconstruction the object should be intialized with an interpolated version of the reconstruction following which the object will be reconstructed at a finer resolution.*/
/*Upsamples the (N_time x N_z x N_y x N_x) size 'Init' by a factor of 2 along the in 3D x-y-z coordinates and stores it in 'Object'*/
/*Upsamples the (N_z x N_y x N_x) array 'Init' by a factor of 2 in x, y and z using
bilinear interpolation and stores the (2*N_z x 2*N_y x 2*N_x) result in 'Object'.
Interior output samples use 1:3 / 3:1 weighting between the two nearest input
samples; edge output samples replicate or one-sidedly interpolate the boundary.
NOTE(review): output z-slices are written at indices 1 .. 2*N_z rather than
0 .. 2*N_z-1 - presumably to leave index 0 free for the extra boundary slice
used in the MPI halo exchange; confirm 'Object' is allocated accordingly.*/
void upsample_object_bilinear_3D (Real_arr_t*** Object, Real_arr_t*** Init, int32_t N_z, int32_t N_y, int32_t N_x)
{
int32_t j, k, slice;
/*buffer2D: input upsampled along x only; buffer3D: upsampled along x and y*/
Real_t ***buffer2D, ***buffer3D;
buffer2D = (Real_t***)multialloc(sizeof(Real_t), 3, N_z, N_y, 2*N_x);
buffer3D = (Real_t***)multialloc(sizeof(Real_t), 3, N_z, 2*N_y, 2*N_x);
for (slice=0; slice < N_z; slice++){
/*Stage 1: upsample each row along x (edges first, then the interior)*/
for (j=0; j < N_y; j++){
buffer2D[slice][j][0] = Init[slice][j][0];
buffer2D[slice][j][1] = (3.0*Init[slice][j][0] + Init[slice][j][1])/4.0;
buffer2D[slice][j][2*N_x - 1] = Init[slice][j][N_x - 1];
buffer2D[slice][j][2*N_x - 2] = (Init[slice][j][N_x - 2] + 3.0*Init[slice][j][N_x - 1])/4.0;
for (k=1; k < N_x - 1; k++){
buffer2D[slice][j][2*k] = (Init[slice][j][k-1] + 3.0*Init[slice][j][k])/4.0;
buffer2D[slice][j][2*k + 1] = (3.0*Init[slice][j][k] + Init[slice][j][k+1])/4.0;
}
}
/*Stage 2: upsample along y (boundary rows first)*/
for (k=0; k < 2*N_x; k++){
buffer3D[slice][0][k] = buffer2D[slice][0][k];
buffer3D[slice][1][k] = (3.0*buffer2D[slice][0][k] + buffer2D[slice][1][k])/4.0;
buffer3D[slice][2*N_y-1][k] = buffer2D[slice][N_y-1][k];
buffer3D[slice][2*N_y-2][k] = (buffer2D[slice][N_y-2][k] + 3.0*buffer2D[slice][N_y-1][k])/4.0;
}
for (j=1; j<N_y-1; j++)
for (k=0; k<2*N_x; k++){
buffer3D[slice][2*j][k] = (buffer2D[slice][j-1][k] + 3.0*buffer2D[slice][j][k])/4.0;
buffer3D[slice][2*j + 1][k] = (3*buffer2D[slice][j][k] + buffer2D[slice][j+1][k])/4.0;
}
}
/*Stage 3: upsample along z directly into Object (note the +1 slice offset).
Boundary z-slices first; these formulas mirror the x/y edge handling above.*/
for (j=0; j<2*N_y; j++)
for (k=0; k<2*N_x; k++){
Object[1][j][k] = buffer3D[0][j][k];
Object[2][j][k] = (3.0*buffer3D[0][j][k] + buffer3D[1][j][k])/4.0;
Object[2*N_z][j][k] = buffer3D[N_z-1][j][k];
Object[2*N_z-1][j][k] = (3.0*buffer3D[N_z-1][j][k] + buffer3D[N_z-2][j][k])/4.0;
}
/*Interior z-slices: Object[2*slice+1] is output index 2*slice, Object[2*slice+2]
is output index 2*slice+1 (shifted by the +1 offset)*/
for (slice=1; slice < N_z-1; slice++)
for (j=0; j<2*N_y; j++)
for (k=0; k<2*N_x; k++){
Object[2*slice+1][j][k] = (buffer3D[slice-1][j][k] + 3.0*buffer3D[slice][j][k])/4.0;
Object[2*slice+2][j][k] = (3.0*buffer3D[slice][j][k] + buffer3D[slice+1][j][k])/4.0;
}
multifree(buffer2D,3);
multifree(buffer3D,3);
}
/*Randomly selects the voxel lines which need to be updated along the x-y plane for each z-block and time slice*/
/*Randomly selects, for every time slice and z-block, the x-y voxel lines to be
updated, by drawing without replacement (Fisher-Yates style) from the pool of all
N_y*N_x positions. Masked-in positions (Mask == 1) are appended to
x_rand_select/y_rand_select and counted in UpdateSelectNum.*/
void randomly_select_x_y (ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr, uint8_t*** Mask)
{
	int32_t i, j, num, Index, col, row, *Counter, ArraySize, block;
	int32_t z_per_block;

	/* Scratch pool of all x-y indices; drawn entries are removed by swapping in
	   the last element of the pool. */
	ArraySize = ScannedObjectPtr->N_y*ScannedObjectPtr->N_x;
	Counter = (int32_t*)get_spc(ArraySize, sizeof(int32_t));
	z_per_block = ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks;
	for (i=0; i<ScannedObjectPtr->N_time; i++)
	for (block=0; block<TomoInputsPtr->num_z_blocks; block++)
	{
		ArraySize = ScannedObjectPtr->N_y*ScannedObjectPtr->N_x;
		for (Index = 0; Index < ArraySize; Index++)
			Counter[Index] = Index;
		TomoInputsPtr->UpdateSelectNum[i][block] = 0;
		for (j=0; j<ScannedObjectPtr->N_x*ScannedObjectPtr->N_y; j++){
			/* Draw a random entry from the remaining pool. */
			Index = floor(random2() * ArraySize);
			Index = (Index == ArraySize)?ArraySize-1:Index;
			col = Counter[Index] % ScannedObjectPtr->N_x;
			row = Counter[Index] / ScannedObjectPtr->N_x;
			/* The original looped over the z-slices of this block and broke out of
			   the loop on the first iteration; since the mask test does not depend
			   on z, that reduces to a single test. The z_per_block guard preserves
			   the original's behavior when a block spans zero slices. */
			if (z_per_block > 0 && Mask[i][row][col] == 1)
			{
				num = TomoInputsPtr->UpdateSelectNum[i][block];
				TomoInputsPtr->x_rand_select[i][block][num] = col;
				TomoInputsPtr->y_rand_select[i][block][num] = row;
				(TomoInputsPtr->UpdateSelectNum[i][block])++;
			}
			/* Remove the drawn entry from the pool. */
			Counter[Index] = Counter[ArraySize - 1];
			ArraySize--;
		}
	}
	free(Counter);
}
/*'InitObject' initializes the Object to be reconstructed to either 0 or an interpolated version of the previous reconstruction. It is used in multi-resolution reconstruction, in which, after every coarse resolution reconstruction, the object should be initialized with an interpolated version of the reconstruction, following which the object will be reconstructed at a finer resolution.
--initICD--
If 1, initializes the object to 0
If 2, the code uses bilinear interpolation to initialize the object if the previous reconstruction was at a lower resolution
The function also initializes the magnitude update map 'MagUpdateMap' from the previous coarser resolution
reconstruction. */
/*'initObject' initializes the object to be reconstructed.
--initICD--
0 : keep the default OBJECT_INIT_VAL initialization
1 : read a previous reconstruction from file at the current resolution
2 : read a previous reconstruction at half x-y resolution and upsample it in 2D
3 : read a previous reconstruction at half x-y-z resolution and upsample it in 3D
Any other value is an error. If initMagUpMap is 1, the magnitude update map
'MagUpdateMap' is also initialized from the previous (coarser) stage.
Returns 0 on success, -1 on failure.*/
int32_t initObject (Sinogram* SinogramPtr, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr, Real_arr_t**** MagUpdateMap)
{
	char object_file[100];
	int dimTiff[4];
	int32_t i, j, k, l, flag = 0;
	int64_t size;
	Real_arr_t ***Init, ****UpMapInit;

	/* Default initialization. Object is indexed [time][z+1][y][x]; the +1 skips
	   the extra boundary z-slice kept for the MPI halo exchange. */
	for (i = 0; i < ScannedObjectPtr->N_time; i++)
		for (j = 0; j < ScannedObjectPtr->N_z; j++)
			for (k = 0; k < ScannedObjectPtr->N_y; k++)
				for (l = 0; l < ScannedObjectPtr->N_x; l++)
					ScannedObjectPtr->Object[i][j+1][k][l] = OBJECT_INIT_VAL;

	if (TomoInputsPtr->initICD > 3 || TomoInputsPtr->initICD < 0){
		sentinel(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "ERROR: initICD value not recognized.\n");
	}
	else if (TomoInputsPtr->initICD == 1)
	{
		/* Same-resolution restart: read each time slice directly into the object. */
		size = (int64_t)ScannedObjectPtr->N_z*(int64_t)ScannedObjectPtr->N_y*(int64_t)ScannedObjectPtr->N_x;
		for (i = 0; i < ScannedObjectPtr->N_time; i++)
		{
			sprintf(object_file, "%s_time_%d", OBJECT_FILENAME, i);
			if (read_SharedBinFile_At (object_file, &(ScannedObjectPtr->Object[i][1][0][0]), (int64_t)TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr)) flag = -1;
		}
		if (TomoInputsPtr->initMagUpMap == 1)
		{
			/* Widened to int64 before multiplying to avoid int32 overflow on large volumes. */
			size = (int64_t)ScannedObjectPtr->N_time*TomoInputsPtr->num_z_blocks*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x;
			if (read_SharedBinFile_At (MAG_UPDATE_FILENAME, &(MagUpdateMap[0][0][0][0]), (int64_t)TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr)) flag = -1;
		}
	}
	else if (TomoInputsPtr->initICD == 2 || TomoInputsPtr->initICD == 3)
	{
		if (TomoInputsPtr->initICD == 3)
		{
			/* Previous stage was at half resolution in x, y and z. */
			Init = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, ScannedObjectPtr->N_z/2, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2);
			check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Interpolating object using 3D bilinear interpolation.\n");
			for (i = 0; i < ScannedObjectPtr->N_time; i++)
			{
				sprintf(object_file, "%s_time_%d", OBJECT_FILENAME, i);
				size = (int64_t)ScannedObjectPtr->N_z*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x/8;
				if (read_SharedBinFile_At (object_file, &(Init[0][0][0]), (int64_t)TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr)) flag = -1;
				upsample_object_bilinear_3D (ScannedObjectPtr->Object[i], Init, ScannedObjectPtr->N_z/2, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2);
			}
			multifree(Init,3);
			check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Done with interpolating object using 3D bilinear interpolation.\n");
		}
		else
		{
			/* Previous stage was at half resolution in x and y only. */
			Init = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, ScannedObjectPtr->N_z, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2);
			check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Interpolating object using 2D bilinear interpolation.\n");
			for (i = 0; i < ScannedObjectPtr->N_time; i++)
			{
				sprintf(object_file, "%s_time_%d", OBJECT_FILENAME, i);
				size = (int64_t)ScannedObjectPtr->N_z*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x/4;
				if (read_SharedBinFile_At (object_file, &(Init[0][0][0]), (int64_t)TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr)) flag = -1;
				upsample_object_bilinear_2D (ScannedObjectPtr->Object[i], Init, ScannedObjectPtr->N_z, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2);
			}
			multifree(Init,3);
			check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Done with interpolating object using 2D bilinear interpolation.\n");
		}
		if (TomoInputsPtr->initMagUpMap == 1)
		{
			if (TomoInputsPtr->prevnum_z_blocks == TomoInputsPtr->num_z_blocks)
			{
				/* Same number of z-blocks as before: only x-y needs upsampling. */
				UpMapInit = (Real_arr_t****)multialloc(sizeof(Real_arr_t), 4, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2);
				size = (int64_t)ScannedObjectPtr->N_time*TomoInputsPtr->num_z_blocks*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x/4;
				if (read_SharedBinFile_At (MAG_UPDATE_FILENAME, &(UpMapInit[0][0][0][0]), (int64_t)TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr)) flag = -1;
				check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Interpolating magnitude update map using 2D bilinear interpolation.\n");
				upsample_bilinear_2D (MagUpdateMap, UpMapInit, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2);
				multifree(UpMapInit,4);
			}
			else if (TomoInputsPtr->prevnum_z_blocks == TomoInputsPtr->num_z_blocks/2)
			{
				/* Half the z-blocks before: upsample in x, y and z. */
				UpMapInit = (Real_arr_t****)multialloc(sizeof(Real_arr_t), 4, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks/2, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2);
				size = (int64_t)ScannedObjectPtr->N_time*TomoInputsPtr->num_z_blocks*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x/8;
				if (read_SharedBinFile_At (MAG_UPDATE_FILENAME, &(UpMapInit[0][0][0][0]), (int64_t)TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr)) flag = -1;
				check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Interpolating magnitude update map using 3D bilinear interpolation.\n");
				upsample_bilinear_3D (MagUpdateMap, UpMapInit, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks/2, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2);
				multifree(UpMapInit,4);
			}
			else
			{
				check_warn(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Number of axial blocks is incompatible with previous stage of multi-resolution.\n");
				check_warn(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Initializing the multi-resolution map to zeros.\n");
			}
		}
	}

	/* Debug dumps of the initialized magnitude update map and object. */
	dimTiff[0] = ScannedObjectPtr->N_time; dimTiff[1] = TomoInputsPtr->num_z_blocks; dimTiff[2] = ScannedObjectPtr->N_y; dimTiff[3] = ScannedObjectPtr->N_x;
	sprintf(object_file, "%s_n%d", MAG_UPDATE_FILENAME, TomoInputsPtr->node_rank);
	if (TomoInputsPtr->Write2Tiff == 1)
		if (WriteMultiDimArray2Tiff (object_file, dimTiff, 0, 1, 2, 3, &(MagUpdateMap[0][0][0][0]), 0, TomoInputsPtr->debug_file_ptr))
			flag = -1;
	for (i = 0; i < ScannedObjectPtr->N_time; i++)
	{
		/* Build the name in one call; the original passed object_file as both the
		   source and the destination of sprintf, which is undefined behavior
		   (C11 7.21.6.6). The resulting string is identical. */
		sprintf (object_file, "%s_n%d_time_%d", INIT_OBJECT_FILENAME, TomoInputsPtr->node_rank, i);
		dimTiff[0] = 1; dimTiff[1] = ScannedObjectPtr->N_z; dimTiff[2] = ScannedObjectPtr->N_y; dimTiff[3] = ScannedObjectPtr->N_x;
		if (TomoInputsPtr->Write2Tiff == 1)
			if (WriteMultiDimArray2Tiff (object_file, dimTiff, 0, 1, 2, 3, &(ScannedObjectPtr->Object[i][1][0][0]), 0, TomoInputsPtr->debug_file_ptr))
				flag = -1;
	}
	return (flag);

error:
	return (-1);
}
/*'initErrorSinogam' is used to initialize the error sinogram before the start of ICD. It computes e = y - Ax - d. Ax is computed by forward projecting the object x.*/
/*'initErrorSinogam' initializes the error sinogram e = y - Ax - d before the
start of ICD; Ax is computed by forward projecting the object x. Also initializes
the outlier selector ProjSelect and frees SinogramPtr->Projection (no longer
needed afterwards). Returns the WriteMultiDimArray2Tiff status (0 on success).*/
int32_t initErrorSinogam (Sinogram* SinogramPtr, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr, Real_arr_t** DetectorResponse, Real_arr_t*** ErrorSino/*, AMatrixCol* VoxelLineResponse*/)
{
	Real_t pixel, avg=0;
	int32_t dimTiff[4], i, j, k, p, sino_idx, slice, flag = 0;
	AMatrixCol* AMatrixPtr = (AMatrixCol*)get_spc(ScannedObjectPtr->N_time, sizeof(AMatrixCol));
	uint8_t AvgNumXElements = (uint8_t)ceil(3*ScannedObjectPtr->delta_xy/SinogramPtr->delta_r);
	char error_file[100];

	/* Build the debug filename in one call; the original passed error_file as both
	   the source and the destination of sprintf, which is undefined behavior
	   (C11 7.21.6.6). The resulting string is identical. */
	sprintf(error_file, "error_sinogram_n%d", TomoInputsPtr->node_rank);
	/* One scratch A-matrix column per time slice so the parallel loop below can
	   compute projections independently per thread. */
	for (i = 0; i < ScannedObjectPtr->N_time; i++)
	{
		AMatrixPtr[i].values = (Real_t*)get_spc(AvgNumXElements, sizeof(Real_t));
		AMatrixPtr[i].index = (int32_t*)get_spc(AvgNumXElements, sizeof(int32_t));
	}
	memset(&(ErrorSino[0][0][0]), 0, SinogramPtr->N_p*SinogramPtr->N_t*SinogramPtr->N_r*sizeof(Real_arr_t));
	/* Accumulate Ax into ErrorSino. NOTE(review): work is split over time slices;
	   this assumes different time slices project onto disjoint projection indices
	   (ProjIdxPtr), otherwise the accumulation would race - confirm. */
	#pragma omp parallel for private(j, k, p, sino_idx, slice, pixel)
	for (i=0; i<ScannedObjectPtr->N_time; i++)
	{
		for (j=0; j<ScannedObjectPtr->N_y; j++)
		{
			for (k=0; k<ScannedObjectPtr->N_x; k++){
				for (p=0; p<ScannedObjectPtr->ProjNum[i]; p++){
					sino_idx = ScannedObjectPtr->ProjIdxPtr[i][p];
					calcAMatrixColumnforAngle(SinogramPtr, ScannedObjectPtr, DetectorResponse, &(AMatrixPtr[i]), j, k, sino_idx);
					for (slice=0; slice<ScannedObjectPtr->N_z; slice++){
						pixel = ScannedObjectPtr->Object[i][slice+1][j][k]; /*slice+1 to account for extra z slices required for MPI*/
						forward_project_voxel (SinogramPtr, pixel, ErrorSino, &(AMatrixPtr[i])/*, &(VoxelLineResponse[slice])*/, sino_idx, slice);
					}
				}
			}
		}
	}
	/* e = y - Ax - d, plus initialization of the outlier selector. */
	#pragma omp parallel for private(j, k) reduction(+:avg)
	for(i=0; i < SinogramPtr->N_p; i++)
		for (j = 0; j < SinogramPtr->N_r; j++)
			for (k = 0; k < SinogramPtr->N_t; k++)
			{
				ErrorSino[i][j][k] = SinogramPtr->Projection[i][j][k] - ErrorSino[i][j][k] - SinogramPtr->ProjOffset[j][k];
				if (fabs(ErrorSino[i][j][k]*sqrt(TomoInputsPtr->Weight[i][j][k])) < TomoInputsPtr->ErrorSinoThresh)
					SinogramPtr->ProjSelect[i][j][k] = true;
				else
					SinogramPtr->ProjSelect[i][j][k] = false;
				avg+=ErrorSino[i][j][k];
			}
	avg = avg/(SinogramPtr->N_r*SinogramPtr->N_t*SinogramPtr->N_p);
	check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Average of error sinogram in node %d is %f\n", TomoInputsPtr->node_rank, avg);
	dimTiff[0] = 1; dimTiff[1] = SinogramPtr->N_p; dimTiff[2] = SinogramPtr->N_r; dimTiff[3] = SinogramPtr->N_t;
	if (TomoInputsPtr->Write2Tiff == 1)
		flag = WriteMultiDimArray2Tiff (error_file, dimTiff, 0, 3, 1, 2, &(ErrorSino[0][0][0]), 0, TomoInputsPtr->debug_file_ptr);
	for (i = 0; i < ScannedObjectPtr->N_time; i++)
	{
		free(AMatrixPtr[i].values);
		free(AMatrixPtr[i].index);
	}
	free (AMatrixPtr);
	multifree(SinogramPtr->Projection,3);
	return (flag);
}
/*Updates the variance parameter \sigma*/
/*Updates the variance parameter (TomoInputsPtr->var_est). The weights are first
rescaled back by the old variance estimate while an error statistic is
accumulated; the statistic is summed across MPI nodes, the new variance estimate
is computed, and the weights are then re-normalized by it. ProjSelect is
refreshed against the new normalized weights.*/
void update_variance_parameter (Sinogram* SinogramPtr, TomoInputs* TomoInputsPtr, Real_arr_t*** ErrorSino)
{
	int32_t p, r, t;
	Real_t local_sum = 0, global_sum = 0;

	/* Undo the previous normalization (Weight *= var_est) and accumulate the
	   per-element contribution to the variance update. */
	#pragma omp parallel for private(r, t) reduction(+:local_sum)
	for (p = 0; p < SinogramPtr->N_p; p++)
		for (r = 0; r < SinogramPtr->N_r; r++)
			for (t = 0; t < SinogramPtr->N_t; t++)
			{
				TomoInputsPtr->Weight[p][r][t] = TomoInputsPtr->Weight[p][r][t]*TomoInputsPtr->var_est;
				if (SinogramPtr->ProjSelect[p][r][t] == true)
					local_sum += ErrorSino[p][r][t]*ErrorSino[p][r][t]*TomoInputsPtr->Weight[p][r][t];
				else
					local_sum += fabs(ErrorSino[p][r][t])*TomoInputsPtr->ErrorSinoDelta*TomoInputsPtr->ErrorSinoThresh*sqrt(TomoInputsPtr->Weight[p][r][t]*TomoInputsPtr->var_est);
			}

	/* Combine the statistic across all MPI nodes, then normalize by the total
	   number of sinogram entries over all nodes. */
	MPI_Allreduce(&local_sum, &global_sum, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD);
	TomoInputsPtr->var_est = global_sum/((Real_t)TomoInputsPtr->node_num*(Real_t)SinogramPtr->N_p*(Real_t)SinogramPtr->N_r*(Real_t)SinogramPtr->N_t);

	/* Re-normalize the weights by the new estimate and refresh the selector. */
	#pragma omp parallel for private(r, t)
	for (p = 0; p < SinogramPtr->N_p; p++)
		for (r = 0; r < SinogramPtr->N_r; r++)
			for (t = 0; t < SinogramPtr->N_t; t++)
			{
				TomoInputsPtr->Weight[p][r][t] /= TomoInputsPtr->var_est;
				if (fabs(ErrorSino[p][r][t]*sqrt(TomoInputsPtr->Weight[p][r][t])) < TomoInputsPtr->ErrorSinoThresh)
					SinogramPtr->ProjSelect[p][r][t] = true;
				else
					SinogramPtr->ProjSelect[p][r][t] = false;
			}
}
/*Updates the projection offset error parameter d_i under the rectangular patch
constraint. A quadratic surrogate (Lambda, b) is accumulated per detector
element over all projections, solved by constrained_quad_opt into x, and the
error sinogram and outlier selector are then updated for the new offsets.*/
void update_d_offset_rect_patch_constraint (Sinogram* SinogramPtr, TomoInputs* TomoInputsPtr, Real_arr_t*** ErrorSino)
{
	Real_t **b, **Lambda, temp;
	Real_arr_t **x;
	int32_t i, j, k;

	b = (Real_t**)multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t);
	Lambda = (Real_t**)multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t);
	x = (Real_arr_t**)multialloc(sizeof(Real_arr_t), 2, SinogramPtr->N_r, SinogramPtr->N_t);
	memset(&(b[0][0]), 0, SinogramPtr->N_r*SinogramPtr->N_t*sizeof(Real_t));
	memset(&(Lambda[0][0]), 0, SinogramPtr->N_r*SinogramPtr->N_t*sizeof(Real_t));
	memset(&(x[0][0]), 0, SinogramPtr->N_r*SinogramPtr->N_t*sizeof(Real_arr_t));
	/* Accumulate the surrogate coefficients over all projection angles. */
	#pragma omp parallel for collapse(2) private(i, j, k, temp)
	for (i = 0; i < SinogramPtr->N_r; i++)
	{
		for (j = 0; j < SinogramPtr->N_t; j++)
		{
			b[i][j] = 0;
			Lambda[i][j] = 0;
			for (k = 0; k < SinogramPtr->N_p; k++)
			{
				temp = TomoInputsPtr->ErrorSinoThresh*TomoInputsPtr->ErrorSinoDelta*sqrt(TomoInputsPtr->Weight[k][i][j]);
				if (SinogramPtr->ProjSelect[k][i][j] == true)
				{
					Lambda[i][j] += TomoInputsPtr->Weight[k][i][j];
					b[i][j] += (ErrorSino[k][i][j] + SinogramPtr->ProjOffset[i][j])*TomoInputsPtr->Weight[k][i][j];
				}
				else
				{
					/* Outlier: down-weight by temp/|e|. (A 'sign' variable was
					   computed here in the original but never used - removed.) */
					Lambda[i][j] += temp/fabs(ErrorSino[k][i][j]);
					b[i][j] += (ErrorSino[k][i][j] + SinogramPtr->ProjOffset[i][j])*temp/fabs(ErrorSino[k][i][j]);
				}
			}
		}
	}
	constrained_quad_opt (Lambda, b, SinogramPtr->off_constraint, x, SinogramPtr->N_r, SinogramPtr->N_t, SinogramPtr->off_constraint_num, TomoInputsPtr);
	/* Apply the offset change to the error sinogram and refresh ProjSelect. */
	#pragma omp parallel for collapse(3) private(i, j, k)
	for (k = 0; k < SinogramPtr->N_p; k++)
	{
		for (i = 0; i < SinogramPtr->N_r; i++)
		{
			for (j = 0; j < SinogramPtr->N_t; j++)
			{
				ErrorSino[k][i][j] += SinogramPtr->ProjOffset[i][j] - x[i][j];
				if (fabs(ErrorSino[k][i][j]*sqrt(TomoInputsPtr->Weight[k][i][j])) < TomoInputsPtr->ErrorSinoThresh)
					SinogramPtr->ProjSelect[k][i][j] = true;
				else
					SinogramPtr->ProjSelect[k][i][j] = false;
			}
		}
	}
	memcpy(&(SinogramPtr->ProjOffset[0][0]),&(x[0][0]),SinogramPtr->N_r*SinogramPtr->N_t*sizeof(Real_arr_t));
	multifree(b,2);
	multifree(Lambda,2);
	multifree(x,2);
}
/*Updates the projection offset error parameter d_i*/
/*Updates the projection offset error parameter d_i subject to a zero-mean style
constraint: a per-element ICD step numerator/denominator is accumulated first,
then a scalar correction 'gamma' is computed from the weighted sums and
subtracted from every step before the update is applied to both ProjOffset and
ErrorSino. NOTE(review): denominator[i][j] is assumed non-zero (N_p > 0 and
positive weights) - a zero denominator would divide by zero here; confirm.*/
void update_d_offset_zero_mean_constraint (Sinogram* SinogramPtr, TomoInputs* TomoInputsPtr, Real_arr_t*** ErrorSino)
{
Real_t sign, **numerator, num_sum = 0, temp, **denominator, den_sum = 0, gamma = 0;
int32_t i, j, k;
numerator = (Real_t**)multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t);
denominator = (Real_t**)multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t);
/*Accumulate the surrogate terms per detector element over all projections*/
#pragma omp parallel for private(j, k, temp, sign) reduction(+:num_sum, den_sum)
for (i = 0; i < SinogramPtr->N_r; i++)
for (j = 0; j < SinogramPtr->N_t; j++)
{
numerator[i][j] = 0;
denominator[i][j] = 0;
for (k = 0; k < SinogramPtr->N_p; k++)
{
temp = TomoInputsPtr->ErrorSinoThresh*TomoInputsPtr->ErrorSinoDelta*sqrt(TomoInputsPtr->Weight[k][i][j]);
if (SinogramPtr->ProjSelect[k][i][j] == true)
{
numerator[i][j] += ErrorSino[k][i][j]*TomoInputsPtr->Weight[k][i][j];
denominator[i][j] += TomoInputsPtr->Weight[k][i][j];
}
else
{
/*Outlier measurement: clamp its influence via the surrogate weight temp*/
sign = (ErrorSino[k][i][j] > 0) - (ErrorSino[k][i][j] < 0);
numerator[i][j] += temp*sign;
denominator[i][j] += temp/fabs(ErrorSino[k][i][j]);
}
}
/*Running sums used to compute the constraint correction gamma below*/
num_sum += SinogramPtr->ProjOffset[i][j] + (numerator[i][j]/denominator[i][j]);
den_sum += 1.0/denominator[i][j];
}
/*gamma shifts every per-element step so the constraint is satisfied*/
gamma = num_sum/den_sum;
#pragma omp parallel for private(j, k)
for (i = 0; i < SinogramPtr->N_r; i++)
for (j = 0; j < SinogramPtr->N_t; j++)
{
SinogramPtr->ProjOffset[i][j] = SinogramPtr->ProjOffset[i][j] + (numerator[i][j]-gamma)/denominator[i][j];
for (k = 0; k < SinogramPtr->N_p; k++)
{
/*Keep the error sinogram consistent with the new offsets; refresh selector*/
ErrorSino[k][i][j] -= (numerator[i][j]-gamma)/denominator[i][j];
if (fabs(ErrorSino[k][i][j]*sqrt(TomoInputsPtr->Weight[k][i][j])) < TomoInputsPtr->ErrorSinoThresh)
SinogramPtr->ProjSelect[k][i][j] = true;
else
SinogramPtr->ProjSelect[k][i][j] = false;
}
}
multifree(numerator,2);
multifree(denominator,2);
}
/*Updates the projection offset error parameter d_i*/
/*Updates the projection offset error parameter d_i with no constraint: a
per-detector-element ICD step num/den is accumulated over all projections and
applied directly to ProjOffset, with the error sinogram and outlier selector
updated to match.*/
void update_d_offset_unconstrained (Sinogram* SinogramPtr, TomoInputs* TomoInputsPtr, Real_arr_t*** ErrorSino)
{
	Real_t **num, **den, surr_wt, direction;
	int32_t r, t, p;

	num = (Real_t**)multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t);
	den = (Real_t**)multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t);

	/* Accumulate the update step's numerator and denominator per element. */
	#pragma omp parallel for private(t, p, surr_wt, direction)
	for (r = 0; r < SinogramPtr->N_r; r++)
		for (t = 0; t < SinogramPtr->N_t; t++)
		{
			num[r][t] = 0;
			den[r][t] = 0;
			for (p = 0; p < SinogramPtr->N_p; p++)
			{
				surr_wt = TomoInputsPtr->ErrorSinoThresh*TomoInputsPtr->ErrorSinoDelta*sqrt(TomoInputsPtr->Weight[p][r][t]);
				if (SinogramPtr->ProjSelect[p][r][t] == true)
				{
					num[r][t] += ErrorSino[p][r][t]*TomoInputsPtr->Weight[p][r][t];
					den[r][t] += TomoInputsPtr->Weight[p][r][t];
				}
				else
				{
					/* Outlier: contribute only the clamped surrogate terms. */
					direction = (ErrorSino[p][r][t] > 0) - (ErrorSino[p][r][t] < 0);
					num[r][t] += surr_wt*direction;
					den[r][t] += surr_wt/fabs(ErrorSino[p][r][t]);
				}
			}
		}

	/* Apply the step, keep ErrorSino consistent, and refresh the selector. */
	#pragma omp parallel for private(t, p)
	for (r = 0; r < SinogramPtr->N_r; r++)
		for (t = 0; t < SinogramPtr->N_t; t++)
		{
			SinogramPtr->ProjOffset[r][t] = SinogramPtr->ProjOffset[r][t] + (num[r][t])/den[r][t];
			for (p = 0; p < SinogramPtr->N_p; p++)
			{
				ErrorSino[p][r][t] -= (num[r][t])/den[r][t];
				if (fabs(ErrorSino[p][r][t]*sqrt(TomoInputsPtr->Weight[p][r][t])) < TomoInputsPtr->ErrorSinoThresh)
					SinogramPtr->ProjSelect[p][r][t] = true;
				else
					SinogramPtr->ProjSelect[p][r][t] = false;
			}
		}

	multifree(num,2);
	multifree(den,2);
}
/*Dispatches the projection offset update according to OffsetConstraintType:
1 -> unconstrained, 2 -> zero-mean constraint, 3 -> rectangular patch constraint.
Any other value leaves the offsets untouched.*/
void update_Sinogram_Offset (Sinogram* SinogramPtr, TomoInputs* TomoInputsPtr, Real_arr_t*** ErrorSino)
{
	switch (TomoInputsPtr->OffsetConstraintType)
	{
		case 1:
			update_d_offset_unconstrained (SinogramPtr, TomoInputsPtr, ErrorSino);
			break;
		case 2:
			update_d_offset_zero_mean_constraint (SinogramPtr, TomoInputsPtr, ErrorSino);
			break;
		case 3:
			update_d_offset_rect_patch_constraint (SinogramPtr, TomoInputsPtr, ErrorSino);
			break;
		default:
			break;
	}
}
/*Implements multithreaded shared memory parallelization using OpenMP and splits work among
threads. Each thread gets a certain time slice and z block to update.
Multithreading is done within the z-blocks assigned to each node.
ErrorSino - Error sinogram
Iter - Present iteration number
MagUpdateMap - Magnitude update map containing the magnitude of update of each voxel
Mask - If a certain element is true then the corresponding voxel is updated*/
/*Returns 1 when the percentage average magnitude of voxel updates falls below
TomoInputsPtr->StopThreshold (converged), 0 otherwise.*/
int updateVoxelsTimeSlices(Sinogram* SinogramPtr, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr, Real_arr_t** DetectorResponse, /*AMatrixCol* VoxelLineResponse,*/ Real_arr_t*** ErrorSino, int32_t Iter, Real_arr_t**** MagUpdateMap, uint8_t*** Mask)
{
Real_t AverageUpdate = 0, tempUpdate, avg_update_percentage, total_vox_mag = 0.0, vox_mag = 0.0;
int32_t xy_start, xy_end, i, j, K, block, idx, **z_start, **z_stop;
Real_t tempTotPix = 0, total_pix = 0;
long int **zero_count, total_zero_count = 0;
/*Per (time, z-block) record of which OpenMP thread did the update (debug output only)*/
int32_t** thread_num = (int32_t**)multialloc(sizeof(int32_t), 2, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks);
MPI_Request *send_reqs, *recv_reqs;
send_reqs = (MPI_Request*)get_spc(ScannedObjectPtr->N_time, sizeof(MPI_Request));
recv_reqs = (MPI_Request*)get_spc(ScannedObjectPtr->N_time, sizeof(MPI_Request));
z_start = (int32_t**)multialloc(sizeof(int32_t), 2, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks);
z_stop = (int32_t**)multialloc(sizeof(int32_t), 2, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks);
/*Randomly pick the x-y voxel lines to update in each (time, z-block)*/
randomly_select_x_y (ScannedObjectPtr, TomoInputsPtr, Mask);
zero_count = (long int**)multialloc(sizeof(long int), 2, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks);
/* offset_numerator = (Real_t**)multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t);
memset(&(offset_denominator[0][0]), 0, SinogramPtr->N_r*SinogramPtr->N_t*sizeof(Real_t));
for (k = 0; k < SinogramPtr->N_p; k++)
for (i = 0; i < SinogramPtr->N_r; i++)
for (j = 0; j < SinogramPtr->N_t; j++)
offset_denominator[i][j] += TomoInputsPtr->Weight[k][i][j]; */
memset(&(zero_count[0][0]), 0, ScannedObjectPtr->N_time*TomoInputsPtr->num_z_blocks*sizeof(long int));
/* K = ScannedObjectPtr->N_time*ScannedObjectPtr->N_z*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x;
K = (K - total_zero_count)/(ScannedObjectPtr->gamma*K);*/
K = ScannedObjectPtr->NHICD_Iterations;
check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Number of NHICD iterations is %d.\n", K);
if (Iter == 1)
{
/*First iteration: exchange both boundary z-slice directions up front so the
neighbor-node halo slices are valid before any voxel is updated*/
MPI_Send_Recv_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0);
MPI_Send_Recv_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1);
MPI_Wait_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0);
MPI_Wait_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1);
}
/*Each of the K passes updates a 1/K fraction of the selected voxel lines*/
for (j = 0; j < K; j++)
{
total_vox_mag = 0.0;
/*Phase 1: even time slices work on even z-blocks, odd time slices on odd
z-blocks (block steps by 2) - presumably so concurrently updated z-blocks are
never adjacent; confirm against updateVoxels' access pattern*/
#pragma omp parallel for collapse(2) private(i, block, idx, xy_start, xy_end) reduction(+:total_vox_mag)
for (i = 0; i < ScannedObjectPtr->N_time; i++)
for (block = 0; block < TomoInputsPtr->num_z_blocks; block = block + 2)
{
idx = (i % 2 == 0) ? block: block + 1;
/*z range of this block; the last block absorbs any leftover slices*/
z_start[i][idx] = idx*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks);
z_stop[i][idx] = (idx + 1)*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks) - 1;
z_stop[i][idx] = (idx >= TomoInputsPtr->num_z_blocks - 1) ? ScannedObjectPtr->N_z - 1: z_stop[i][idx];
/*Slice of the randomly selected voxel lines assigned to pass j; the last
pass absorbs any remainder*/
xy_start = j*floor(TomoInputsPtr->UpdateSelectNum[i][idx]/K);
xy_end = (j + 1)*floor(TomoInputsPtr->UpdateSelectNum[i][idx]/K) - 1;
xy_end = (j == K - 1) ? TomoInputsPtr->UpdateSelectNum[i][idx] - 1: xy_end;
/* printf ("Loop 1 Start - j = %d, i = %d, idx = %d, z_start = %d, z_stop = %d, xy_start = %d, xy_end = %d\n", j, i, idx, z_start[i][idx], z_stop[i][idx], xy_start, xy_end);*/
total_vox_mag += updateVoxels (i, i, z_start[i][idx], z_stop[i][idx], xy_start, xy_end, TomoInputsPtr->x_rand_select[i][idx], TomoInputsPtr->y_rand_select[i][idx], SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino, DetectorResponse, /*VoxelLineResponse,*/ Iter, &(zero_count[i][idx]), MagUpdateMap[i][idx], Mask[i]);
thread_num[i][idx] = omp_get_thread_num();
}
/*check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Send MPI info\n");*/
/*Overlap the halo exchange (direction 0) with the offset update*/
MPI_Send_Recv_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0);
/* check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "update_Sinogram_Offset: Will compute projection offset error\n");*/
if (TomoInputsPtr->updateProjOffset > 1)
update_Sinogram_Offset (SinogramPtr, TomoInputsPtr, ErrorSino);
/* check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "update_Sinogram_Offset: Done computing projection offset error\n");*/
MPI_Wait_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0);
/*Phase 2: the complementary z-blocks (parity flipped relative to phase 1)*/
#pragma omp parallel for collapse(2) private(i, block, idx, xy_start, xy_end) reduction(+:total_vox_mag)
for (i = 0; i < ScannedObjectPtr->N_time; i++)
for (block = 0; block < TomoInputsPtr->num_z_blocks; block = block + 2)
{
idx = (i % 2 == 0) ? block + 1: block;
z_start[i][idx] = idx*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks);
z_stop[i][idx] = (idx + 1)*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks) - 1;
z_stop[i][idx] = (idx >= TomoInputsPtr->num_z_blocks - 1) ? ScannedObjectPtr->N_z - 1: z_stop[i][idx];
xy_start = j*floor(TomoInputsPtr->UpdateSelectNum[i][idx]/K);
xy_end = (j + 1)*floor(TomoInputsPtr->UpdateSelectNum[i][idx]/K) - 1;
xy_end = (j == K - 1) ? TomoInputsPtr->UpdateSelectNum[i][idx] - 1: xy_end;
total_vox_mag += updateVoxels (i, i, z_start[i][idx], z_stop[i][idx], xy_start, xy_end, TomoInputsPtr->x_rand_select[i][idx], TomoInputsPtr->y_rand_select[i][idx], SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino, DetectorResponse, /*VoxelLineResponse,*/ Iter, &(zero_count[i][idx]), MagUpdateMap[i][idx], Mask[i]);
thread_num[i][idx] = omp_get_thread_num();
/* printf ("Loop 2 - i = %d, idx = %d, z_start = %d, z_stop = %d, xy_start = %d, xy_end = %d\n", i, idx, z_start[i][idx], z_stop[i][idx], xy_start, xy_end);*/
}
MPI_Send_Recv_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1);
if (TomoInputsPtr->updateProjOffset > 1)
update_Sinogram_Offset (SinogramPtr, TomoInputsPtr, ErrorSino);
MPI_Wait_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1);
/*Select the voxel lines for the NHICD (non-homogeneous ICD) sweep from the
magnitude update map*/
VSC_based_Voxel_Line_Select(ScannedObjectPtr, TomoInputsPtr, MagUpdateMap);
/* check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Number of NHICD voxel lines to be updated in iteration %d is %d\n", j, num_voxel_lines);*/
/*NHICD sweep (skipped on the first iteration or when disabled): same two-phase
checkerboard schedule, but over the NHICD-selected voxel lines*/
if (Iter > 1 && TomoInputsPtr->no_NHICD == 0)
{
#pragma omp parallel for collapse(2) private(i, block, idx)
for (i = 0; i < ScannedObjectPtr->N_time; i++)
for (block = 0; block < TomoInputsPtr->num_z_blocks; block = block + 2)
{
idx = (i % 2 == 0) ? block: block + 1;
z_start[i][idx] = idx*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks);
z_stop[i][idx] = (idx + 1)*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks) - 1;
z_stop[i][idx] = (idx >= TomoInputsPtr->num_z_blocks - 1) ? ScannedObjectPtr->N_z - 1: z_stop[i][idx];
updateVoxels (i, i, z_start[i][idx], z_stop[i][idx], 0, TomoInputsPtr->NHICDSelectNum[i][idx]-1, TomoInputsPtr->x_NHICD_select[i][idx], TomoInputsPtr->y_NHICD_select[i][idx], SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino, DetectorResponse, /*VoxelLineResponse,*/ Iter, &(zero_count[i][idx]), MagUpdateMap[i][idx], Mask[i]);
thread_num[i][idx] = omp_get_thread_num();
/* printf ("Loop 1 NHICD - i = %d, idx = %d, z_start = %d, z_stop = %d\n", i, idx, z_start[i][idx], z_stop[i][idx]);*/
}
MPI_Send_Recv_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0);
if (TomoInputsPtr->updateProjOffset > 1)
update_Sinogram_Offset (SinogramPtr, TomoInputsPtr, ErrorSino);
MPI_Wait_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0);
#pragma omp parallel for collapse(2) private(i, block, idx)
for (i = 0; i < ScannedObjectPtr->N_time; i++)
for (block = 0; block < TomoInputsPtr->num_z_blocks; block = block + 2)
{
idx = (i % 2 == 0) ? block + 1: block;
z_start[i][idx] = idx*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks);
z_stop[i][idx] = (idx + 1)*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks) - 1;
z_stop[i][idx] = (idx >= TomoInputsPtr->num_z_blocks - 1) ? ScannedObjectPtr->N_z - 1: z_stop[i][idx];
updateVoxels (i, i, z_start[i][idx], z_stop[i][idx], 0, TomoInputsPtr->NHICDSelectNum[i][idx]-1, TomoInputsPtr->x_NHICD_select[i][idx], TomoInputsPtr->y_NHICD_select[i][idx], SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino, DetectorResponse, /*VoxelLineResponse,*/ Iter, &(zero_count[i][idx]), MagUpdateMap[i][idx], Mask[i]);
thread_num[i][idx] = omp_get_thread_num();
/* printf ("Loop 2 NHICD - i = %d, idx = %d, z_start = %d, z_stop = %d\n", i, idx, z_start[i][idx], z_stop[i][idx]);*/
}
MPI_Send_Recv_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1);
if (TomoInputsPtr->updateProjOffset > 1)
update_Sinogram_Offset (SinogramPtr, TomoInputsPtr, ErrorSino);
MPI_Wait_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1);
}
}
if (TomoInputsPtr->updateVar == 1)
update_variance_parameter (SinogramPtr, TomoInputsPtr, ErrorSino);
/*Gather per-node update statistics for the convergence test and debug log*/
check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Time Slice, Z Start, Z End - Thread : ");
total_pix = 0;
for (i=0; i<ScannedObjectPtr->N_time; i++){
for (block=0; block<TomoInputsPtr->num_z_blocks; block++){
total_pix += TomoInputsPtr->UpdateSelectNum[i][block]*(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks);
for (j=0; j<TomoInputsPtr->UpdateSelectNum[i][block]; j++){
AverageUpdate += MagUpdateMap[i][block][TomoInputsPtr->y_rand_select[i][block][j]][TomoInputsPtr->x_rand_select[i][block][j]];
}
total_zero_count += zero_count[i][block];
check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "%d,%d,%d-%d; ", i, z_start[i][block], z_stop[i][block], thread_num[i][block]);
}
}
check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "\n");
/*Combine update statistics across all MPI nodes*/
MPI_Allreduce(&AverageUpdate, &tempUpdate, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(&total_pix, &tempTotPix, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(&total_vox_mag, &vox_mag, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD);
AverageUpdate = tempUpdate/(tempTotPix);
AverageUpdate = convert2Hounsfield(AverageUpdate);
check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Average voxel update over all voxels is %f, total voxels is %f.\n", AverageUpdate, tempTotPix);
check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Zero count is %ld.\n", total_zero_count);
check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Variance parameter divisor is %f.\n", (Real_t)TomoInputsPtr->node_num*(Real_t)SinogramPtr->N_p*(Real_t)SinogramPtr->N_r*(Real_t)SinogramPtr->N_t);
multifree(zero_count,2);
multifree(thread_num,2);
multifree(z_start,2);
multifree(z_stop,2);
free(send_reqs);
free(recv_reqs);
/* multifree(offset_numerator,2);
multifree(offset_denominator,2);*/
/*Convergence test: percentage ratio of total update magnitude to total voxel magnitude*/
avg_update_percentage = 100*tempUpdate/vox_mag;
check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Percentage average magnitude of voxel updates is %f.\n", avg_update_percentage);
if (avg_update_percentage < TomoInputsPtr->StopThreshold)
{
check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Percentage average magnitude of voxel updates is less than convergence threshold.\n");
return (1);
}
return(0);
}
/* ICD_BackProject calls the ICD optimization function repeatedly till the
 * stopping criteria is met.
 *
 * Allocates the detector response, error sinogram, voxel mask and the
 * per-voxel update-magnitude map, runs up to NumIter ICD sweeps, writes
 * diagnostic outputs, and frees everything.
 * Returns 0 on success, -1 on error (all local buffers freed either way). */
int ICD_BackProject(Sinogram* SinogramPtr, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr)
{
#ifndef NO_COST_CALCULATE
    Real_t cost, cost_0_iter, cost_last_iter, percentage_change_in_cost = 0;
    char costfile[100]=COST_FILENAME;
#endif
    Real_arr_t ***ErrorSino, **H_r, *H_t;
    Real_t x, y;
    int32_t j, flag = 0, Iter, i, k;
    int dimTiff[4];
    char VarEstFile[100] = VAR_PARAM_FILENAME;
    char scaled_error_file[100] = SCALED_ERROR_SINO_FILENAME;
    time_t start;
    char detect_file[100] = DETECTOR_RESPONSE_FILENAME;
    char projselect_file[100] = PROJ_SELECT_FILENAME;
    char MagUpdateMapFile[100] = MAG_UPDATE_FILENAME;
    uint8_t ***Mask;
    /*AMatrixCol *VoxelLineResponse;*/
#ifdef POSITIVITY_CONSTRAINT
    check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Enforcing positivity constraint\n");
#endif
    /* Magnitude of the most recent update at every voxel (used for voxel selection). */
    Real_arr_t**** MagUpdateMap = (Real_arr_t****)multialloc(sizeof(Real_arr_t), 4, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks, ScannedObjectPtr->N_y, ScannedObjectPtr->N_x);
    H_r = (Real_arr_t **)multialloc(sizeof(Real_arr_t), 2, SinogramPtr->N_p, DETECTOR_RESPONSE_BINS + 1);
    H_t = (Real_arr_t *)get_spc(DETECTOR_RESPONSE_BINS + 1, sizeof(Real_arr_t));
    ErrorSino = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, SinogramPtr->N_p, SinogramPtr->N_r, SinogramPtr->N_t);
    Mask = (uint8_t***)multialloc(sizeof(uint8_t), 3, ScannedObjectPtr->N_time, ScannedObjectPtr->N_y, ScannedObjectPtr->N_x);
    memset(&(MagUpdateMap[0][0][0][0]), 0, ScannedObjectPtr->N_time*TomoInputsPtr->num_z_blocks*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x*sizeof(Real_arr_t));
    /* omp_set_num_threads(TomoInputsPtr->num_threads);*/
    check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Number of CPU cores is %d\n", (int)omp_get_num_procs());
    /* check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "ICD_BackProject: Number of threads is %d\n", TomoInputsPtr->num_threads) ;*/
    /* Mark voxels inside the circular reconstruction region (x*x+y*y < r*r). */
    for (i = 0; i < ScannedObjectPtr->N_time; i++)
        for (j = 0; j < ScannedObjectPtr->N_y; j++)
            for (k = 0; k < ScannedObjectPtr->N_x; k++){
                x = ScannedObjectPtr->x0 + ((Real_t)k + 0.5)*ScannedObjectPtr->delta_xy;
                y = ScannedObjectPtr->y0 + ((Real_t)j + 0.5)*ScannedObjectPtr->delta_xy;
                if (x*x + y*y < TomoInputsPtr->radius_obj*TomoInputsPtr->radius_obj)
                    Mask[i][j][k] = 1;
                else
                    Mask[i][j][k] = 0;
            }
    DetectorResponseProfile (H_r, H_t, SinogramPtr, ScannedObjectPtr, TomoInputsPtr);
    dimTiff[0] = 1; dimTiff[1] = 1; dimTiff[2] = SinogramPtr->N_p; dimTiff[3] = DETECTOR_RESPONSE_BINS+1;
    /* BUG FIX: the original code did sprintf(buf, "%s_n%d", buf, ...), which
     * passes the destination as a source argument — undefined behavior per
     * C11 7.21.6.6p2.  Append the per-node suffix after the existing
     * contents instead. */
    sprintf(detect_file + strlen(detect_file), "_n%d", TomoInputsPtr->node_rank);
    if (TomoInputsPtr->Write2Tiff == 1)
        if (WriteMultiDimArray2Tiff (detect_file, dimTiff, 0, 1, 2, 3, &(H_r[0][0]), 0, TomoInputsPtr->debug_file_ptr)) goto error;
    start = time(NULL);
    if (initObject(SinogramPtr, ScannedObjectPtr, TomoInputsPtr, MagUpdateMap)) goto error;
    check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Time taken to read object = %fmins\n", difftime(time(NULL),start)/60.0);
    if (initErrorSinogam(SinogramPtr, ScannedObjectPtr, TomoInputsPtr, H_r, ErrorSino/*, VoxelLineResponse*/)) goto error;
    check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Time taken to initialize object and compute error sinogram = %fmins\n", difftime(time(NULL),start)/60.0);
#ifndef NO_COST_CALCULATE
    cost = computeCost(SinogramPtr,ScannedObjectPtr,TomoInputsPtr,ErrorSino);
    cost_0_iter = cost;
    cost_last_iter = cost;
    check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "------------- Iteration 0, Cost = %f------------\n",cost);
    if (TomoInputsPtr->node_rank == 0)
        Write2Bin (costfile, 1, 1, 1, 1, sizeof(Real_t), &cost, TomoInputsPtr->debug_file_ptr);
#endif /*Cost calculation endif*/
    start=time(NULL);
    /* Main ICD iteration loop; 'flag' is the convergence indicator returned
     * by the voxel-update sweep. */
    for (Iter = 1; Iter <= TomoInputsPtr->NumIter; Iter++)
    {
        flag = updateVoxelsTimeSlices (SinogramPtr, ScannedObjectPtr, TomoInputsPtr, H_r, /*VoxelLineResponse,*/ ErrorSino, Iter, MagUpdateMap, Mask);
        if (TomoInputsPtr->WritePerIter == 1)
            if (write_ObjectProjOff2TiffBinPerIter (SinogramPtr, ScannedObjectPtr, TomoInputsPtr)) goto error;
#ifndef NO_COST_CALCULATE
        cost = computeCost(SinogramPtr,ScannedObjectPtr,TomoInputsPtr,ErrorSino);
        percentage_change_in_cost = ((cost - cost_last_iter)/(cost - cost_0_iter))*100.0;
        check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Percentage change in cost is %f.\n", percentage_change_in_cost);
        check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Variance parameter estimate = %f.\n", TomoInputsPtr->var_est);
        check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "------------- Iteration = %d, Cost = %f, Time since start of ICD = %fmins ------------\n",Iter,cost,difftime(time(NULL),start)/60.0);
        if (TomoInputsPtr->node_rank == 0)
            Append2Bin (costfile, 1, 1, 1, 1, sizeof(Real_t), &cost, TomoInputsPtr->debug_file_ptr);
        /* The cost must be monotonically non-increasing for a correct ICD. */
        check_error(cost > cost_last_iter, TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Cost value increased.\n");
        cost_last_iter = cost;
        /*if (percentage_change_in_cost < TomoInputsPtr->cost_thresh && flag != 0 && Iter > 1){*/
        if (flag != 0 && Iter > 1){
            check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Convergence criteria is met.\n");
            break;
        }
#else
        check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Variance parameter estimate = %f\n",TomoInputsPtr->var_est);
        check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "-------------ICD_BackProject: ICD Iter = %d, time since start of ICD = %fmins------------.\n",Iter,difftime(time(NULL),start)/60.0);
        if (flag != 0 && Iter > 1){
            check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Convergence criteria is met.\n");
            break;
        }
#endif
        flag = fflush(TomoInputsPtr->debug_file_ptr);
        if (flag != 0)
            check_warn(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Cannot flush buffer.\n");
    }
    /* Rescale the error sinogram by sqrt(Weight) before writing it out. */
    for (i = 0; i < SinogramPtr->N_p; i++)
        for (j = 0; j < SinogramPtr->N_r; j++)
            for (k = 0; k < SinogramPtr->N_t; k++)
                ErrorSino[i][j][k] *= sqrt(TomoInputsPtr->Weight[i][j][k]);
    if (TomoInputsPtr->node_rank == 0)
        Write2Bin (VarEstFile, 1, 1, 1, 1, sizeof(Real_t), &(TomoInputsPtr->var_est), TomoInputsPtr->debug_file_ptr);
    int64_t size = (int64_t)ScannedObjectPtr->N_time*(int64_t)TomoInputsPtr->num_z_blocks*(int64_t)ScannedObjectPtr->N_y*(int64_t)ScannedObjectPtr->N_x;
    if (write_SharedBinFile_At (MagUpdateMapFile, &(MagUpdateMap[0][0][0][0]), (int64_t)TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr)) goto error;
    /* Same UB fix as for detect_file above: append, never self-sprintf. */
    sprintf(scaled_error_file + strlen(scaled_error_file), "_n%d", TomoInputsPtr->node_rank);
    sprintf(projselect_file + strlen(projselect_file), "_n%d", TomoInputsPtr->node_rank);
    dimTiff[0] = 1; dimTiff[1] = SinogramPtr->N_p; dimTiff[2] = SinogramPtr->N_r; dimTiff[3] = SinogramPtr->N_t;
    if (TomoInputsPtr->Write2Tiff == 1)
    {
        if (WriteMultiDimArray2Tiff (scaled_error_file, dimTiff, 0, 3, 1, 2, &(ErrorSino[0][0][0]), 0, TomoInputsPtr->debug_file_ptr)) goto error;
        if (WriteBoolArray2Tiff (projselect_file, dimTiff, 0, 3, 1, 2, &(SinogramPtr->ProjSelect[0][0][0]), 0, TomoInputsPtr->debug_file_ptr)) goto error;
    }
    multifree(ErrorSino,3);
    multifree(H_r,2);
    free(H_t);
    multifree(Mask,3);
    multifree(MagUpdateMap, 4);
    check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Finished running ICD_BackProject.\n");
    flag = fflush(TomoInputsPtr->debug_file_ptr);
    if (flag != 0 )
        check_warn(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Cannot flush buffer.\n");
    check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "The estimated value of variance parameter is %f.\n", TomoInputsPtr->var_est);
    return(0);
error:
    /* Cleanup path shared by every failure above. */
    multifree(ErrorSino,3);
    multifree(H_r,2);
    free(H_t);
    multifree(Mask,3);
    multifree(MagUpdateMap, 4);
    return(-1);
}
|
mea_puu_traco.c | #include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <omp.h>
#include <math.h>
#include <time.h>
#define min(a,b) (((a)<(b))?(a):(b))
#define MIN(a,b) (((a)<(b))?(a):(b))
#define max(a,b) (((a)>(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
double ** Q;
double ** Qbp;
double ** Pbp;
double ** Pu;
double ** M;
int Ebp = 0; // Energy weight of base pair -2, -1, 0, 1, 2
int RT = 1; // 'Normalized' temperature 1,2,3,4,5
float ERT;
int l = 0; //minimum loop length 0-5
int delta = 1; // Base pair weighting 1-5
char * RNA; //only ACGU
int N;
int DIM;
#include "../mem.h"
/* Return 1 when RNA[i] and RNA[j] can form a base pair (Watson-Crick
 * A-U / G-C, or the G-U wobble pair), 0 otherwise. */
int paired(int i, int j) {
	char first = RNA[i];
	char second = RNA[j];

	switch (first) {
	case 'A': return second == 'U';
	case 'U': return second == 'A' || second == 'G';
	case 'G': return second == 'C' || second == 'U';
	case 'C': return second == 'G';
	default:  return 0;
	}
}
int main(int argc, char *argv[]){
int num_proc=1;
int i,j,k,ll,p,q;
int c0, c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c15;
int t1, t2, t3, t4, t5, t6,t7;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
ERT = exp((float)-Ebp/(float)RT);
srand(time(NULL));
if(argc > 1)
num_proc = atoi(argv[1]);
int kind=1;
N = 8;
DIM = 12;
if(argc > 2)
N = atoi(argv[2]);
DIM = N+10;
if(argc > 3)
kind = atoi(argv[3]);
omp_set_num_threads(num_proc);
//printf(" -exp(Ebp/RT) = %5.3f\n", ERT);
RNA = (char*) malloc(DIM * sizeof(char*)); //read from FASTA file
rand_seq(RNA, N);
//printf("Sequence: ");
//for(i=0; i<N; i++)
// printf("%c", RNA[i]);
//printf("\n\n");
Q = memd();
Qbp = memd();
Pbp = memd();
Pu = memd();
M = memd();
rna_array_init(Q, 1, 1);
rna_array_init(Qbp, 0, 0);
rna_array_init(Pbp, 0, 0);
rna_array_init(Pu, 0, 0);
rna_array_init(M, 0, 0);
double * Puu = (double*)malloc(DIM * sizeof(double));
double start = omp_get_wtime();
// compute the partition functions Q and Qbp
if(kind==1){
#pragma scop
for(i=0; i<=N; i++){
Puu[i] = 1;
for(j=i+1; j<N; j++){
Puu[i] += -1 * Pbp[i][j+1];
}
for(k=0; k<i; k++){
Puu[i] += -1 * Pbp[k][i+1];
}
}
#pragma endscop
}
if(kind==2) // pluto
{
printf("pluto\n");
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if (N >= 0) {
lbp=0;
ubp=floord(N,16);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)
for (t2=lbp;t2<=ubp;t2++) {
lbv=16*t2;
ubv=min(N,16*t2+15);
#pragma ivdep
#pragma vector always
for (t3=lbv;t3<=ubv;t3++) {
Puu[t3] = 1;;
}
}
if (N >= 1) {
lbp=0;
ubp=floord(N,16);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)
for (t2=lbp;t2<=ubp;t2++) {
if (t2 <= floord(N-2,16)) {
for (t4=t2;t4<=floord(N-1,16);t4++) {
for (t5=16*t2;t5<=min(min(N-2,16*t2+15),16*t4+14);t5++) {
for (t7=max(16*t4,t5+1);t7<=min(N-1,16*t4+15);t7++) {
Puu[t5] += -1 * Pbp[t5][t7+1];;
}
}
}
}
for (t4=0;t4<=min(floord(N-1,16),t2);t4++) {
for (t5=max(16*t2,16*t4+1);t5<=min(N,16*t2+15);t5++) {
for (t7=16*t4;t7<=min(16*t4+15,t5-1);t7++) {
Puu[t5] += -1 * Pbp[t7][t5+1];;
}
}
}
}
}
}
/* End of CLooG code */
}
if(kind==3) // traco
{
printf("traco\n");
}
if(kind==4) // traco tstile
{
printf("traco cor\n");
#pragma omp parallel for
for( c1 = 0; c1 <= N/16; c1 += 1) {
for( c2 = 0; c2 <= min(1, -8 * c1 + N / 2); c2 += 1) {
if (c2 == 1) {
for( c3 = 0; c3 <= -c1 + (N - 2) / 16; c3 += 1)
for( c5 = 16 * c1; c5 <= min(16 * c1 + 15, N - 16 * c3 - 2); c5 += 1)
for( c7 = 16 * c3 + c5 + 1; c7 <= min(N - 1, 16 * c3 + c5 + 16); c7 += 1)
Puu[c5] += -1 * Pbp[c5][c7+1];
} else {
for( c5 = 16 * c1; c5 <= min(N, 16 * c1 + 15); c5 += 1)
Puu[c5] = 1;
}
}
for( c3 = 0; c3 <= min(c1, floord(N - 1, 16)); c3 += 1)
for( c5 = max(16 * c1, 16 * c3 + 1); c5 <= min(N, 16 * c1 + 15); c5 += 1)
for( c7 = 16 * c3; c7 <= min(16 * c3 + 15, c5 - 1); c7 += 1)
Puu[c5] += -1 * Pbp[k][c5+1];
}
}
double stop = omp_get_wtime();
printf("%.4f\n",stop - start);
//printf("Q\n");
//rna_array_print(Q);
//printf("Qbp\n");
//rna_array_print(Qbp);
exit(0);
return 0;
}
|
net_md5_fmt_plug.c | /* Cracker for RIPv2 MD5 authentication hashes.
*
* This software is Copyright (c) 2013, Dhiru Kholia <dhiru [at] openwall.com>,
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Added linkage to dynamic (type dynamic_39) for any salt 230 bytes or less,
* by Jim Fougeron. Any salts > 239 bytes will still be handled by this full
* format. dynamic is limited to 256 bytes, which 'should' get us 240 bytes
* of salt. I think we might be able to get 239 bytes (due to a few issues).
* 240 byte salts fail. So, for peace of mind, I am limiting to 230 byte salts
* within dynamic. This is the FIRST format that is hybrid fat-thin.
*/
#if AC_BUILT
#include "autoconfig.h"
#endif
#ifndef DYNAMIC_DISABLED
#if FMT_EXTERNS_H
extern struct fmt_main fmt_netmd5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_netmd5);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048 // XXX
#endif
#endif
#include "formats.h"
#include "dynamic.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#define FORMAT_LABEL "net-md5"
#define FORMAT_NAME "\"Keyed MD5\" RIPv2, OSPF, BGP, SNMPv2"
#define FORMAT_TAG "$netmd5$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
// RIPv2 truncates (or null pads) passwords to length 16
#define PLAINTEXT_LENGTH 16
#define BINARY_SIZE 16
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN MEM_ALIGN_WORD
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define MAX_SALT_LEN 1500
static struct fmt_tests tests[] = {
/* RIPv2 MD5 authentication hashes */
{ "02020000ffff0003002c01145267d48d000000000000000000020000ac100100ffffff000000000000000001ffff0001$1e372a8a233c6556253a0909bc3dcce6", "quagga"},
{FORMAT_TAG "02020000ffff0003002c01145267d48f000000000000000000020000ac100100ffffff000000000000000001ffff0001$ed9f940c3276afcc06d15babe8a1b61b", "quagga"},
{FORMAT_TAG "02020000ffff0003002c01145267d490000000000000000000020000ac100100ffffff000000000000000001ffff0001$c9f7763f80fcfcc2bbbca073be1f5df7", "quagga"},
{FORMAT_TAG "02020000ffff0003002c01145267d49a000000000000000000020000ac100200ffffff000000000000000001ffff0001$3f6a72deeda200806230298af0797997", "quagga"},
{FORMAT_TAG "02020000ffff0003002c01145267d49b000000000000000000020000ac100200ffffff000000000000000001ffff0001$b69184bacccc752cadf78cac455bd0de", "quagga"},
{FORMAT_TAG "02020000ffff0003002c01145267d49d000000000000000000020000ac100100ffffff000000000000000001ffff0001$6442669c577e7662188865a54c105d0e", "quagga"},
{FORMAT_TAG "02020000ffff0003002c01145267e076000000000000000000020000ac100200ffffff000000000000000001ffff0001$4afe22cf1750d9af8775b25bcf9cfb8c", "abcdefghijklmnop"},
{FORMAT_TAG "02020000ffff0003002c01145267e077000000000000000000020000ac100200ffffff000000000000000001ffff0001$326b12f6da03048a655ea4d8f7e3e123", "abcdefghijklmnop"},
{FORMAT_TAG "02020000ffff0003002c01145267e2ab000000000000000000020000ac100100ffffff000000000000000001ffff0001$ad76c40e70383f6993f54b4ba6492a26", "abcdefghijklmnop"},
/* OSPFv2 MD5 authentication hashes */
{"$netmd5$0201002cac1001010000000000000002000001105267ff8fffffff00000a0201000000280000000000000000$445ecbb27272bd791a757a6c85856150", "abcdefghijklmnop"},
{FORMAT_TAG "0201002cac1001010000000000000002000001105267ff98ffffff00000a0201000000280000000000000000$d4c248b417b8cb1490e02c5e99eb0ad1", "abcdefghijklmnop"},
{FORMAT_TAG "0201002cac1001010000000000000002000001105267ffa2ffffff00000a0201000000280000000000000000$528d9bf98be8213482af7295307625bf", "abcdefghijklmnop"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static void get_ptr();
static void init(struct fmt_main *self);
static void done(void);
#define MAGIC 0xfe5dd5ef
static struct custom_salt {
ARCH_WORD_32 magic;
int length;
unsigned char salt[MAX_SALT_LEN]; // fixd len, but should be OK
} *cur_salt;
static int dyna_salt_seen=0;
static char Conv_Buf[300]; // max salt length we will pass to dyna is 230. 300 is MORE than enough.
static struct fmt_main *pDynamicFmt, *pNetMd5_Dyna;
/* this function converts a 'native' net-md5 signature string into a $dynamic_39$ syntax string */
/* Convert a 'native' net-md5 signature string into $dynamic_39$ syntax.
 * Returns the input unchanged if it is already in dynamic format, "*" if
 * the expected '$'-delimited fields are missing.  NOTE: the size passed to
 * snprintf is sizeof(Conv_Buf); every caller hands in Conv_Buf. */
static char *Convert(char *Buf, char *ciphertext)
{
	char *salt_start, *hash_start;
	int salt_span;

	if (text_in_dynamic_format_already(pDynamicFmt, ciphertext))
		return ciphertext;

	salt_start = strchr(&ciphertext[2], '$');
	if (salt_start == NULL)
		return "*";
	hash_start = strchr(salt_start + 1, '$');
	if (hash_start == NULL)
		return "*";

	salt_span = (int)(hash_start - salt_start);
	snprintf(Buf, sizeof(Conv_Buf), "$dynamic_39$%s$HEX%*.*s",
	         hash_start + 1, salt_span, salt_span, salt_start);
	return Buf;
}
/* Validate a candidate hash line.  Accepts both tagged ($netmd5$...) and
 * bare hex input.  If the trailing field is not exactly a 32-hex-digit MD5,
 * the line is handed to the linked dynamic_39 format's validator instead
 * (this is the "thin" half of the hybrid format). */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q = NULL;
	int len;
	p = ciphertext;
	/* Skip the optional $netmd5$ tag. */
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	/* q -> the hash part, after the last '$'. */
	q = strrchr(ciphertext, '$');
	if (!q)
		return 0;
	q = q + 1;
	/* Salt (between p and q) must fit in custom_salt.salt. */
	if ((q - p - 1) > MAX_SALT_LEN * 2)
		return 0;
	len = strspn(q, HEXCHARS_lc);
	/* Not a plain 32-hex-digit tail: delegate to dynamic_39's validator. */
	if (len != BINARY_SIZE * 2 || len != strlen(q)) {
		get_ptr();
		return pDynamicFmt->methods.valid(ciphertext, pDynamicFmt);
	}
	/* The salt portion must itself be all lowercase hex. */
	if (strspn(p, HEXCHARS_lc) != q - p - 1)
		return 0;
	return 1;
}
/* Decode the hex salt (everything before the final '$') into a custom_salt.
 * Salts shorter than 230 bytes are routed through the dynamic_39 format:
 * its salt blob is memcpy'd over our buffer and cs->magic stays zero, which
 * is how the other methods detect a dyna-handled salt.  Longer salts are
 * kept native and tagged with MAGIC. */
static void *get_salt(char *ciphertext)
{
	static char *pBuf=NULL;
	struct custom_salt *cs;
	char *orig_ct = ciphertext;
	int i, len;
	/* One lazily-allocated static buffer; reused across calls. */
	if (!pBuf) pBuf = (char *)mem_alloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD);
	cs = (struct custom_salt*) pBuf;
	memset(cs, 0, sizeof(*cs));
	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;
	/* Salt length in bytes: two hex digits per byte. */
	len = (strrchr(ciphertext, '$') - ciphertext) / 2;
	for (i = 0; i < len; i++)
		cs->salt[i] = (atoi16[ARCH_INDEX(ciphertext[2 * i])] << 4) |
			atoi16[ARCH_INDEX(ciphertext[2 * i + 1])];
	if (len < 230) {
		// return our memset buffer (putting the dyna salt pointer into it).
		// This keeps the 'pre-cleaned salt() warning from hitting this format)
		//return pDynamicFmt->methods.salt(Convert(Conv_Buf, orig_ct));
		memcpy((char*)cs, pDynamicFmt->methods.salt(Convert(Conv_Buf, orig_ct)), pDynamicFmt->params.salt_size);
		dyna_salt_seen=1;
		return cs;
	}
	cs->magic = MAGIC;
	cs->length = len;
	return cs;
}
/* Decode the 32-hex-digit MD5 after the last '$' into 16 raw bytes.
 * Dynamic-format input is decoded by the linked dynamic_39 format. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *hex;
	int i;

	if (text_in_dynamic_format_already(pDynamicFmt, ciphertext))
		// returns proper 16 bytes, so we do not need to copy into our buffer.
		return pDynamicFmt->methods.binary(ciphertext);

	hex = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++, hex += 2)
		out[i] = (unsigned char)((atoi16[ARCH_INDEX(hex[0])] << 4) |
		                          atoi16[ARCH_INDEX(hex[1])]);
	return out;
}
/* Record the current salt; forward dyna-handled salts (magic unset) to the
 * linked dynamic_39 format. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	get_ptr();
	if (cur_salt->magic != MAGIC)
		pDynamicFmt->methods.set_salt(salt);
}
/* Hash every queued key as MD5(salt || key).  Salts without our MAGIC tag
 * belong to the dynamic format, which does its own crypt. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;

	if (cur_salt->magic != MAGIC)
		return pDynamicFmt->methods.crypt_all(pcount, salt);

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		MD5_CTX ctx;

		MD5_Init(&ctx);
		MD5_Update(&ctx, cur_salt->salt, cur_salt->length);
		/* Keys were zero-padded to PLAINTEXT_LENGTH in set_key(). */
		MD5_Update(&ctx, saved_key[index], PLAINTEXT_LENGTH);
		MD5_Final((unsigned char *)crypt_out[index], &ctx);
	}
	return count;
}
/* Quick scan: does any computed hash share its first 32 bits with the
 * target binary?  (Full comparison happens in cmp_one.) */
static int cmp_all(void *binary, int count)
{
	int i;

	if (cur_salt->magic != MAGIC)
		return pDynamicFmt->methods.cmp_all(binary, count);

	for (i = 0; i < count; i++)
		if (crypt_out[i][0] == ((ARCH_WORD_32 *)binary)[0])
			return 1;
	return 0;
}
/* Full 16-byte comparison of one computed hash against the target. */
static int cmp_one(void *binary, int index)
{
	if (cur_salt->magic != MAGIC)
		return pDynamicFmt->methods.cmp_one(binary, index);
	return memcmp(binary, crypt_out[index], BINARY_SIZE) == 0;
}
/* cmp_one() already compared the full BINARY_SIZE digest, so there is
 * nothing further to verify against the source string. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store a candidate key.  When a dyna-handled salt has been seen, the
 * dynamic format's key store is kept in sync as well. */
static void netmd5_set_key(char *key, int index)
{
	if(dyna_salt_seen)
		pDynamicFmt->methods.set_key(key, index);
	/* strncpy will pad with zeros, which is needed */
	strncpy(saved_key[index], key, sizeof(saved_key[0]));
}
/* Return the plaintext candidate stored at the given index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Canonicalize input: prepend FORMAT_TAG to bare-but-valid hashes, leaving
 * hashes already in dynamic format (and everything else) untouched. */
static char *prepare(char *fields[10], struct fmt_main *self) {
	static char buf[sizeof(cur_salt->salt)*2+TAG_LENGTH+1];
	char *hash = fields[1];
	if (strncmp(hash, FORMAT_TAG, TAG_LENGTH) && valid(hash, self)) {
		get_ptr();
		/* Dynamic-syntax lines are passed through as-is. */
		if (text_in_dynamic_format_already(pDynamicFmt, hash))
			return hash;
		sprintf(buf, "%s%s", FORMAT_TAG, hash);
		return buf;
	}
	return hash;
}
/* Format descriptor registered with the cracker core.  This is a hybrid
 * "fat-thin" format (see file header): short salts are serviced through
 * dynamic_39 by the methods above, long salts natively. */
struct fmt_main fmt_netmd5 = {
	{ /* params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT, /* overwritten by get_ptr() from dynamic_39 */
		MAX_KEYS_PER_CRYPT, /* overwritten by get_ptr() from dynamic_39 */
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, { /* methods */
		init,
		done,
		fmt_default_reset,
		prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		netmd5_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
/* Lazily establish the thin link to dynamic_39, exactly once.  Afterwards
 * this format's min/max keys-per-crypt and algorithm-name reflect the
 * linked dynamic format. */
static void get_ptr() {
	if (!pDynamicFmt) {
		char *Buf;
		/* Link against a private copy of fmt_netmd5.  NOTE(review): the
		 * copy suggests dynamic_THIN_FORMAT_LINK modifies the struct it is
		 * given — confirm against dynamic.h. */
		pNetMd5_Dyna = mem_alloc_tiny(sizeof(fmt_netmd5), 16);
		memcpy(pNetMd5_Dyna, &fmt_netmd5, sizeof(fmt_netmd5));
		pDynamicFmt = dynamic_THIN_FORMAT_LINK(pNetMd5_Dyna, Convert(Conv_Buf, tests[1].ciphertext), "net-md5", 0);
		fmt_netmd5.params.min_keys_per_crypt = pDynamicFmt->params.min_keys_per_crypt;
		fmt_netmd5.params.max_keys_per_crypt = pDynamicFmt->params.max_keys_per_crypt;
		/* +4 covers the literal " or " inserted by the format string. */
		Buf = mem_alloc_tiny(strlen(fmt_netmd5.params.algorithm_name) + 4 + strlen("dynamic_39") + 1, 1);
		sprintf(Buf, "%s or %s", fmt_netmd5.params.algorithm_name, "dynamic_39");
		fmt_netmd5.params.algorithm_name = Buf;
		//pDynamicFmt->methods.init(pDynamicFmt);
	}
}
/* One-time format initialization: establish the dynamic_39 link, then size
 * the key and result buffers from the (possibly dyna-adjusted) max count. */
static void init(struct fmt_main *self)
{
	// We have to allocate our dyna_39 object first, because we get 'modified' min/max counts from there.
	get_ptr();
	if (self->private.initialized == 0) {
		/* Re-link with final argument 1 (vs 0 in get_ptr).  NOTE(review):
		 * presumably this also initializes the dynamic format — confirm
		 * against dynamic.h. */
		pDynamicFmt = dynamic_THIN_FORMAT_LINK(pNetMd5_Dyna, Convert(Conv_Buf, tests[1].ciphertext), "net-md5", 1);
		self->private.initialized = 1;
	}
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}
/* Release the key/result buffers, then let the linked dynamic format tear
 * down its own state. */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
	pDynamicFmt->methods.done();
}
#endif /* plugin stanza */
#endif /* DYNAMIC_DISABLED */
|
irbuilder_unroll_full.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// CHECK-LABEL: define {{.*}}@unroll_full(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[I:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8
// CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4
// CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8
// CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8
// CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8
// CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8
// CHECK-NEXT: store i32 0, i32* %[[I]], align 4
// CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0
// CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4
// CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]])
// CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_PREHEADER]]:
// CHECK-NEXT: br label %[[OMP_LOOP_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_HEADER]]:
// CHECK-NEXT: %[[OMP_LOOP_IV:.+]] = phi i32 [ 0, %[[OMP_LOOP_PREHEADER]] ], [ %[[OMP_LOOP_NEXT:.+]], %[[OMP_LOOP_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_LOOP_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_COND]]:
// CHECK-NEXT: %[[OMP_LOOP_CMP:.+]] = icmp ult i32 %[[OMP_LOOP_IV]], %[[DOTCOUNT]]
// CHECK-NEXT: br i1 %[[OMP_LOOP_CMP]], label %[[OMP_LOOP_BODY:.+]], label %[[OMP_LOOP_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_BODY]]:
// CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[OMP_LOOP_IV]], %struct.anon.0* %[[AGG_CAPTURED1]])
// CHECK-NEXT: %[[TMP3:.+]] = load float*, float** %[[B_ADDR]], align 8
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP4]] to i64
// CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP3]], i64 %[[IDXPROM]]
// CHECK-NEXT: %[[TMP5:.+]] = load float, float* %[[ARRAYIDX]], align 4
// CHECK-NEXT: %[[TMP6:.+]] = load float*, float** %[[C_ADDR]], align 8
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP7]] to i64
// CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP6]], i64 %[[IDXPROM2]]
// CHECK-NEXT: %[[TMP8:.+]] = load float, float* %[[ARRAYIDX3]], align 4
// CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP5]], %[[TMP8]]
// CHECK-NEXT: %[[TMP9:.+]] = load float*, float** %[[D_ADDR]], align 8
// CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM4:.+]] = sext i32 %[[TMP10]] to i64
// CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP9]], i64 %[[IDXPROM4]]
// CHECK-NEXT: %[[TMP11:.+]] = load float, float* %[[ARRAYIDX5]], align 4
// CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP11]]
// CHECK-NEXT: %[[TMP12:.+]] = load float*, float** %[[A_ADDR]], align 8
// CHECK-NEXT: %[[TMP13:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM7:.+]] = sext i32 %[[TMP13]] to i64
// CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP12]], i64 %[[IDXPROM7]]
// CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_INC]]:
// CHECK-NEXT: %[[OMP_LOOP_NEXT]] = add nuw i32 %[[OMP_LOOP_IV]], 1
// CHECK-NEXT: br label %[[OMP_LOOP_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_EXIT]]:
// CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_AFTER]]:
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// Stores b[i]*c[i]*d[i] into a[i] for i = 0,1.  The '#pragma omp unroll full'
// directive requests full unrolling of this constant-trip-count loop; the
// resulting IR shape is pinned by the autogenerated CHECK lines above, so the
// code itself must not change.
void unroll_full(float *a, float *b, float *c, float *d) {
#pragma omp unroll full
  for (int i = 0; i < 2; i++) {
    a[i] = b[i] * c[i] * d[i];
  }
}
#endif // HEADER
// CHECK-LABEL: define {{.*}}@__captured_stmt(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8
// CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4
// CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4
// CHECK-NEXT: store i32 2, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP4]], %[[TMP5]]
// CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_TRUE]]:
// CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP6]], %[[TMP7]]
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP8]], 1
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]]
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP9]]
// CHECK-NEXT: br label %[[COND_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_FALSE]]:
// CHECK-NEXT: br label %[[COND_END]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_END]]:
// CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ]
// CHECK-NEXT: %[[TMP10:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP10]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LABEL: define {{.*}}@__captured_stmt.1(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]]
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]]
// CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4}
// CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51}
// CHECK: ![[META2:[0-9]+]] =
// CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]}
// CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"}
// CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.full"}
|
reciprocal_to_normal.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <stdlib.h>
#include <math.h>
#include <phonoc_utils.h>
#include <phonoc_const.h>
#include <phonoc_array.h>
#include <phonon3_h/reciprocal_to_normal.h>
#include <lapack_wrapper.h>
#ifdef MEASURE_R2N
#include <unistd.h>
#include <time.h>
#endif
static lapack_complex_double fc3_sum_in_reciprocal_to_normal
(const int bi0,
const int bi1,
const int bi2,
const lapack_complex_double *eigvecs0,
const lapack_complex_double *eigvecs1,
const lapack_complex_double *eigvecs2,
const lapack_complex_double *fc3_reciprocal,
const double *masses,
const int num_atom);
static double get_fc3_sum
(const int j,
const int k,
const int bi,
const double *freqs0,
const double *freqs1,
const double *freqs2,
const lapack_complex_double *eigvecs0,
const lapack_complex_double *eigvecs1,
const lapack_complex_double *eigvecs2,
const lapack_complex_double *fc3_reciprocal,
const double *masses,
const int num_atom,
const double cutoff_frequency);
/* Compute the squared modulus of the third-order force constants projected
   onto the phonon (normal-mode) basis for every band triplet listed in
   g_pos, writing each result to fc3_normal_squared[g_pos[i][3]].
   g_pos[i] = {band0 index into band_indices, band1, band2, output offset}.
   Triplets whose band-0 frequency is at or below cutoff_frequency are
   skipped and their output slot is left unmodified (presumably the caller
   zero-initializes fc3_normal_squared -- TODO confirm against caller).
   openmp_at_bands != 0 enables the OpenMP parallel loop. */
void reciprocal_to_normal_squared
(double *fc3_normal_squared,
PHPYCONST int (*g_pos)[4],
const int num_g_pos,
const lapack_complex_double *fc3_reciprocal,
const double *freqs0,
const double *freqs1,
const double *freqs2,
const lapack_complex_double *eigvecs0,
const lapack_complex_double *eigvecs1,
const lapack_complex_double *eigvecs2,
const double *masses,
const int *band_indices,
const int num_band0,
const int num_band,
const double cutoff_frequency,
const int openmp_at_bands)
{
int i, num_atom;
#ifdef MEASURE_R2N
double loopTotalCPUTime, loopTotalWallTime;
time_t loopStartWallTime;
clock_t loopStartCPUTime;
#endif
/* Three Cartesian degrees of freedom per atom. */
num_atom = num_band / 3;
#ifdef MEASURE_R2N
loopStartWallTime = time(NULL);
loopStartCPUTime = clock();
#endif
/* Each iteration writes a distinct output slot (g_pos[i][3]), so the
   loop is data-parallel without further synchronization. */
#pragma omp parallel for if (openmp_at_bands)
for (i = 0; i < num_g_pos; i++) {
if (freqs0[band_indices[g_pos[i][0]]] > cutoff_frequency) {
fc3_normal_squared[g_pos[i][3]] = get_fc3_sum(g_pos[i][1],
g_pos[i][2],
band_indices[g_pos[i][0]],
freqs0,
freqs1,
freqs2,
eigvecs0,
eigvecs1,
eigvecs2,
fc3_reciprocal,
masses,
num_atom,
cutoff_frequency);
}
}
#ifdef MEASURE_R2N
/* Optional wall/CPU time report for the loop above. */
loopTotalCPUTime = (double)(clock() - loopStartCPUTime) / CLOCKS_PER_SEC;
loopTotalWallTime = difftime(time(NULL), loopStartWallTime);
printf("  %1.3fs (%1.3fs CPU)\n", loopTotalWallTime, loopTotalCPUTime);
#endif
}
/* Return |sum_in_reciprocal_to_normal|^2 / (f0 * f1 * f2) for the band
 * triplet (bi, j, k), or 0 when either partner frequency fails to exceed
 * cutoff_frequency. */
static double get_fc3_sum
(const int j,
const int k,
const int bi,
const double *freqs0,
const double *freqs1,
const double *freqs2,
const lapack_complex_double *eigvecs0,
const lapack_complex_double *eigvecs1,
const lapack_complex_double *eigvecs2,
const lapack_complex_double *fc3_reciprocal,
const double *masses,
const int num_atom,
const double cutoff_frequency)
{
  double re, im, freq_product;
  lapack_complex_double total;

  /* Guard clause: soft/imaginary modes below the cutoff contribute nothing. */
  if (!(freqs1[j] > cutoff_frequency && freqs2[k] > cutoff_frequency)) {
    return 0;
  }

  freq_product = freqs0[bi] * freqs1[j] * freqs2[k];
  total = fc3_sum_in_reciprocal_to_normal(bi, j, k,
                                          eigvecs0, eigvecs1, eigvecs2,
                                          fc3_reciprocal,
                                          masses,
                                          num_atom);
  re = lapack_complex_double_real(total);
  im = lapack_complex_double_imag(total);
  /* Squared modulus normalized by the product of the three frequencies. */
  return (re * re + im * im) / freq_product;
}
/* Contract the reciprocal-space fc3 tensor with the three phonon
   eigenvectors (bands bi0, bi1, bi2), mass-weighting each atom triple by
   1/sqrt(m_l * m_m * m_n), and return the complex sum.
   fc3_reciprocal is laid out as [l][m][n][i][j][k] with 27 = 3*3*3
   Cartesian entries per atom triple, as the index arithmetic below shows. */
static lapack_complex_double fc3_sum_in_reciprocal_to_normal
(const int bi0,
const int bi1,
const int bi2,
const lapack_complex_double *eigvecs0,
const lapack_complex_double *eigvecs1,
const lapack_complex_double *eigvecs2,
const lapack_complex_double *fc3_reciprocal,
const double *masses,
const int num_atom)
{
int i, j, k, l, m, n, index_l, index_lm, baseIndex;
double sum_real, sum_imag, mmm, mass_l, mass_lm;
lapack_complex_double eig_prod, eig_prod1;
sum_real = 0;
sum_imag = 0;
for (l = 0; l < num_atom; l++) {   /* first atom */
mass_l = masses[l];
index_l = l * num_atom * num_atom * 27;
for (m = 0; m < num_atom; m++) {   /* second atom */
mass_lm = mass_l * masses[m];
index_lm = index_l + m * num_atom * 27;
for (i = 0; i < 3; i++) {          /* Cartesian component of atom l */
for (j = 0; j < 3; j++) {          /* Cartesian component of atom m */
/* Product of the first two eigenvector components is hoisted out of
   the n/k loops since it does not depend on them. */
eig_prod1 = phonoc_complex_prod
(eigvecs0[(l * 3 + i) * num_atom * 3 + bi0],
eigvecs1[(m * 3 + j) * num_atom * 3 + bi1]);
for (n = 0; n < num_atom; n++) {   /* third atom */
mmm = 1.0 / sqrt(mass_lm * masses[n]);  /* mass weight for (l,m,n) */
baseIndex = index_lm + n * 27 + i * 9 + j * 3;
for (k = 0; k < 3; k++) {          /* Cartesian component of atom n */
eig_prod = phonoc_complex_prod
(eig_prod1, eigvecs2[(n * 3 + k) * num_atom * 3 + bi2]);
eig_prod = phonoc_complex_prod
(eig_prod, fc3_reciprocal[baseIndex + k]);
sum_real += lapack_complex_double_real(eig_prod) * mmm;
sum_imag += lapack_complex_double_imag(eig_prod) * mmm;
}
}
}
}
}
}
return lapack_make_complex_double(sum_real, sum_imag);
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` operands, normalizing so
 * that result->tv_usec comes out non-negative.
 * Note: *y is modified in place as scratch space during normalization.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y until x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec)
    {
      int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
      y->tv_sec += borrow;
      y->tv_usec -= 1000000 * borrow;
    }
  /* Carry whole seconds out of y when the microsecond gap exceeds 1s. */
  if (x->tv_usec - y->tv_usec > 1000000)
    {
      int carry = (x->tv_usec - y->tv_usec) / 1000000;
      y->tv_sec -= carry;
      y->tv_usec += 1000000 * carry;
    }
  /* At this point the microsecond difference is certainly non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 4;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,8);t1++) {
lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(16*t2-Nz,4)),2*t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(8*t1+Ny+13,4)),floord(16*t2+Ny+12,4)),floord(16*t1-16*t2+Nz+Ny+11,4));t3++) {
for (t4=max(max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-28,32)),ceild(4*t3-Ny-28,32));t4<=min(min(min(min(floord(4*t3+Nx,32),floord(Nt+Nx-4,32)),floord(8*t1+Nx+13,32)),floord(16*t2+Nx+12,32)),floord(16*t1-16*t2+Nz+Nx+11,32));t4++) {
for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),4*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),4*t3+2),32*t4+30),16*t1-16*t2+Nz+13);t5++) {
for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
lbv=max(32*t4,t5+1);
ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y into *result so that
   result->tv_usec is non-negative.  *y is normalized in place (scratch).
   Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
 * tv_usec is certainly positive.
 */
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
/* Driver for the PLUTO-tiled order-2 3D 25-point (wave-equation) stencil.
 * Usage: prog Nx Ny Nz Nt  (interior sizes; +8 halo added per dimension).
 * Fixes: Nx/Ny/Nz/Nt were read uninitialized (undefined behavior) when
 * fewer than four arguments were supplied; the initial
 * `malloc(sizeof(double**))` assigned to roc2 was immediately leaked by
 * reassignment; tile_size was leaked. */
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
/* Require all four sizes instead of silently using garbage values. */
if (argc < 5) {
fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
return EXIT_FAILURE;
}
Nx = atoi(argv[1])+8;  /* +8: four halo cells on each side (order 2 * 4) */
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
Nt = atoi(argv[4]);
/* Two time planes plus a coefficient grid, row-allocated. */
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2;  /* fix: was malloc'd here and immediately leaked below */
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 8;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
/* Deterministic seed so every run uses identical input data.
   NOTE(review): A[1] is never initialized yet the update reads
   A[(t5+1)%2] on its right-hand side (leapfrog previous plane) -- the
   first sweep therefore reads indeterminate values; confirm whether the
   benchmark intends this (timing only) or needs A[1] initialized. */
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
/* Order-2-in-time, order-8-in-space finite-difference coefficients. */
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code (auto-generated time-skewed tiling; the t2 loop is
   the parallel wavefront dimension). */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=2*Nt-2;t1++) {
lbp=ceild(t1+2,2);
ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1,4),ceild(4*t2-Nz+5,8));t3<=min(min(floord(4*Nt+Ny-9,8),floord(2*t1+Ny-3,8)),floord(4*t2+Ny-9,8));t3++) {
for (t4=max(max(ceild(t1-12,16),ceild(4*t2-Nz-19,32)),ceild(8*t3-Ny-19,32));t4<=min(min(min(floord(4*Nt+Nx-9,32),floord(2*t1+Nx-3,32)),floord(4*t2+Nx-9,32)),floord(8*t3+Nx-5,32));t4++) {
for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(32*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
lbv=max(32*t4,4*t5+4);
ubv=min(32*t4+31,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
free(A);          /* fix: top-level array of planes was leaked */
free(tile_size);  /* fix: previously leaked */
return 0;
}
|
loop-11.c | #include <omp.h>
#include <stdlib.h>
#include <string.h>
/* Exercise `#pragma omp parallel for` with pointer-typed induction
   variables over a short-int buffer: increasing and decreasing loops with
   steps p++, p += 2, p = 4 + p, p = p + 4ULL and their mirrored forms.
   After each loop, verify that exactly the expected slots of buf were
   written; abort() on any mismatch.  Returns 0 on success. */
int
test1 (void)
{
short int buf[64], *p;
int i;
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for
for (p = &buf[10]; &buf[54] > p; p++)
*p = 5;
for (i = 0; i < 64; i++)
if (buf[i] != 5 * (i >= 10 && i < 54))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for
for (p = &buf[3]; &buf[63] >= p; p += 2)
p[-2] = 6;
for (i = 0; i < 64; i++)
if (buf[i] != 6 * ((i & 1) && i <= 61))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for
for (p = &buf[16]; &buf[51] > p; p = 4 + p)
p[2] = 7;
for (i = 0; i < 64; i++)
if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for
for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)
p[2] = -7;
for (i = 0; i < 64; i++)
if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
abort ();
/* Decreasing-direction variants of the four loops above. */
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for
for (p = &buf[53]; &buf[9] < p; --p)
*p = 5;
for (i = 0; i < 64; i++)
if (buf[i] != 5 * (i >= 10 && i < 54))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for
for (p = &buf[63]; &buf[3] <= p; p -= 2)
p[-2] = 6;
for (i = 0; i < 64; i++)
if (buf[i] != 6 * ((i & 1) && i <= 61))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for
for (p = &buf[48]; &buf[15] < p; p = -4 + p)
p[2] = 7;
for (i = 0; i < 64; i++)
if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for
for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)
p[2] = -7;
for (i = 0; i < 64; i++)
if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
abort ();
return 0;
}
/* Same pointer-induction loops as test1, but over an int buffer and with
   an explicit `schedule (static, 3)` clause; abort() on any mismatch. */
int
test2 (void)
{
int buf[64], *p;
int i;
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (static, 3)
for (p = &buf[10]; &buf[54] > p; p++)
*p = 5;
for (i = 0; i < 64; i++)
if (buf[i] != 5 * (i >= 10 && i < 54))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (static, 3)
for (p = &buf[3]; &buf[63] >= p; p += 2)
p[-2] = 6;
for (i = 0; i < 64; i++)
if (buf[i] != 6 * ((i & 1) && i <= 61))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (static, 3)
for (p = &buf[16]; &buf[51] > p; p = 4 + p)
p[2] = 7;
for (i = 0; i < 64; i++)
if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (static, 3)
for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)
p[2] = -7;
for (i = 0; i < 64; i++)
if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (static, 3)
for (p = &buf[53]; &buf[9] < p; --p)
*p = 5;
for (i = 0; i < 64; i++)
if (buf[i] != 5 * (i >= 10 && i < 54))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (static, 3)
for (p = &buf[63]; &buf[3] <= p; p -= 2)
p[-2] = 6;
for (i = 0; i < 64; i++)
if (buf[i] != 6 * ((i & 1) && i <= 61))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (static, 3)
for (p = &buf[48]; &buf[15] < p; p = -4 + p)
p[2] = 7;
for (i = 0; i < 64; i++)
if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (static, 3)
for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)
p[2] = -7;
for (i = 0; i < 64; i++)
if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
abort ();
return 0;
}
/* Same pointer-induction loops as test1 (int buffer), with an explicit
   `schedule (dynamic, 3)` clause; abort() on any mismatch. */
int
test3 (void)
{
int buf[64], *p;
int i;
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (dynamic, 3)
for (p = &buf[10]; &buf[54] > p; p++)
*p = 5;
for (i = 0; i < 64; i++)
if (buf[i] != 5 * (i >= 10 && i < 54))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (dynamic, 3)
for (p = &buf[3]; &buf[63] >= p; p += 2)
p[-2] = 6;
for (i = 0; i < 64; i++)
if (buf[i] != 6 * ((i & 1) && i <= 61))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (dynamic, 3)
for (p = &buf[16]; &buf[51] > p; p = 4 + p)
p[2] = 7;
for (i = 0; i < 64; i++)
if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (dynamic, 3)
for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)
p[2] = -7;
for (i = 0; i < 64; i++)
if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (dynamic, 3)
for (p = &buf[53]; &buf[9] < p; --p)
*p = 5;
for (i = 0; i < 64; i++)
if (buf[i] != 5 * (i >= 10 && i < 54))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (dynamic, 3)
for (p = &buf[63]; &buf[3] <= p; p -= 2)
p[-2] = 6;
for (i = 0; i < 64; i++)
if (buf[i] != 6 * ((i & 1) && i <= 61))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (dynamic, 3)
for (p = &buf[48]; &buf[15] < p; p = -4 + p)
p[2] = 7;
for (i = 0; i < 64; i++)
if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (dynamic, 3)
for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)
p[2] = -7;
for (i = 0; i < 64; i++)
if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
abort ();
return 0;
}
/* Same pointer-induction loops as test1 (int buffer), with
   `schedule (runtime)`; the caller selects the actual schedule via
   omp_set_schedule() before each invocation.  abort() on any mismatch. */
int
test4 (void)
{
int buf[64], *p;
int i;
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (runtime)
for (p = &buf[10]; &buf[54] > p; p++)
*p = 5;
for (i = 0; i < 64; i++)
if (buf[i] != 5 * (i >= 10 && i < 54))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (runtime)
for (p = &buf[3]; &buf[63] >= p; p += 2)
p[-2] = 6;
for (i = 0; i < 64; i++)
if (buf[i] != 6 * ((i & 1) && i <= 61))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (runtime)
for (p = &buf[16]; &buf[51] > p; p = 4 + p)
p[2] = 7;
for (i = 0; i < 64; i++)
if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (runtime)
for (p = &buf[16]; &buf[40] >= p; p = p + 4ULL)
p[2] = -7;
for (i = 0; i < 64; i++)
if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (runtime)
for (p = &buf[53]; &buf[9] < p; --p)
*p = 5;
for (i = 0; i < 64; i++)
if (buf[i] != 5 * (i >= 10 && i < 54))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (runtime)
for (p = &buf[63]; &buf[3] <= p; p -= 2)
p[-2] = 6;
for (i = 0; i < 64; i++)
if (buf[i] != 6 * ((i & 1) && i <= 61))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (runtime)
for (p = &buf[48]; &buf[15] < p; p = -4 + p)
p[2] = 7;
for (i = 0; i < 64; i++)
if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
abort ();
memset (buf, '\0', sizeof (buf));
#pragma omp parallel for schedule (runtime)
for (p = &buf[40]; &buf[16] <= p; p = p - 4ULL)
p[2] = -7;
for (i = 0; i < 64; i++)
if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
abort ();
return 0;
}
/* Run the pointer-iteration loop tests under default, static and dynamic
   schedules, then repeat the schedule(runtime) variant under several
   schedules selected through omp_set_schedule().  Any failure aborts. */
int
main (void)
{
test1 ();
test2 ();
test3 ();
omp_set_schedule (omp_sched_static, 0);   /* static, default chunk */
test4 ();
omp_set_schedule (omp_sched_static, 3);   /* static, chunk 3 */
test4 ();
omp_set_schedule (omp_sched_dynamic, 5);  /* dynamic, chunk 5 */
test4 ();
omp_set_schedule (omp_sched_guided, 2);   /* guided, chunk 2 */
test4 ();
return 0;
}
|
train.c | #define _GNU_SOURCE
#include <syscall.h>
#include <sched.h>
#include "graph.h"
#include "mainFunctions.h"
#include "powerperformacetracking.h"
#include "print.h"
#include <stdlib.h>
#include<unistd.h>
#define NO_OF_ARGS 2
//#define REPEAT 25
#define REPEAT 25
long long iters[8];
struct timeval start, end;
// We define all additional paramemter here
/* Intended to pin each OpenMP thread to the CPU core matching its thread
   id; the implementation is fully commented out, so this is currently a
   no-op.  NOTE(review): re-enabling would require assert() (no <assert.h>
   include is visible here) -- confirm before restoring. */
void setaffinity() {
/* #pragma omp parallel
{
cpu_set_t newcpu;
int threadid = omp_get_thread_num();
CPU_ZERO(&newcpu);
CPU_SET ( threadid , &newcpu) ;
int __t = sched_setaffinity ( syscall ( SYS_gettid ) , sizeof ( newcpu ) , &newcpu ) ;
assert(__t == 0);
}
*/
}
/*
void updateElementinArray(graph *G, int id) {
printf("The update Element Array %d \n", id);
char title[50];
node_t * G_member = (int*)malloc (G->numNodes * sizeof(int));
srand(0);
int i;
for(i = 0; i< G->numNodes; i++) {
G_member[i] = rand() % G->numNodes;
}
sprintf(title, "element_%d.csv",id);
gettimeofday(&start, NULL);
inittracking(title);
for(int abc = 0;abc<REPEAT; abc++) {
#pragma omp parallel
{
int threadid = omp_get_thread_num();
iters[threadid] = 0;
int t = 0;
#pragma omp for schedule(dynamic, PAR_CHUNKSIZE)
for (node_t u1 = 0; u1 < G->numNodes; u1 ++)
for (edge_t j_idx = G->begin[u1];j_idx < G->begin[u1+1] ; j_idx ++) {
iters[threadid]++;
node_t j = G->node_idx [j_idx];
node_t k = G_member[j];
if(k > G->numNodes/2) {
t++;
}
}
printf("dummy %d \n",t);
printf("The num iters thread id %d = %lld \n", threadid, iters[threadid]);
}
}
endtracking();
gettimeofday(&end, NULL);
printTiming(ALGO_KERNEL,((end.tv_sec - start.tv_sec)*1000 + ((double)(end.tv_usec - start.tv_usec))/1000));
free(G_member);
}
*/
/* void updateArrayPerIteration(graph *G, int id) { */
/* printf("The update Array %d \n", id); */
/* node_t * G_member = (int*)malloc (G->numNodes * sizeof(int)); */
/* srand(0); */
/* int i; */
/* for(i = 0; i< G->numNodes; i++) { */
/* G_member[i] = rand() % G->numNodes; */
/* } */
/* char title[50]; */
/* sprintf(title, "array_%d.csv",id); */
/* gettimeofday(&start, NULL); */
/* inittracking(title); */
/* for(int abc=0;abc<REPEAT;abc++) { */
/* #pragma omp parallel */
/* { */
/* int threadid = omp_get_thread_num(); */
/* iters[threadid] = 0; */
/* int t = 0; */
/* #pragma omp for schedule(dynamic, 1024) */
/* for (node_t u1 = 0; u1 < G->numNodes; u1 ++) */
/* for (edge_t j_idx = G->begin[u1];j_idx < G->begin[u1+1] ; j_idx ++) { */
/* iters[threadid]++; */
/* node_t j = G->node_idx [j_idx]; */
/* for (edge_t k_idx = G->begin[j];k_idx < G->begin[j+1] ; k_idx ++) { */
/* node_t k = G_member[j]; */
/* if(k > G->numNodes/2) { */
/* t++; */
/* } */
/* } */
/* } */
/* printf("dummy %d \n",t); */
/* printf("The num iters thread id %d = %lld \n", threadid, iters[threadid]); */
/* } */
/* } */
/* endtracking(); */
/* gettimeofday(&end, NULL); */
/* printTiming(ALGO_KERNEL,((end.tv_sec - start.tv_sec)*1000 + ((double)(end.tv_usec - start.tv_usec))/1000)); */
/* free(G_member); */
/* } */
/* Memory-access workload for power/performance measurement: REPEAT times,
   walk every edge (u1 -> j) of G in parallel, and for each edge scan the
   adjacency ranges of j and of its "mirror" node (numNodes - (j+1)),
   counting sampled G_member values above numNodes/2.  Samples are logged
   to "multiple_<id>.csv" via inittracking()/endtracking(), and total
   kernel time is reported through printTiming().
   NOTE(review): both inner loops read G_member[j], which is invariant in
   k_idx -- this looks intentional as a pure traffic generator, but confirm
   it is not meant to be G_member[G->node_idx[k_idx]]. */
void updateMultipleArrayPerIteration(graph *G, int id) {
printf("The update multiple Array %d \n", id);
/* Random per-node payload, seeded deterministically for repeatability. */
node_t * G_member = (int*)malloc (G->numNodes * sizeof(int));
srand(0);
int i;
for(i = 0; i< G->numNodes; i++) {
G_member[i] = rand() % G->numNodes;
}
char title[50];
sprintf(title, "multiple_%d.csv",id);
gettimeofday(&start, NULL);
inittracking(title);
for(int abc=0; abc < REPEAT; abc ++) {
#pragma omp parallel
{
int threadid = omp_get_thread_num();
iters[threadid] = 0;   /* per-thread edge counter (global array) */
int t = 0;             /* dummy accumulator to defeat dead-code removal */
#pragma omp for schedule(dynamic, 1024)
for (node_t u1 = 0; u1 < G->numNodes; u1 ++)
for (edge_t j_idx = G->begin[u1];j_idx < G->begin[u1+1] ; j_idx ++) {
iters[threadid]++;
node_t j = G->node_idx [j_idx];
/* Scan the adjacency range of j. */
for (edge_t k_idx = G->begin[j];k_idx < G->begin[j+1] ; k_idx ++) {
node_t k = G_member[j];
if(k > G->numNodes/2) {
t++;
}
}
/* Scan the adjacency range of the mirror node of j. */
node_t j_comp = ( G->numNodes - (j+1));
for (edge_t k_idx = G->begin[j_comp];k_idx < G->begin[j_comp+1] ; k_idx ++) {
node_t k = G_member[j];
if(k > G ->numNodes/2) {
t++;
}
}
}
printf("dummy %d \n",t);
printf("The num iters thread id %d = %lld \n", threadid, iters[threadid]);
}
}
endtracking();
gettimeofday(&end, NULL);
printTiming(ALGO_KERNEL,((end.tv_sec - start.tv_sec)*1000 + ((double)(end.tv_usec - start.tv_usec))/1000));
free(G_member);
}
/*
void updateMultipleArrayPerIteration(graph *G, int id) {
printf("The update multiple Array %d \n", id);
node_t * G_member = (int*)malloc (G->numNodes * sizeof(int));
srand(0);
int i;
for(i = 0; i< G->numNodes; i++) {
G_member[i] = rand() % G->numNodes;
}
char title[50];
sprintf(title, "multiple_%d.csv",id);
gettimeofday(&start, NULL);
inittracking(title);
for(int abc=0; abc < REPEAT; abc ++) {
#pragma omp parallel
{
int threadid = omp_get_thread_num();
iters[threadid] = 0;
int t = 0;
#pragma omp for schedule(dynamic, 1024)
for (node_t u1 = 0; u1 < G->numNodes; u1 ++)
for (edge_t j_idx = G->begin[u1];j_idx < G->begin[u1+1] ; j_idx ++) {
iters[threadid]++;
node_t j = G->node_idx [j_idx];
for (edge_t k_idx = G->begin[u1];k_idx < G->begin[u1+1] ; k_idx ++) {
node_t k = G_member[j];
if(k > j) {
for (edge_t k_idx = G->begin[u1];k_idx < G->begin[u1+1] ; k_idx ++) {
t++;
}
}
}
printf("dummy %d \n",t);
printf("The num iters thread id %d = %lld \n", threadid, iters[threadid]);
}
}
endtracking();
gettimeofday(&end, NULL);
printTiming(ALGO_KERNEL,((end.tv_sec - start.tv_sec)*1000 + ((double)(end.tv_usec - start.tv_usec))/1000));
free(G_member);
}*/
#define numTimes 4
/***
 * Common entry point for all algorithms.
 * Expects argv[1] = graph file and argv[2] = second readGraph argument.
 * Runs the selected kernel numTimes times with a pause between runs.
 **/
int runalgo(int argc, char **argv) {
  int i;
  /* Fail cleanly instead of dereferencing missing argv entries. */
  if (argc < 3) {
    fprintf(stderr, "usage: %s <graph-file> <graph-arg>\n", argv[0]);
    return 1;
  }
  setaffinity();
  graph *G = readGraph(argv[1], argv[2]);
  for (i = 0; i < numTimes; i++) {
    printf("Run %d \n", i);
    /* updateElementinArray(G,i); */
    /* sleep(1); */
    /* updateArrayPerIteration(G,i); */
    /* sleep(2); */
    updateMultipleArrayPerIteration(G, i);
    sleep(2); /* let the system settle between runs */
    /* updateLockperIteration(G,i); */
    /* sleep(2); */
  }
  return 0;
}
/* Placeholder kernel hook; intentionally empty.
   NOTE(review): plain `inline` without `static` at file scope can cause
   link errors under C99 inline semantics if no external definition is
   emitted elsewhere -- confirm, or change to `static inline`. */
inline void kernel(graph *G) {
}
|
SPHCalcHydroForceFunctor.h | /**
* @file SPHCalcHydroForceFunctor.h
* @author seckler
* @date 22.01.18
*/
#pragma once
#include "autopas/particles/OwnershipState.h"
#include "autopas/sph/SPHKernels.h"
namespace autopas::sph {
/**
* Class that defines the hydrodynamic force functor.
* It is used to calculate the force based on the given SPH kernels.
* @tparam Particle
* @tparam ParticleCell
*/
template <class Particle>
class SPHCalcHydroForceFunctor : public Functor<Particle, SPHCalcHydroForceFunctor<Particle>> {
public:
/// soa arrays type
using SoAArraysType = typename Particle::SoAArraysType;
  /**
   * Default constructor.
   * The cutoff handed to the base Functor is 0 because the real cutoff is
   * dynamic: it depends on each particle's smoothing length (see AoSFunctor).
   * 0 merely passes the base-class sanity check.
   */
  SPHCalcHydroForceFunctor()
      // the actual cutoff used is dynamic. 0 is used to pass the sanity check.
      : autopas::Functor<Particle, SPHCalcHydroForceFunctor<Particle>>(0.){};
  /** @return true: this functor participates in auto-tuning. */
  bool isRelevantForTuning() override { return true; }
  /** @return true: a Newton-3 (symmetric) kernel is available. */
  bool allowsNewton3() override { return true; }
  /** @return true: a non-Newton-3 kernel is available as well. */
  bool allowsNonNewton3() override { return true; }
  /**
   * Any cluster size is fine, but only with the AoS data layout.
   * @param clusterSize size of the clusters (unused here)
   * @param dataLayout layout to be checked
   * @return true iff dataLayout is aos
   */
  [[nodiscard]] bool isAppropriateClusterSize(unsigned int clusterSize,
                                              DataLayoutOption::Value dataLayout) const override {
    return dataLayout == DataLayoutOption::aos;  // This functor does only support clusters via aos.
  }
  /**
   * Calculates the contribution of the interaction of particle i and j to the
   * hydrodynamic force.
   * It is not symmetric, because the smoothing lengths of the two particles can
   * be different.
   * The commented "PS::..." lines are the matching statements of the reference
   * implementation this was ported from.
   * @param i first particle of the interaction
   * @param j second particle of the interaction
   * @param newton3 defines whether or whether not to use newton 3
   */
  void AoSFunctor(Particle &i, Particle &j, bool newton3 = true) override {
    // Dummy particles never interact.
    if (i.isDummy() or j.isDummy()) {
      return;
    }
    const std::array<double, 3> dr = utils::ArrayMath::sub(i.getR(), j.getR());
    // const PS::F64vec dr = ep_i[i].pos - ep_j[j].pos;
    // Dynamic cutoff: kernel support radius scaled by i's smoothing length.
    double cutoff = i.getSmoothingLength() * autopas::sph::SPHKernels::getKernelSupportRadius();
    if (autopas::utils::ArrayMath::dot(dr, dr) >= cutoff * cutoff) {
      return;
    }
    const std::array<double, 3> dv = utils::ArrayMath::sub(i.getV(), j.getV());
    // const PS::F64vec dv = ep_i[i].vel - ep_j[j].vel;
    double dvdr = utils::ArrayMath::dot(dv, dr);
    // w_ij is nonzero only for approaching particles (dv . dr < 0).
    const double w_ij = (dvdr < 0) ? dvdr / utils::ArrayMath::L2Norm(dr) : 0;
    // const PS::F64 w_ij = (dv * dr < 0) ? dv * dr / sqrt(dr * dr) : 0;
    // Signal velocity entering the viscosity term below.
    const double v_sig = i.getSoundSpeed() + j.getSoundSpeed() - 3.0 * w_ij;
    // const PS::F64 v_sig = ep_i[i].snds + ep_j[j].snds - 3.0 * w_ij;
    i.checkAndSetVSigMax(v_sig);
    if (newton3) {
      j.checkAndSetVSigMax(v_sig);  // Newton 3
      // v_sig_max = std::max(v_sig_max, v_sig);
    }
    // Artificial viscosity AV, normalised by the mean density of i and j.
    const double AV = -0.5 * v_sig * w_ij / (0.5 * (i.getDensity() + j.getDensity()));
    // const PS::F64 AV = - 0.5 * v_sig * w_ij / (0.5 * (ep_i[i].dens +
    // ep_j[j].dens));
    // Symmetrised kernel gradient: mean of gradW with either smoothing length.
    const std::array<double, 3> gradW_ij =
        utils::ArrayMath::mulScalar(utils::ArrayMath::add(SPHKernels::gradW(dr, i.getSmoothingLength()),
                                                          SPHKernels::gradW(dr, j.getSmoothingLength())),
                                    0.5);
    // const PS::F64vec gradW_ij = 0.5 * (gradW(dr, ep_i[i].smth) + gradW(dr,
    // ep_j[j].smth));
    double scale =
        i.getPressure() / (i.getDensity() * i.getDensity()) + j.getPressure() / (j.getDensity() * j.getDensity()) + AV;
    i.subAcceleration(utils::ArrayMath::mulScalar(gradW_ij, scale * j.getMass()));
    // hydro[i].acc -= ep_j[j].mass * (ep_i[i].pres / (ep_i[i].dens *
    // ep_i[i].dens) + ep_j[j].pres / (ep_j[j].dens * ep_j[j].dens) + AV) *
    // gradW_ij;
    if (newton3) {
      j.addAcceleration(utils::ArrayMath::mulScalar(gradW_ij, scale * i.getMass()));
      // Newton3, gradW_ij = -gradW_ji
    }
    // Energy change rate (engDot) contributions for i and, with Newton 3, j.
    double scale2i = j.getMass() * (i.getPressure() / (i.getDensity() * i.getDensity()) + 0.5 * AV);
    i.addEngDot(utils::ArrayMath::dot(gradW_ij, dv) * scale2i);
    // hydro[i].eng_dot += ep_j[j].mass * (ep_i[i].pres / (ep_i[i].dens *
    // ep_i[i].dens) + 0.5 * AV) * dv * gradW_ij;
    if (newton3) {
      double scale2j = i.getMass() * (j.getPressure() / (j.getDensity() * j.getDensity()) + 0.5 * AV);
      j.addEngDot(utils::ArrayMath::dot(gradW_ij, dv) * scale2j);
      // Newton 3
    }
  }
  /**
   * @copydoc Functor::SoAFunctorSingle(SoAView<SoAArraysType>, bool)
   * This functor ignores the newton3 value, as we do not expect any benefit from disabling newton3.
   * Same physics as AoSFunctor, applied pairwise within a single SoA buffer.
   */
  void SoAFunctorSingle(SoAView<SoAArraysType> soa, bool newton3) override {
    if (soa.getNumParticles() == 0) return;
    // Raw pointers into the SoA columns; __restrict helps vectorization.
    double *const __restrict massptr = soa.template begin<Particle::AttributeNames::mass>();
    double *const __restrict densityptr = soa.template begin<Particle::AttributeNames::density>();
    double *const __restrict smthptr = soa.template begin<Particle::AttributeNames::smth>();
    double *const __restrict soundSpeedptr = soa.template begin<Particle::AttributeNames::soundSpeed>();
    double *const __restrict pressureptr = soa.template begin<Particle::AttributeNames::pressure>();
    double *const __restrict vsigmaxptr = soa.template begin<Particle::AttributeNames::vsigmax>();
    double *const __restrict engDotptr = soa.template begin<Particle::AttributeNames::engDot>();
    double *const __restrict xptr = soa.template begin<Particle::AttributeNames::posX>();
    double *const __restrict yptr = soa.template begin<Particle::AttributeNames::posY>();
    double *const __restrict zptr = soa.template begin<Particle::AttributeNames::posZ>();
    double *const __restrict velXptr = soa.template begin<Particle::AttributeNames::velX>();
    double *const __restrict velYptr = soa.template begin<Particle::AttributeNames::velY>();
    double *const __restrict velZptr = soa.template begin<Particle::AttributeNames::velZ>();
    double *const __restrict accXptr = soa.template begin<Particle::AttributeNames::accX>();
    double *const __restrict accYptr = soa.template begin<Particle::AttributeNames::accY>();
    double *const __restrict accZptr = soa.template begin<Particle::AttributeNames::accZ>();
    const auto *const __restrict ownedStatePtr = soa.template begin<Particle::AttributeNames::ownershipState>();
    for (unsigned int indexFirst = 0; indexFirst < soa.getNumParticles(); ++indexFirst) {
      // checks whether particle i is owned.
      if (ownedStatePtr[indexFirst] == OwnershipState::dummy) {
        continue;
      }
      // Per-i accumulators; written back once after the inner loop.
      double localvsigmax = 0.;
      double localengdotsum = 0.;
      double localAccX = 0.;
      double localAccY = 0.;
      double localAccZ = 0.;
      // icpc vectorizes this.
      // g++ only with -ffast-math or -funsafe-math-optimizations
      //#pragma omp simd reduction(+ : localengdotsum, localAccX, localAccY, localAccZ), reduction(max : localvsigmax)
      // j starts at indexFirst + 1: each pair is handled exactly once and the
      // symmetric (Newton-3) update for j is always applied.
      for (unsigned int j = indexFirst + 1; j < soa.getNumParticles(); ++j) {
        const double drx = xptr[indexFirst] - xptr[j];
        const double dry = yptr[indexFirst] - yptr[j];
        const double drz = zptr[indexFirst] - zptr[j];
        const double drx2 = drx * drx;
        const double dry2 = dry * dry;
        const double drz2 = drz * drz;
        const double dr2 = drx2 + dry2 + drz2;
        // Dynamic cutoff based on i's smoothing length (matches AoSFunctor).
        double cutoff = smthptr[indexFirst] * autopas::sph::SPHKernels::getKernelSupportRadius();
        if (dr2 >= cutoff * cutoff or ownedStatePtr[j] == OwnershipState::dummy) continue;
        const double dvX = velXptr[indexFirst] - velXptr[j];
        const double dvY = velYptr[indexFirst] - velYptr[j];
        const double dvZ = velZptr[indexFirst] - velZptr[j];
        // const PS::F64vec dv = ep_i[i].vel - ep_j[j].vel;
        double dvdr = dvX * drx + dvY * dry + dvZ * drz;
        // Nonzero only for approaching particles.
        const double w_ij = (dvdr < 0) ? dvdr / sqrt(dr2) : 0;
        // const PS::F64 w_ij = (dv * dr < 0) ? dv * dr / sqrt(dr * dr) : 0;
        const double v_sig = soundSpeedptr[indexFirst] + soundSpeedptr[j] - 3.0 * w_ij;
        // const PS::F64 v_sig = ep_i[i].snds + ep_j[j].snds - 3.0 * w_ij;
        localvsigmax = std::max(localvsigmax, v_sig);
        // vsigmaxptr[j] = std::max(vsigmaxptr[j], v_sig); // Newton 3
        // Branch-free max; some compilers vectorize this better than std::max.
        vsigmaxptr[j] = vsigmaxptr[j] > v_sig ? vsigmaxptr[j] : v_sig;  // Newton 3
        // v_sig_max = std::max(v_sig_max, v_sig);
        const double AV = -0.5 * v_sig * w_ij / (0.5 * (densityptr[indexFirst] + densityptr[j]));
        // const PS::F64 AV = - 0.5 * v_sig * w_ij / (0.5 * (ep_i[i].dens +
        // ep_j[j].dens));
        // Symmetrised kernel gradient (mean over both smoothing lengths).
        const std::array<double, 3> gradW_ij =
            utils::ArrayMath::mulScalar(utils::ArrayMath::add(SPHKernels::gradW({drx, dry, drz}, smthptr[indexFirst]),
                                                              SPHKernels::gradW({drx, dry, drz}, smthptr[j])),
                                        0.5);
        // const PS::F64vec gradW_ij = 0.5 * (gradW(dr, ep_i[i].smth) + gradW(dr,
        // ep_j[j].smth));
        double scale = pressureptr[indexFirst] / (densityptr[indexFirst] * densityptr[indexFirst]) +
                       pressureptr[j] / (densityptr[j] * densityptr[j]) + AV;
        const double massscale = scale * massptr[j];
        localAccX -= gradW_ij[0] * massscale;
        localAccY -= gradW_ij[1] * massscale;
        localAccZ -= gradW_ij[2] * massscale;
        // hydro[i].acc -= ep_j[j].mass * (ep_i[i].pres / (ep_i[i].dens *
        // ep_i[i].dens) + ep_j[j].pres / (ep_j[j].dens * ep_j[j].dens) + AV) *
        // gradW_ij;
        const double massscale2 = scale * massptr[indexFirst];
        accXptr[j] += gradW_ij[0] * massscale2;
        accYptr[j] += gradW_ij[1] * massscale2;
        accZptr[j] += gradW_ij[2] * massscale2;
        // Newton3, gradW_ij = -gradW_ji
        double scale2i =
            massptr[j] * (pressureptr[indexFirst] / (densityptr[indexFirst] * densityptr[indexFirst]) + 0.5 * AV);
        localengdotsum += (gradW_ij[0] * dvX + gradW_ij[1] * dvY + gradW_ij[2] * dvZ) * scale2i;
        // hydro[i].eng_dot += ep_j[j].mass * (ep_i[i].pres / (ep_i[i].dens *
        // ep_i[i].dens) + 0.5 * AV) * dv * gradW_ij;
        double scale2j = massptr[indexFirst] * (pressureptr[j] / (densityptr[j] * densityptr[j]) + 0.5 * AV);
        engDotptr[j] += (gradW_ij[0] * dvX + gradW_ij[1] * dvY + gradW_ij[2] * dvZ) * scale2j;
        // Newton 3
      }
      // Flush the per-i accumulators.
      engDotptr[indexFirst] += localengdotsum;
      accXptr[indexFirst] += localAccX;
      accYptr[indexFirst] += localAccY;
      accZptr[indexFirst] += localAccZ;
      vsigmaxptr[indexFirst] = std::max(localvsigmax, vsigmaxptr[indexFirst]);
    }
  }
  /**
   * @copydoc Functor::SoAFunctorPair(SoAView<SoAArraysType>, SoAView<SoAArraysType>, bool)
   * Same physics as AoSFunctor between two different SoA buffers; writes to
   * soa2 happen only when newton3 is enabled.
   */
  void SoAFunctorPair(SoAView<SoAArraysType> soa1, SoAView<SoAArraysType> soa2, bool newton3) override {
    if (soa1.getNumParticles() == 0 || soa2.getNumParticles() == 0) return;
    // Raw pointers into the SoA columns of both buffers.
    double *const __restrict massptr1 = soa1.template begin<Particle::AttributeNames::mass>();
    double *const __restrict densityptr1 = soa1.template begin<Particle::AttributeNames::density>();
    double *const __restrict smthptr1 = soa1.template begin<Particle::AttributeNames::smth>();
    double *const __restrict soundSpeedptr1 = soa1.template begin<Particle::AttributeNames::soundSpeed>();
    double *const __restrict pressureptr1 = soa1.template begin<Particle::AttributeNames::pressure>();
    double *const __restrict vsigmaxptr1 = soa1.template begin<Particle::AttributeNames::vsigmax>();
    double *const __restrict engDotptr1 = soa1.template begin<Particle::AttributeNames::engDot>();
    double *const __restrict xptr1 = soa1.template begin<Particle::AttributeNames::posX>();
    double *const __restrict yptr1 = soa1.template begin<Particle::AttributeNames::posY>();
    double *const __restrict zptr1 = soa1.template begin<Particle::AttributeNames::posZ>();
    double *const __restrict velXptr1 = soa1.template begin<Particle::AttributeNames::velX>();
    double *const __restrict velYptr1 = soa1.template begin<Particle::AttributeNames::velY>();
    double *const __restrict velZptr1 = soa1.template begin<Particle::AttributeNames::velZ>();
    double *const __restrict accXptr1 = soa1.template begin<Particle::AttributeNames::accX>();
    double *const __restrict accYptr1 = soa1.template begin<Particle::AttributeNames::accY>();
    double *const __restrict accZptr1 = soa1.template begin<Particle::AttributeNames::accZ>();
    double *const __restrict massptr2 = soa2.template begin<Particle::AttributeNames::mass>();
    double *const __restrict densityptr2 = soa2.template begin<Particle::AttributeNames::density>();
    double *const __restrict smthptr2 = soa2.template begin<Particle::AttributeNames::smth>();
    double *const __restrict soundSpeedptr2 = soa2.template begin<Particle::AttributeNames::soundSpeed>();
    double *const __restrict pressureptr2 = soa2.template begin<Particle::AttributeNames::pressure>();
    double *const __restrict vsigmaxptr2 = soa2.template begin<Particle::AttributeNames::vsigmax>();
    double *const __restrict engDotptr2 = soa2.template begin<Particle::AttributeNames::engDot>();
    double *const __restrict xptr2 = soa2.template begin<Particle::AttributeNames::posX>();
    double *const __restrict yptr2 = soa2.template begin<Particle::AttributeNames::posY>();
    double *const __restrict zptr2 = soa2.template begin<Particle::AttributeNames::posZ>();
    double *const __restrict velXptr2 = soa2.template begin<Particle::AttributeNames::velX>();
    double *const __restrict velYptr2 = soa2.template begin<Particle::AttributeNames::velY>();
    double *const __restrict velZptr2 = soa2.template begin<Particle::AttributeNames::velZ>();
    double *const __restrict accXptr2 = soa2.template begin<Particle::AttributeNames::accX>();
    double *const __restrict accYptr2 = soa2.template begin<Particle::AttributeNames::accY>();
    double *const __restrict accZptr2 = soa2.template begin<Particle::AttributeNames::accZ>();
    const auto *const __restrict ownedStatePtr1 = soa1.template begin<Particle::AttributeNames::ownershipState>();
    const auto *const __restrict ownedStatePtr2 = soa2.template begin<Particle::AttributeNames::ownershipState>();
    for (unsigned int indexFirst = 0; indexFirst < soa1.getNumParticles(); ++indexFirst) {
      // checks whether particle i is owned.
      if (ownedStatePtr1[indexFirst] == OwnershipState::dummy) {
        continue;
      }
      // Per-i accumulators; written back once after the inner loop.
      double localvsigmax = 0.;
      double localengdotsum = 0.;
      double localAccX = 0.;
      double localAccY = 0.;
      double localAccZ = 0.;
      // icpc vectorizes this.
      // g++ only with -ffast-math or -funsafe-math-optimizations
      //#pragma omp simd reduction(+ : localengdotsum, localAccX, localAccY, localAccZ), reduction(max : localvsigmax)
      for (unsigned int j = 0; j < soa2.getNumParticles(); ++j) {
        const double drx = xptr1[indexFirst] - xptr2[j];
        const double dry = yptr1[indexFirst] - yptr2[j];
        const double drz = zptr1[indexFirst] - zptr2[j];
        const double drx2 = drx * drx;
        const double dry2 = dry * dry;
        const double drz2 = drz * drz;
        const double dr2 = drx2 + dry2 + drz2;
        // Dynamic cutoff based on i's smoothing length (matches AoSFunctor,
        // which is intentionally asymmetric).
        double cutoff = smthptr1[indexFirst] * autopas::sph::SPHKernels::getKernelSupportRadius();
        if (dr2 >= cutoff * cutoff or ownedStatePtr2[j] == OwnershipState::dummy) continue;
        const double dvX = velXptr1[indexFirst] - velXptr2[j];
        const double dvY = velYptr1[indexFirst] - velYptr2[j];
        const double dvZ = velZptr1[indexFirst] - velZptr2[j];
        // const PS::F64vec dv = ep_i[i].vel - ep_j[j].vel;
        double dvdr = dvX * drx + dvY * dry + dvZ * drz;
        const double w_ij = (dvdr < 0) ? dvdr / sqrt(dr2) : 0;
        // const PS::F64 w_ij = (dv * dr < 0) ? dv * dr / sqrt(dr * dr) : 0;
        const double v_sig = soundSpeedptr1[indexFirst] + soundSpeedptr2[j] - 3.0 * w_ij;
        // const PS::F64 v_sig = ep_i[i].snds + ep_j[j].snds - 3.0 * w_ij;
        localvsigmax = std::max(localvsigmax, v_sig);
        if (newton3) {
          // vsigmaxptr2[j] = std::max(vsigmaxptr2[j], v_sig); // Newton 3
          vsigmaxptr2[j] = vsigmaxptr2[j] > v_sig ? vsigmaxptr2[j] : v_sig;  // Newton 3
          // v_sig_max = std::max(v_sig_max, v_sig);
        }
        const double AV = -0.5 * v_sig * w_ij / (0.5 * (densityptr1[indexFirst] + densityptr2[j]));
        // const PS::F64 AV = - 0.5 * v_sig * w_ij / (0.5 * (ep_i[i].dens +
        // ep_j[j].dens));
        // Symmetrised kernel gradient (mean over both smoothing lengths).
        const std::array<double, 3> gradW_ij =
            utils::ArrayMath::mulScalar(utils::ArrayMath::add(SPHKernels::gradW({drx, dry, drz}, smthptr1[indexFirst]),
                                                              SPHKernels::gradW({drx, dry, drz}, smthptr2[j])),
                                        0.5);
        // const PS::F64vec gradW_ij = 0.5 * (gradW(dr, ep_i[i].smth) + gradW(dr,
        // ep_j[j].smth));
        double scale = pressureptr1[indexFirst] / (densityptr1[indexFirst] * densityptr1[indexFirst]) +
                       pressureptr2[j] / (densityptr2[j] * densityptr2[j]) + AV;
        const double massscale = scale * massptr2[j];
        localAccX -= gradW_ij[0] * massscale;
        localAccY -= gradW_ij[1] * massscale;
        localAccZ -= gradW_ij[2] * massscale;
        // hydro[i].acc -= ep_j[j].mass * (ep_i[i].pres / (ep_i[i].dens *
        // ep_i[i].dens) + ep_j[j].pres / (ep_j[j].dens * ep_j[j].dens) + AV) *
        // gradW_ij;
        if (newton3) {
          // Inner massscale intentionally shadows the outer one.
          const double massscale = scale * massptr1[indexFirst];
          accXptr2[j] += gradW_ij[0] * massscale;
          accYptr2[j] += gradW_ij[1] * massscale;
          accZptr2[j] += gradW_ij[2] * massscale;
          // Newton3, gradW_ij = -gradW_ji
        }
        double scale2i =
            massptr2[j] * (pressureptr1[indexFirst] / (densityptr1[indexFirst] * densityptr1[indexFirst]) + 0.5 * AV);
        localengdotsum += (gradW_ij[0] * dvX + gradW_ij[1] * dvY + gradW_ij[2] * dvZ) * scale2i;
        // hydro[i].eng_dot += ep_j[j].mass * (ep_i[i].pres / (ep_i[i].dens *
        // ep_i[i].dens) + 0.5 * AV) * dv * gradW_ij;
        if (newton3) {
          double scale2j = massptr1[indexFirst] * (pressureptr2[j] / (densityptr2[j] * densityptr2[j]) + 0.5 * AV);
          engDotptr2[j] += (gradW_ij[0] * dvX + gradW_ij[1] * dvY + gradW_ij[2] * dvZ) * scale2j;
          // Newton 3
        }
      }
      // Flush the per-i accumulators.
      engDotptr1[indexFirst] += localengdotsum;
      accXptr1[indexFirst] += localAccX;
      accYptr1[indexFirst] += localAccY;
      accZptr1[indexFirst] += localAccZ;
      vsigmaxptr1[indexFirst] = std::max(localvsigmax, vsigmaxptr1[indexFirst]);
    }
  }
// clang-format off
/**
* @copydoc Functor::SoAFunctorVerlet(SoAView<SoAArraysType> soa, const size_t indexFirst, const std::vector<size_t, autopas::AlignedAllocator<size_t>> &neighborList, bool newton3)
*/
// clang-format on
void SoAFunctorVerlet(SoAView<SoAArraysType> soa, const size_t indexFirst,
const std::vector<size_t, autopas::AlignedAllocator<size_t>> &neighborList,
bool newton3) override {
if (soa.getNumParticles() == 0) return;
const auto *const __restrict ownedStatePtr = soa.template begin<Particle::AttributeNames::ownershipState>();
// checks whether particle i is owned.
if (ownedStatePtr[indexFirst] == OwnershipState::dummy) {
return;
}
double *const __restrict massptr = soa.template begin<Particle::AttributeNames::mass>();
double *const __restrict densityptr = soa.template begin<Particle::AttributeNames::density>();
double *const __restrict smthptr = soa.template begin<Particle::AttributeNames::smth>();
double *const __restrict soundSpeedptr = soa.template begin<Particle::AttributeNames::soundSpeed>();
double *const __restrict pressureptr = soa.template begin<Particle::AttributeNames::pressure>();
double *const __restrict vsigmaxptr = soa.template begin<Particle::AttributeNames::vsigmax>();
double *const __restrict engDotptr = soa.template begin<Particle::AttributeNames::engDot>();
double *const __restrict xptr = soa.template begin<Particle::AttributeNames::posX>();
double *const __restrict yptr = soa.template begin<Particle::AttributeNames::posY>();
double *const __restrict zptr = soa.template begin<Particle::AttributeNames::posZ>();
double *const __restrict velXptr = soa.template begin<Particle::AttributeNames::velX>();
double *const __restrict velYptr = soa.template begin<Particle::AttributeNames::velY>();
double *const __restrict velZptr = soa.template begin<Particle::AttributeNames::velZ>();
double *const __restrict accXptr = soa.template begin<Particle::AttributeNames::accX>();
double *const __restrict accYptr = soa.template begin<Particle::AttributeNames::accY>();
double *const __restrict accZptr = soa.template begin<Particle::AttributeNames::accZ>();
double localvsigmax = 0.;
double localengdotsum = 0.;
double localAccX = 0.;
double localAccY = 0.;
double localAccZ = 0.;
const auto ¤tList = neighborList;
size_t listSize = currentList.size();
// icpc vectorizes this.
// g++ only with -ffast-math or -funsafe-math-optimizations
//#pragma omp simd reduction(+ : localengdotsum, localAccX, localAccY, localAccZ), reduction(max : localvsigmax)
for (unsigned int j = 0; j < listSize; ++j) {
const double drx = xptr[indexFirst] - xptr[currentList[j]];
const double dry = yptr[indexFirst] - yptr[currentList[j]];
const double drz = zptr[indexFirst] - zptr[currentList[j]];
const double drx2 = drx * drx;
const double dry2 = dry * dry;
const double drz2 = drz * drz;
const double dr2 = drx2 + dry2 + drz2;
double cutoff = smthptr[indexFirst] * autopas::sph::SPHKernels::getKernelSupportRadius();
if (dr2 >= cutoff * cutoff or ownedStatePtr[currentList[j]] == OwnershipState::dummy) continue;
const double dvX = velXptr[indexFirst] - velXptr[currentList[j]];
const double dvY = velYptr[indexFirst] - velYptr[currentList[j]];
const double dvZ = velZptr[indexFirst] - velZptr[currentList[j]];
// const PS::F64vec dv = ep_i[i].vel - ep_j[currentList[j]].vel;
double dvdr = dvX * drx + dvY * dry + dvZ * drz;
const double w_ij = (dvdr < 0) ? dvdr / sqrt(dr2) : 0;
// const PS::F64 w_ij = (dv * dr < 0) ? dv * dr / sqrt(dr * dr) : 0;
const double v_sig = soundSpeedptr[indexFirst] + soundSpeedptr[currentList[j]] - 3.0 * w_ij;
// const PS::F64 v_sig = ep_i[i].snds + ep_j[currentList[j]].snds - 3.0 * w_ij;
localvsigmax = std::max(localvsigmax, v_sig);
if (newton3) {
// vsigmaxptr[currentList[j]] = std::max(vsigmaxptr[currentList[j]], v_sig); // Newton 3
vsigmaxptr[currentList[j]] =
vsigmaxptr[currentList[j]] > v_sig ? vsigmaxptr[currentList[j]] : v_sig; // Newton 3
// v_sig_max = std::max(v_sig_max, v_sig);
}
const double AV = -0.5 * v_sig * w_ij / (0.5 * (densityptr[indexFirst] + densityptr[currentList[j]]));
// const PS::F64 AV = - 0.5 * v_sig * w_ij / (0.5 * (ep_i[i].dens +
// ep_j[currentList[j]].dens));
const std::array<double, 3> gradW_ij = utils::ArrayMath::mulScalar(
utils::ArrayMath::add(SPHKernels::gradW({drx, dry, drz}, smthptr[indexFirst]),
SPHKernels::gradW({drx, dry, drz}, smthptr[currentList[j]])),
0.5);
// const PS::F64vec gradW_ij = 0.5 * (gradW(dr, ep_i[i].smth) + gradW(dr,
// ep_j[currentList[j]].smth));
double scale = pressureptr[indexFirst] / (densityptr[indexFirst] * densityptr[indexFirst]) +
pressureptr[currentList[j]] / (densityptr[currentList[j]] * densityptr[currentList[j]]) + AV;
const double massscale = scale * massptr[currentList[j]];
localAccX -= gradW_ij[0] * massscale;
localAccY -= gradW_ij[1] * massscale;
localAccZ -= gradW_ij[2] * massscale;
// hydro[i].acc -= ep_j[currentList[j]].mass * (ep_i[i].pres / (ep_i[i].dens *
// ep_i[i].dens) + ep_j[currentList[j]].pres / (ep_j[currentList[j]].dens * ep_j[currentList[j]].dens) + AV) *
// gradW_ij;
if (newton3) {
const double massscale = scale * massptr[indexFirst];
accXptr[currentList[j]] += gradW_ij[0] * massscale;
accYptr[currentList[j]] += gradW_ij[1] * massscale;
accZptr[currentList[j]] += gradW_ij[2] * massscale;
// Newton3, gradW_ij = -gradW_ji
}
double scale2i = massptr[currentList[j]] *
(pressureptr[indexFirst] / (densityptr[indexFirst] * densityptr[indexFirst]) + 0.5 * AV);
localengdotsum += (gradW_ij[0] * dvX + gradW_ij[1] * dvY + gradW_ij[2] * dvZ) * scale2i;
// hydro[i].eng_dot += ep_j[currentList[j]].mass * (ep_i[i].pres / (ep_i[i].dens *
// ep_i[i].dens) + 0.5 * AV) * dv * gradW_ij;
if (newton3) {
double scale2j =
massptr[indexFirst] *
(pressureptr[currentList[j]] / (densityptr[currentList[j]] * densityptr[currentList[j]]) + 0.5 * AV);
engDotptr[currentList[j]] += (gradW_ij[0] * dvX + gradW_ij[1] * dvY + gradW_ij[2] * dvZ) * scale2j;
// Newton 3
}
}
engDotptr[indexFirst] += localengdotsum;
accXptr[indexFirst] += localAccX;
accYptr[indexFirst] += localAccY;
accZptr[indexFirst] += localAccZ;
vsigmaxptr[indexFirst] = std::max(localvsigmax, vsigmaxptr[indexFirst]);
}
  /**
   * @copydoc Functor::getNeededAttr()
   */
  constexpr static auto getNeededAttr() {
    // All 17 attributes the SoA kernels touch: inputs (mass, density, smth,
    // soundSpeed, pressure, pos*, vel*), read-modify-write outputs (vsigmax,
    // engDot, acc*) and the ownership state used for dummy filtering.
    return std::array<typename Particle::AttributeNames, 17>{
        Particle::AttributeNames::mass,     Particle::AttributeNames::density,
        Particle::AttributeNames::smth,     Particle::AttributeNames::soundSpeed,
        Particle::AttributeNames::pressure, Particle::AttributeNames::vsigmax,
        Particle::AttributeNames::engDot,   Particle::AttributeNames::posX,
        Particle::AttributeNames::posY,     Particle::AttributeNames::posZ,
        Particle::AttributeNames::velX,     Particle::AttributeNames::velY,
        Particle::AttributeNames::velZ,     Particle::AttributeNames::accX,
        Particle::AttributeNames::accY,     Particle::AttributeNames::accZ,
        Particle::AttributeNames::ownershipState};
  }
  /**
   * @copydoc Functor::getNeededAttr(std::false_type)
   * Subset without the write-back attributes (vsigmax, engDot, acc*) --
   * presumably for particles that are only read, not modified; verify
   * against the base Functor documentation.
   */
  constexpr static auto getNeededAttr(std::false_type) {
    return std::array<typename Particle::AttributeNames, 12>{
        Particle::AttributeNames::mass,     Particle::AttributeNames::density,
        Particle::AttributeNames::smth,     Particle::AttributeNames::soundSpeed,
        Particle::AttributeNames::pressure, Particle::AttributeNames::posX,
        Particle::AttributeNames::posY,     Particle::AttributeNames::posZ,
        Particle::AttributeNames::velX,     Particle::AttributeNames::velY,
        Particle::AttributeNames::velZ,     Particle::AttributeNames::ownershipState};
  }
  /**
   * @copydoc Functor::getComputedAttr()
   * NOTE(review): ownershipState is listed although the kernels never write
   * it -- confirm whether the SoA copy-back machinery requires it here.
   */
  constexpr static auto getComputedAttr() {
    return std::array<typename Particle::AttributeNames, 6>{
        Particle::AttributeNames::vsigmax, Particle::AttributeNames::engDot, Particle::AttributeNames::accX,
        Particle::AttributeNames::accY,    Particle::AttributeNames::accZ,   Particle::AttributeNames::ownershipState};
  }
/**
* Get the number of floating point operations used in one full kernel call
* @return the number of floating point operations
*/
static uint64_t getNumFlopsPerKernelCall() {
///@todo return correct flopcount
return 1ul;
}
};
} // namespace autopas::sph
|
gimple.h | /* Gimple IR definitions.
Copyright 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
Contributed by Aldy Hernandez <aldyh@redhat.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_GIMPLE_H
#define GCC_GIMPLE_H
#include "pointer-set.h"
#include "vec.h"
#include "vecprim.h"
#include "vecir.h"
#include "ggc.h"
#include "basic-block.h"
#include "tree-ssa-operands.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
struct gimple_seq_node_d;
typedef struct gimple_seq_node_d *gimple_seq_node;
typedef const struct gimple_seq_node_d *const_gimple_seq_node;
/* For each block, the PHI nodes that need to be rewritten are stored into
these vectors. */
typedef VEC(gimple, heap) *gimple_vec;
DEF_VEC_P (gimple_vec);
DEF_VEC_ALLOC_P (gimple_vec, heap);
/* GIMPLE statement codes.  The enumerators are generated from gimple.def:
   each DEFGSCODE entry there contributes its SYM here.  */
enum gimple_code {
#define DEFGSCODE(SYM, STRING, STRUCT)	SYM,
#include "gimple.def"
#undef DEFGSCODE
    LAST_AND_UNUSED_GIMPLE_CODE
};
extern const char *const gimple_code_name[];
extern const unsigned char gimple_rhs_class_table[];
/* Error out if a gimple tuple is addressed incorrectly.  */
#if defined ENABLE_GIMPLE_CHECKING
/* Checking build: assert EXPR and verify tuple codes on access.  */
#define gcc_gimple_checking_assert(EXPR) gcc_assert (EXPR)
extern void gimple_check_failed (const_gimple, const char *, int,          \
                                 const char *, enum gimple_code,           \
                                 enum tree_code) ATTRIBUTE_NORETURN;

/* Abort with diagnostics unless statement GS has code CODE.  GS is
   evaluated exactly once.  */
#define GIMPLE_CHECK(GS, CODE)						\
  do {									\
    const_gimple __gs = (GS);						\
    if (gimple_code (__gs) != (CODE))					\
      gimple_check_failed (__gs, __FILE__, __LINE__, __FUNCTION__,	\
	  		   (CODE), ERROR_MARK);				\
  } while (0)
#else  /* not ENABLE_GIMPLE_CHECKING  */
/* Release build: both checks compile to nothing; EXPR is never evaluated.  */
#define gcc_gimple_checking_assert(EXPR) ((void)(0 && (EXPR)))
#define GIMPLE_CHECK(GS, CODE)			(void)0
#endif
/* Class of GIMPLE expressions suitable for the RHS of assignments.  See
   get_gimple_rhs_class.  */
enum gimple_rhs_class
{
  GIMPLE_INVALID_RHS,	/* The expression cannot be used on the RHS.  */
  GIMPLE_TERNARY_RHS,	/* The expression is a ternary operation.  */
  GIMPLE_BINARY_RHS,	/* The expression is a binary operation.  */
  GIMPLE_UNARY_RHS,	/* The expression is a unary operation.  */
  GIMPLE_SINGLE_RHS	/* The expression is a single object (an SSA
			   name, a _DECL, a _REF, etc.).  */
};
/* Specific flags for individual GIMPLE statements.  These flags are
   always stored in gimple_statement_base.subcode and they may only be
   defined for statement codes that do not use sub-codes.

   Values for the masks can overlap as long as the overlapping values
   are never used in the same statement class.

   The maximum mask value that can be defined is 1 << 15 (i.e., each
   statement code can hold up to 16 bitflags).

   Keep this list sorted.  */
enum gf_mask {
    /* Flags for GIMPLE_ASM statements.  */
    GF_ASM_INPUT		= 1 << 0,
    GF_ASM_VOLATILE		= 1 << 1,
    /* Flags for GIMPLE_CALL statements.  */
    GF_CALL_FROM_THUNK		= 1 << 0,
    GF_CALL_RETURN_SLOT_OPT	= 1 << 1,
    GF_CALL_TAILCALL		= 1 << 2,
    GF_CALL_VA_ARG_PACK		= 1 << 3,
    GF_CALL_NOTHROW		= 1 << 4,
    GF_CALL_ALLOCA_FOR_VAR	= 1 << 5,
    GF_CALL_INTERNAL		= 1 << 6,
    GF_OMP_PARALLEL_COMBINED	= 1 << 0,

    /* True on a GIMPLE_OMP_RETURN statement if the return does not require
       a thread synchronization via some sort of barrier.  The exact barrier
       that would otherwise be emitted is dependent on the OMP statement with
       which this return is associated.  */
    GF_OMP_RETURN_NOWAIT	= 1 << 0,

    GF_OMP_SECTION_LAST		= 1 << 0,
    GF_OMP_ATOMIC_NEED_VALUE	= 1 << 0,
    GF_PREDICT_TAKEN		= 1 << 15
};
/* Currently, there are only two types of gimple debug stmt.  Others are
   envisioned, for example, to enable the generation of is_stmt notes
   in line number information, to mark sequence points, etc.  This
   subcode is to be used to tell them apart (it is stored in the tuple's
   subcode field, like the gf_mask flags).  */
enum gimple_debug_subcode {
  GIMPLE_DEBUG_BIND = 0,
  GIMPLE_DEBUG_SOURCE_BIND = 1
};
/* Masks for selecting a pass local flag (PLF) to work on.  These
   masks are used by gimple_set_plf and gimple_plf.  Two independent
   single-bit flags are available.  */
enum plf_mask {
    GF_PLF_1	= 1 << 0,
    GF_PLF_2	= 1 << 1
};
/* A node in a gimple_seq_d: one statement plus its doubly-linked list
   links.  */
struct GTY((chain_next ("%h.next"), chain_prev ("%h.prev"))) gimple_seq_node_d {
  /* The statement held by this node.  */
  gimple stmt;
  /* Previous node in the sequence.  */
  struct gimple_seq_node_d *prev;
  /* Next node in the sequence.  */
  struct gimple_seq_node_d *next;
};
/* A double-linked sequence of gimple statements.  */
struct GTY ((chain_next ("%h.next_free"))) gimple_seq_d {
  /* First and last nodes in the sequence (see gimple_seq_first and
     gimple_seq_last below).  */
  gimple_seq_node first;
  gimple_seq_node last;

  /* Sequences are created/destroyed frequently.  To minimize
     allocation activity, deallocated sequences are kept in a pool of
     available sequences.  This is the pointer to the next free
     sequence in the pool.  */
  gimple_seq next_free;
};
/* Return the first node in GIMPLE sequence S, or NULL when S itself is
   NULL.  */
static inline gimple_seq_node
gimple_seq_first (const_gimple_seq s)
{
  if (!s)
    return NULL;
  return s->first;
}
/* Return the first statement in GIMPLE sequence S, or NULL if the
   sequence is empty.  */
static inline gimple
gimple_seq_first_stmt (const_gimple_seq s)
{
  gimple_seq_node head = gimple_seq_first (s);
  if (!head)
    return NULL;
  return head->stmt;
}
/* Return the last node in GIMPLE sequence S, or NULL when S itself is
   NULL.  */
static inline gimple_seq_node
gimple_seq_last (const_gimple_seq s)
{
  if (!s)
    return NULL;
  return s->last;
}
/* Return the last statement in GIMPLE sequence S, or NULL if the
   sequence is empty.  */
static inline gimple
gimple_seq_last_stmt (const_gimple_seq s)
{
  gimple_seq_node tail = gimple_seq_last (s);
  if (!tail)
    return NULL;
  return tail->stmt;
}
/* Set the last node in GIMPLE sequence S to LAST.  S must not be NULL
   (it is dereferenced without a check).  */
static inline void
gimple_seq_set_last (gimple_seq s, gimple_seq_node last)
{
  s->last = last;
}
/* Set the first node in GIMPLE sequence S to FIRST.  S must not be NULL
   (it is dereferenced without a check).  */
static inline void
gimple_seq_set_first (gimple_seq s, gimple_seq_node first)
{
  s->first = first;
}
/* Return true if GIMPLE sequence S is empty.  A NULL sequence counts as
   empty.  */
static inline bool
gimple_seq_empty_p (const_gimple_seq s)
{
  if (s == NULL)
    return true;
  return s->first == NULL;
}
/* Link a gimple statement to the end of the sequence *SEQ_P,
allocating a new sequence when *SEQ_P is NULL (the _without_update
variant below documents the shared contract). */
void gimple_seq_add_stmt (gimple_seq *, gimple);
/* Link gimple statement GS to the end of the sequence *SEQ_P. If
*SEQ_P is NULL, a new sequence is allocated. This function is
similar to gimple_seq_add_stmt, but does not scan the operands.
During gimplification, we need to manipulate statement sequences
before the def/use vectors have been constructed. */
void gimple_seq_add_stmt_without_update (gimple_seq *, gimple);
/* Allocate a new sequence whose only element is STMT and return it.  */
static inline gimple_seq
gimple_seq_alloc_with_stmt (gimple stmt)
{
  gimple_seq new_seq = NULL;
  /* gimple_seq_add_stmt allocates a fresh sequence when given NULL.  */
  gimple_seq_add_stmt (&new_seq, stmt);
  return new_seq;
}
/* Returns the sequence of statements in BB, or NULL when BB is in
RTL form or has no GIMPLE payload.  */
static inline gimple_seq
bb_seq (const_basic_block bb)
{
  if ((bb->flags & BB_RTL) || bb->il.gimple == NULL)
    return NULL;
  return bb->il.gimple->seq;
}
/* Sets the sequence of statements in BB to SEQ. Only valid for
blocks that are still in GIMPLE form. */
static inline void
set_bb_seq (basic_block bb, gimple_seq seq)
{
/* A block already converted to RTL has no GIMPLE sequence to set. */
gcc_checking_assert (!(bb->flags & BB_RTL));
bb->il.gimple->seq = seq;
}
/* Iterator object for GIMPLE statement sequences. */
typedef struct
{
/* Sequence node holding the current statement. */
gimple_seq_node ptr;
/* Sequence and basic block holding the statement. These fields
are necessary to handle edge cases such as when statement is
added to an empty basic block or when the last statement of a
block/sequence is removed. */
gimple_seq seq;
/* Basic block that owns SEQ, if any — may be unset when iterating
a free-standing sequence (NOTE(review): confirm against callers). */
basic_block bb;
} gimple_stmt_iterator;
/* Data structure definitions for GIMPLE tuples. NOTE: word markers
are for 64 bit hosts. */
/* Common base of every GIMPLE statement tuple; the other tuple
structures below embed it (directly or via another base) as their
first member. */
struct GTY(()) gimple_statement_base {
/* [ WORD 1 ]
Main identifying code for a tuple. */
ENUM_BITFIELD(gimple_code) code : 8;
/* Nonzero if a warning should not be emitted on this tuple. */
unsigned int no_warning : 1;
/* Nonzero if this tuple has been visited. Passes are responsible
for clearing this bit before using it. */
unsigned int visited : 1;
/* Nonzero if this tuple represents a non-temporal move. */
unsigned int nontemporal_move : 1;
/* Pass local flags. These flags are free for any pass to use as
they see fit. Passes should not assume that these flags contain
any useful value when the pass starts. Any initial state that
the pass requires should be set on entry to the pass. See
gimple_set_plf and gimple_plf for usage. */
unsigned int plf : 2;
/* Nonzero if this statement has been modified and needs to have its
operands rescanned. */
unsigned modified : 1;
/* Nonzero if this statement contains volatile operands. */
unsigned has_volatile_ops : 1;
/* Padding to get subcode to 16 bit alignment. */
unsigned pad : 1;
/* The SUBCODE field can be used for tuple-specific flags for tuples
that do not require subcodes. Note that SUBCODE should be at
least as wide as tree codes, as several tuples store tree codes
in there. */
unsigned int subcode : 16;
/* UID of this statement. This is used by passes that want to
assign IDs to statements. It must be assigned and used by each
pass. By default it should be assumed to contain garbage. */
unsigned uid;
/* [ WORD 2 ]
Locus information for debug info. */
location_t location;
/* Number of operands in this tuple. */
unsigned num_ops;
/* [ WORD 3 ]
Basic block holding this statement. */
struct basic_block_def *bb;
/* [ WORD 4 ]
Lexical block holding this statement. */
tree block;
};
/* Base structure for tuples with operands. */
struct GTY(()) gimple_statement_with_ops_base
{
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5-6 ]
SSA operand vectors. NOTE: It should be possible to
amalgamate these vectors with the operand vector OP. However,
the SSA operand vectors are organized differently and contain
more information (like immediate use chaining). */
struct def_optype_d GTY((skip (""))) *def_ops;
struct use_optype_d GTY((skip (""))) *use_ops;
};
/* Statements that take register operands. */
struct GTY(()) gimple_statement_with_ops
{
/* [ WORD 1-6 ] */
struct gimple_statement_with_ops_base opbase;
/* [ WORD 7 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.opbase.gsbase.num_ops"))) op[1];
};
/* Base for statements that take both memory and register operands. */
struct GTY(()) gimple_statement_with_memory_ops_base
{
/* [ WORD 1-6 ] */
struct gimple_statement_with_ops_base opbase;
/* [ WORD 7-8 ]
Virtual operands for this statement. The GC will pick them
up via the ssa_names array. */
tree GTY((skip (""))) vdef;
tree GTY((skip (""))) vuse;
};
/* Statements that take both memory and register operands. */
struct GTY(()) gimple_statement_with_memory_ops
{
/* [ WORD 1-8 ] */
struct gimple_statement_with_memory_ops_base membase;
/* [ WORD 9 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1];
};
/* Call statements that take both memory and register operands. */
struct GTY(()) gimple_statement_call
{
/* [ WORD 1-8 ] */
struct gimple_statement_with_memory_ops_base membase;
/* [ WORD 9-12 ] */
struct pt_solution call_used;
struct pt_solution call_clobbered;
/* [ WORD 13 ]
The GF_CALL_INTERNAL subcode bit selects which union member is
live: FNTYPE for ordinary calls, INTERNAL_FN for internal ones. */
union GTY ((desc ("%1.membase.opbase.gsbase.subcode & GF_CALL_INTERNAL"))) {
tree GTY ((tag ("0"))) fntype;
enum internal_fn GTY ((tag ("GF_CALL_INTERNAL"))) internal_fn;
} u;
/* [ WORD 14 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1];
};
/* OpenMP statements (#pragma omp). Shared base for the OMP tuples
below that carry a statement body. */
struct GTY(()) gimple_statement_omp {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] */
gimple_seq body;
};
/* GIMPLE_BIND */
struct GTY(()) gimple_statement_bind {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ]
Variables declared in this scope. */
tree vars;
/* [ WORD 6 ]
This is different than the BLOCK field in gimple_statement_base,
which is analogous to TREE_BLOCK (i.e., the lexical block holding
this statement). This field is the equivalent of BIND_EXPR_BLOCK
in tree land (i.e., the lexical scope defined by this bind). See
gimple-low.c. */
tree block;
/* [ WORD 7 ] */
gimple_seq body;
};
/* GIMPLE_CATCH */
struct GTY(()) gimple_statement_catch {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ]
Types caught by this handler. */
tree types;
/* [ WORD 6 ] */
gimple_seq handler;
};
/* GIMPLE_EH_FILTER */
struct GTY(()) gimple_statement_eh_filter {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ]
Filter types. */
tree types;
/* [ WORD 6 ]
Failure actions. */
gimple_seq failure;
};
/* GIMPLE_EH_ELSE */
struct GTY(()) gimple_statement_eh_else {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5,6 ]
Normal-path and exception-path bodies. */
gimple_seq n_body, e_body;
};
/* GIMPLE_EH_MUST_NOT_THROW */
struct GTY(()) gimple_statement_eh_mnt {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] Abort function decl. */
tree fndecl;
};
/* GIMPLE_PHI */
struct GTY(()) gimple_statement_phi {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ]
Allocated capacity of ARGS and number of arguments in use. */
unsigned capacity;
unsigned nargs;
/* [ WORD 6 ] */
tree result;
/* [ WORD 7 ]
Trailing argument vector, sized by NARGS. */
struct phi_arg_d GTY ((length ("%h.nargs"))) args[1];
};
/* GIMPLE_RESX, GIMPLE_EH_DISPATCH */
struct GTY(()) gimple_statement_eh_ctrl
{
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ]
Exception region number. */
int region;
};
/* GIMPLE_TRY */
struct GTY(()) gimple_statement_try {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ]
Expression to evaluate. */
gimple_seq eval;
/* [ WORD 6 ]
Cleanup expression. */
gimple_seq cleanup;
};
/* Kind of GIMPLE_TRY statements. */
enum gimple_try_flags
{
/* A try/catch. */
GIMPLE_TRY_CATCH = 1 << 0,
/* A try/finally. */
GIMPLE_TRY_FINALLY = 1 << 1,
/* Mask covering the two mutually-exclusive kind bits above. */
GIMPLE_TRY_KIND = GIMPLE_TRY_CATCH | GIMPLE_TRY_FINALLY,
/* Analogous to TRY_CATCH_IS_CLEANUP. */
GIMPLE_TRY_CATCH_IS_CLEANUP = 1 << 2
};
/* GIMPLE_WITH_CLEANUP_EXPR */
struct GTY(()) gimple_statement_wce {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* Subcode: CLEANUP_EH_ONLY. True if the cleanup should only be
executed if an exception is thrown, not on normal exit of its
scope. This flag is analogous to the CLEANUP_EH_ONLY flag
in TARGET_EXPRs. */
/* [ WORD 5 ]
Cleanup expression. */
gimple_seq cleanup;
};
/* GIMPLE_ASM */
struct GTY(()) gimple_statement_asm
{
/* [ WORD 1-8 ] */
struct gimple_statement_with_memory_ops_base membase;
/* [ WORD 9 ]
__asm__ statement. */
const char *string;
/* [ WORD 10 ]
Number of inputs, outputs, clobbers, labels. */
unsigned char ni;
unsigned char no;
unsigned char nc;
unsigned char nl;
/* [ WORD 11 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1];
};
/* GIMPLE_OMP_CRITICAL */
struct GTY(()) gimple_statement_omp_critical {
/* [ WORD 1-5 ] */
struct gimple_statement_omp omp;
/* [ WORD 6 ]
Critical section name. */
tree name;
};
/* One iteration-space dimension of a (possibly collapsed) OMP for loop. */
struct GTY(()) gimple_omp_for_iter {
/* Condition code. */
enum tree_code cond;
/* Index variable. */
tree index;
/* Initial value. */
tree initial;
/* Final value. */
tree final;
/* Increment. */
tree incr;
};
/* GIMPLE_OMP_FOR */
struct GTY(()) gimple_statement_omp_for {
/* [ WORD 1-5 ] */
struct gimple_statement_omp omp;
/* [ WORD 6 ] */
tree clauses;
/* [ WORD 7 ]
Number of elements in iter array. */
size_t collapse;
/* [ WORD 8 ] */
struct gimple_omp_for_iter * GTY((length ("%h.collapse"))) iter;
/* [ WORD 9 ]
Pre-body evaluated before the loop body begins. */
gimple_seq pre_body;
};
/* GIMPLE_OMP_PARALLEL */
struct GTY(()) gimple_statement_omp_parallel {
/* [ WORD 1-5 ] */
struct gimple_statement_omp omp;
/* [ WORD 6 ]
Clauses. */
tree clauses;
/* [ WORD 7 ]
Child function holding the body of the parallel region. */
tree child_fn;
/* [ WORD 8 ]
Shared data argument. */
tree data_arg;
};
/* GIMPLE_OMP_TASK */
struct GTY(()) gimple_statement_omp_task {
/* [ WORD 1-8 ] */
struct gimple_statement_omp_parallel par;
/* [ WORD 9 ]
Child function holding firstprivate initialization if needed. */
tree copy_fn;
/* [ WORD 10-11 ]
Size and alignment in bytes of the argument data block. */
tree arg_size;
tree arg_align;
};
/* GIMPLE_OMP_SECTION */
/* Uses struct gimple_statement_omp. */
/* GIMPLE_OMP_SECTIONS */
struct GTY(()) gimple_statement_omp_sections {
/* [ WORD 1-5 ] */
struct gimple_statement_omp omp;
/* [ WORD 6 ] */
tree clauses;
/* [ WORD 7 ]
The control variable used for deciding which of the sections to
execute. */
tree control;
};
/* GIMPLE_OMP_CONTINUE.
Note: This does not inherit from gimple_statement_omp, because we
do not need the body field. */
struct GTY(()) gimple_statement_omp_continue {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] */
tree control_def;
/* [ WORD 6 ] */
tree control_use;
};
/* GIMPLE_OMP_SINGLE */
struct GTY(()) gimple_statement_omp_single {
/* [ WORD 1-5 ] */
struct gimple_statement_omp omp;
/* [ WORD 6 ] */
tree clauses;
};
/* GIMPLE_OMP_ATOMIC_LOAD.
Note: This is based on gimple_statement_base, not g_s_omp, because g_s_omp
contains a sequence, which we don't need here. */
struct GTY(()) gimple_statement_omp_atomic_load {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5-6 ] */
tree rhs, lhs;
};
/* GIMPLE_OMP_ATOMIC_STORE.
See note on GIMPLE_OMP_ATOMIC_LOAD. */
struct GTY(()) gimple_statement_omp_atomic_store {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] */
tree val;
};
/* GIMPLE_TRANSACTION. */
/* Bits to be stored in the GIMPLE_TRANSACTION subcode. */
/* The __transaction_atomic was declared [[outer]] or it is
__transaction_relaxed. */
#define GTMA_IS_OUTER (1u << 0)
#define GTMA_IS_RELAXED (1u << 1)
#define GTMA_DECLARATION_MASK (GTMA_IS_OUTER | GTMA_IS_RELAXED)
/* The transaction is seen to not have an abort. */
#define GTMA_HAVE_ABORT (1u << 2)
/* The transaction is seen to have loads or stores. */
#define GTMA_HAVE_LOAD (1u << 3)
#define GTMA_HAVE_STORE (1u << 4)
/* The transaction MAY enter serial irrevocable mode in its dynamic scope. */
#define GTMA_MAY_ENTER_IRREVOCABLE (1u << 5)
/* The transaction WILL enter serial irrevocable mode.
An irrevocable block post-dominates the entire transaction, such
that all invocations of the transaction will go serial-irrevocable.
In such case, we don't bother instrumenting the transaction, and
tell the runtime that it should begin the transaction in
serial-irrevocable mode. */
#define GTMA_DOES_GO_IRREVOCABLE (1u << 6)
struct GTY(()) gimple_statement_transaction
{
/* [ WORD 1-10 ] */
struct gimple_statement_with_memory_ops_base gsbase;
/* [ WORD 11 ] */
gimple_seq body;
/* [ WORD 12 ] */
tree label;
};
/* Build the GSS_* enumerators from gsstruct.def: each DEFGSSTRUCT
entry contributes its SYM to the enumeration. */
#define DEFGSSTRUCT(SYM, STRUCT, HAS_TREE_OP) SYM,
enum gimple_statement_structure_enum {
#include "gsstruct.def"
LAST_GSS_ENUM
};
#undef DEFGSSTRUCT
/* Define the overall contents of a gimple tuple. It may be any of the
structures declared above for various types of tuples. */
union GTY ((desc ("gimple_statement_structure (&%h)"), variable_size)) gimple_statement_d {
struct gimple_statement_base GTY ((tag ("GSS_BASE"))) gsbase;
struct gimple_statement_with_ops GTY ((tag ("GSS_WITH_OPS"))) gsops;
struct gimple_statement_with_memory_ops_base GTY ((tag ("GSS_WITH_MEM_OPS_BASE"))) gsmembase;
struct gimple_statement_with_memory_ops GTY ((tag ("GSS_WITH_MEM_OPS"))) gsmem;
struct gimple_statement_call GTY ((tag ("GSS_CALL"))) gimple_call;
struct gimple_statement_omp GTY ((tag ("GSS_OMP"))) omp;
struct gimple_statement_bind GTY ((tag ("GSS_BIND"))) gimple_bind;
struct gimple_statement_catch GTY ((tag ("GSS_CATCH"))) gimple_catch;
struct gimple_statement_eh_filter GTY ((tag ("GSS_EH_FILTER"))) gimple_eh_filter;
struct gimple_statement_eh_mnt GTY ((tag ("GSS_EH_MNT"))) gimple_eh_mnt;
struct gimple_statement_eh_else GTY ((tag ("GSS_EH_ELSE"))) gimple_eh_else;
struct gimple_statement_phi GTY ((tag ("GSS_PHI"))) gimple_phi;
struct gimple_statement_eh_ctrl GTY ((tag ("GSS_EH_CTRL"))) gimple_eh_ctrl;
struct gimple_statement_try GTY ((tag ("GSS_TRY"))) gimple_try;
struct gimple_statement_wce GTY ((tag ("GSS_WCE"))) gimple_wce;
struct gimple_statement_asm GTY ((tag ("GSS_ASM"))) gimple_asm;
struct gimple_statement_omp_critical GTY ((tag ("GSS_OMP_CRITICAL"))) gimple_omp_critical;
struct gimple_statement_omp_for GTY ((tag ("GSS_OMP_FOR"))) gimple_omp_for;
struct gimple_statement_omp_parallel GTY ((tag ("GSS_OMP_PARALLEL"))) gimple_omp_parallel;
struct gimple_statement_omp_task GTY ((tag ("GSS_OMP_TASK"))) gimple_omp_task;
struct gimple_statement_omp_sections GTY ((tag ("GSS_OMP_SECTIONS"))) gimple_omp_sections;
struct gimple_statement_omp_single GTY ((tag ("GSS_OMP_SINGLE"))) gimple_omp_single;
struct gimple_statement_omp_continue GTY ((tag ("GSS_OMP_CONTINUE"))) gimple_omp_continue;
struct gimple_statement_omp_atomic_load GTY ((tag ("GSS_OMP_ATOMIC_LOAD"))) gimple_omp_atomic_load;
struct gimple_statement_omp_atomic_store GTY ((tag ("GSS_OMP_ATOMIC_STORE"))) gimple_omp_atomic_store;
struct gimple_statement_transaction GTY((tag ("GSS_TRANSACTION"))) gimple_transaction;
};
/* In gimple.c. */
/* Offset in bytes to the location of the operand vector.
Zero if there is no operand vector for this tuple structure. */
extern size_t const gimple_ops_offset_[];
/* Map GIMPLE codes to GSS codes. */
extern enum gimple_statement_structure_enum const gss_for_code_[];
/* This variable holds the currently expanded gimple statement for purposes
of comminucating the profile info to the builtin expanders. */
extern gimple currently_expanding_gimple_stmt;
gimple gimple_build_return (tree);
gimple gimple_build_assign_stat (tree, tree MEM_STAT_DECL);
#define gimple_build_assign(l,r) gimple_build_assign_stat (l, r MEM_STAT_INFO)
void extract_ops_from_tree_1 (tree, enum tree_code *, tree *, tree *, tree *);
gimple gimple_build_assign_with_ops_stat (enum tree_code, tree, tree,
tree, tree MEM_STAT_DECL);
#define gimple_build_assign_with_ops(c,o1,o2,o3) \
gimple_build_assign_with_ops_stat (c, o1, o2, o3, NULL_TREE MEM_STAT_INFO)
#define gimple_build_assign_with_ops3(c,o1,o2,o3,o4) \
gimple_build_assign_with_ops_stat (c, o1, o2, o3, o4 MEM_STAT_INFO)
gimple gimple_build_debug_bind_stat (tree, tree, gimple MEM_STAT_DECL);
#define gimple_build_debug_bind(var,val,stmt) \
gimple_build_debug_bind_stat ((var), (val), (stmt) MEM_STAT_INFO)
gimple gimple_build_debug_source_bind_stat (tree, tree, gimple MEM_STAT_DECL);
#define gimple_build_debug_source_bind(var,val,stmt) \
gimple_build_debug_source_bind_stat ((var), (val), (stmt) MEM_STAT_INFO)
gimple gimple_build_call_vec (tree, VEC(tree, heap) *);
gimple gimple_build_call (tree, unsigned, ...);
gimple gimple_build_call_valist (tree, unsigned, va_list);
gimple gimple_build_call_internal (enum internal_fn, unsigned, ...);
gimple gimple_build_call_internal_vec (enum internal_fn, VEC(tree, heap) *);
gimple gimple_build_call_from_tree (tree);
gimple gimplify_assign (tree, tree, gimple_seq *);
gimple gimple_build_cond (enum tree_code, tree, tree, tree, tree);
gimple gimple_build_label (tree label);
gimple gimple_build_goto (tree dest);
gimple gimple_build_nop (void);
gimple gimple_build_bind (tree, gimple_seq, tree);
gimple gimple_build_asm_vec (const char *, VEC(tree,gc) *, VEC(tree,gc) *,
VEC(tree,gc) *, VEC(tree,gc) *);
gimple gimple_build_catch (tree, gimple_seq);
gimple gimple_build_eh_filter (tree, gimple_seq);
gimple gimple_build_eh_must_not_throw (tree);
gimple gimple_build_eh_else (gimple_seq, gimple_seq);
gimple gimple_build_try (gimple_seq, gimple_seq, enum gimple_try_flags);
gimple gimple_build_wce (gimple_seq);
gimple gimple_build_resx (int);
gimple gimple_build_eh_dispatch (int);
gimple gimple_build_switch_nlabels (unsigned, tree, tree);
gimple gimple_build_switch (unsigned, tree, tree, ...);
gimple gimple_build_switch_vec (tree, tree, VEC(tree,heap) *);
gimple gimple_build_omp_parallel (gimple_seq, tree, tree, tree);
gimple gimple_build_omp_task (gimple_seq, tree, tree, tree, tree, tree, tree);
gimple gimple_build_omp_for (gimple_seq, tree, size_t, gimple_seq);
gimple gimple_build_omp_critical (gimple_seq, tree);
gimple gimple_build_omp_section (gimple_seq);
gimple gimple_build_omp_continue (tree, tree);
gimple gimple_build_omp_master (gimple_seq);
gimple gimple_build_omp_return (bool);
gimple gimple_build_omp_ordered (gimple_seq);
gimple gimple_build_omp_sections (gimple_seq, tree);
gimple gimple_build_omp_sections_switch (void);
gimple gimple_build_omp_single (gimple_seq, tree);
gimple gimple_build_cdt (tree, tree);
gimple gimple_build_omp_atomic_load (tree, tree);
gimple gimple_build_omp_atomic_store (tree);
gimple gimple_build_transaction (gimple_seq, tree);
gimple gimple_build_predict (enum br_predictor, enum prediction);
enum gimple_statement_structure_enum gss_for_assign (enum tree_code);
void sort_case_labels (VEC(tree,heap) *);
void gimple_set_body (tree, gimple_seq);
gimple_seq gimple_body (tree);
bool gimple_has_body_p (tree);
gimple_seq gimple_seq_alloc (void);
void gimple_seq_free (gimple_seq);
void gimple_seq_add_seq (gimple_seq *, gimple_seq);
gimple_seq gimple_seq_copy (gimple_seq);
bool gimple_call_same_target_p (const_gimple, const_gimple);
int gimple_call_flags (const_gimple);
int gimple_call_return_flags (const_gimple);
int gimple_call_arg_flags (const_gimple, unsigned);
void gimple_call_reset_alias_info (gimple);
bool gimple_assign_copy_p (gimple);
bool gimple_assign_ssa_name_copy_p (gimple);
bool gimple_assign_unary_nop_p (gimple);
void gimple_set_bb (gimple, struct basic_block_def *);
void gimple_assign_set_rhs_from_tree (gimple_stmt_iterator *, tree);
void gimple_assign_set_rhs_with_ops_1 (gimple_stmt_iterator *, enum tree_code,
tree, tree, tree);
tree gimple_get_lhs (const_gimple);
void gimple_set_lhs (gimple, tree);
void gimple_replace_lhs (gimple, tree);
gimple gimple_copy (gimple);
void gimple_set_modified (gimple, bool);
void gimple_cond_get_ops_from_tree (tree, enum tree_code *, tree *, tree *);
gimple gimple_build_cond_from_tree (tree, tree, tree);
void gimple_cond_set_condition_from_tree (gimple, tree);
bool gimple_has_side_effects (const_gimple);
bool gimple_could_trap_p (gimple);
bool gimple_could_trap_p_1 (gimple, bool, bool);
bool gimple_assign_rhs_could_trap_p (gimple);
void gimple_regimplify_operands (gimple, gimple_stmt_iterator *);
bool empty_body_p (gimple_seq);
unsigned get_gimple_rhs_num_ops (enum tree_code);
#define gimple_alloc(c, n) gimple_alloc_stat (c, n MEM_STAT_INFO)
gimple gimple_alloc_stat (enum gimple_code, unsigned MEM_STAT_DECL);
const char *gimple_decl_printable_name (tree, int);
tree gimple_get_virt_method_for_binfo (HOST_WIDE_INT, tree);
void gimple_adjust_this_by_delta (gimple_stmt_iterator *, tree);
tree gimple_extract_devirt_binfo_from_cst (tree);
/* Returns true iff T is a valid GIMPLE statement. */
extern bool is_gimple_stmt (tree);
/* Returns true iff T is a scalar register variable. */
extern bool is_gimple_reg (tree);
/* Returns true iff T is any sort of variable. */
extern bool is_gimple_variable (tree);
/* Returns true iff T is any sort of symbol. */
extern bool is_gimple_id (tree);
/* Returns true iff T is a variable or an INDIRECT_REF (of a variable). */
extern bool is_gimple_min_lval (tree);
/* Returns true iff T is something whose address can be taken. */
extern bool is_gimple_addressable (tree);
/* Returns true iff T is any valid GIMPLE lvalue. */
extern bool is_gimple_lvalue (tree);
/* Returns true iff T is a GIMPLE address. */
bool is_gimple_address (const_tree);
/* Returns true iff T is a GIMPLE invariant address. */
bool is_gimple_invariant_address (const_tree);
/* Returns true iff T is a GIMPLE invariant address at interprocedural
level. */
bool is_gimple_ip_invariant_address (const_tree);
/* Returns true iff T is a valid GIMPLE constant. */
bool is_gimple_constant (const_tree);
/* Returns true iff T is a GIMPLE restricted function invariant. */
extern bool is_gimple_min_invariant (const_tree);
/* Returns true iff T is a GIMPLE restricted interprecodural invariant. */
extern bool is_gimple_ip_invariant (const_tree);
/* Returns true iff T is a GIMPLE rvalue. */
extern bool is_gimple_val (tree);
/* Returns true iff T is a GIMPLE asm statement input. */
extern bool is_gimple_asm_val (tree);
/* Returns true iff T is a valid address operand of a MEM_REF. */
bool is_gimple_mem_ref_addr (tree);
/* Returns true iff T is a valid rhs for a MODIFY_EXPR where the LHS is a
GIMPLE temporary, a renamed user variable, or something else,
respectively. */
extern bool is_gimple_reg_rhs (tree);
extern bool is_gimple_mem_rhs (tree);
/* Returns true iff T is a valid if-statement condition. */
extern bool is_gimple_condexpr (tree);
/* Returns true iff T is a valid call address expression. */
extern bool is_gimple_call_addr (tree);
extern void recalculate_side_effects (tree);
extern bool gimple_compare_field_offset (tree, tree);
extern tree gimple_register_type (tree);
extern tree gimple_register_canonical_type (tree);
extern void print_gimple_types_stats (void);
extern void free_gimple_type_tables (void);
extern tree gimple_unsigned_type (tree);
extern tree gimple_signed_type (tree);
extern alias_set_type gimple_get_alias_set (tree);
extern void count_uses_and_derefs (tree, gimple, unsigned *, unsigned *,
unsigned *);
extern bool walk_stmt_load_store_addr_ops (gimple, void *,
bool (*)(gimple, tree, void *),
bool (*)(gimple, tree, void *),
bool (*)(gimple, tree, void *));
extern bool walk_stmt_load_store_ops (gimple, void *,
bool (*)(gimple, tree, void *),
bool (*)(gimple, tree, void *));
extern bool gimple_ior_addresses_taken (bitmap, gimple);
extern bool gimple_call_builtin_class_p (gimple, enum built_in_class);
extern bool gimple_call_builtin_p (gimple, enum built_in_function);
extern bool gimple_asm_clobbers_memory_p (const_gimple);
/* In gimplify.c */
extern tree create_tmp_var_raw (tree, const char *);
extern tree create_tmp_var_name (const char *);
extern tree create_tmp_var (tree, const char *);
extern tree create_tmp_reg (tree, const char *);
extern tree get_initialized_tmp_var (tree, gimple_seq *, gimple_seq *);
extern tree get_formal_tmp_var (tree, gimple_seq *);
extern void declare_vars (tree, gimple, bool);
extern void annotate_all_with_location (gimple_seq, location_t);
/* Validation of GIMPLE expressions. Note that these predicates only check
the basic form of the expression, they don't recurse to make sure that
underlying nodes are also of the right form. */
typedef bool (*gimple_predicate)(tree);
/* FIXME we should deduce this from the predicate. */
/* What kind of temporary, if any, gimplification may create to hold
the result of an expression. Values are combinable as a bitmask
(see fb_either). */
enum fallback {
fb_none = 0, /* Do not generate a temporary. */
fb_rvalue = 1, /* Generate an rvalue to hold the result of a
gimplified expression. */
fb_lvalue = 2, /* Generate an lvalue to hold the result of a
gimplified expression. */
fb_mayfail = 4, /* Gimplification may fail. Error issued
afterwards. */
fb_either= fb_rvalue | fb_lvalue
};
/* Fallback values are OR-able flags, so they travel as a plain int. */
typedef int fallback_t;
/* Result codes returned by the gimplification routines. */
enum gimplify_status {
GS_ERROR = -2, /* Something Bad Seen. */
GS_UNHANDLED = -1, /* A langhook result for "I dunno". */
GS_OK = 0, /* We did something, maybe more to do. */
GS_ALL_DONE = 1 /* The expression is fully gimplified. */
};
/* Per-invocation state of the gimplifier. Contexts nest via
PREV_CONTEXT (see push_gimplify_context/pop_gimplify_context). */
struct gimplify_ctx
{
/* Enclosing context, or NULL at the outermost level. */
struct gimplify_ctx *prev_context;
/* Stack of enclosing GIMPLE_BIND statements. */
VEC(gimple,heap) *bind_expr_stack;
tree temps;
gimple_seq conditional_cleanups;
tree exit_label;
tree return_temp;
VEC(tree,heap) *case_labels;
/* The formal temporary table. Should this be persistent? */
htab_t temp_htab;
int conditions;
bool save_stack;
bool into_ssa;
bool allow_rhs_cond_expr;
bool in_cleanup_point_expr;
};
/* Return true if gimplify_one_sizepos doesn't need to gimplify
expr (when in TYPE_SIZE{,_UNIT} and similar type/decl size/bitsize
fields).  */
static inline bool
is_gimple_sizepos (tree expr)
{
  /* Nothing to do when the value is absent or already constant.  */
  if (expr == NULL_TREE)
    return true;
  if (TREE_CONSTANT (expr))
    return true;
  /* A VAR_DECL is left alone: the gimplifier would otherwise want to
     replace a VAR_DECL from another function with a new variable,
     which causes problems when this type is used outside the
     function it came from.  */
  if (TREE_CODE (expr) == VAR_DECL)
    return true;
  return CONTAINS_PLACEHOLDER_P (expr);
}
extern enum gimplify_status gimplify_expr (tree *, gimple_seq *, gimple_seq *,
bool (*) (tree), fallback_t);
extern void gimplify_type_sizes (tree, gimple_seq *);
extern void gimplify_one_sizepos (tree *, gimple_seq *);
extern bool gimplify_stmt (tree *, gimple_seq *);
extern gimple gimplify_body (tree, bool);
extern void push_gimplify_context (struct gimplify_ctx *);
extern void pop_gimplify_context (gimple);
extern void gimplify_and_add (tree, gimple_seq *);
/* Miscellaneous helpers. */
extern void gimple_add_tmp_var (tree);
extern gimple gimple_current_bind_expr (void);
extern VEC(gimple, heap) *gimple_bind_expr_stack (void);
extern tree voidify_wrapper_expr (tree, tree);
extern tree build_and_jump (tree *);
extern tree force_labels_r (tree *, int *, void *);
extern enum gimplify_status gimplify_va_arg_expr (tree *, gimple_seq *,
gimple_seq *);
struct gimplify_omp_ctx;
extern void omp_firstprivatize_variable (struct gimplify_omp_ctx *, tree);
extern tree gimple_boolify (tree);
extern gimple_predicate rhs_predicate_for (tree);
extern tree canonicalize_cond_expr_cond (tree);
/* In omp-low.c. */
extern tree omp_reduction_init (tree, tree);
/* In trans-mem.c. */
extern void diagnose_tm_safe_errors (tree);
extern void compute_transaction_bits (void);
/* In tree-nested.c. */
extern void lower_nested_functions (tree);
extern void insert_field_into_struct (tree, tree);
/* In gimplify.c. */
extern void gimplify_function_tree (tree);
/* In cfgexpand.c. */
extern tree gimple_assign_rhs_to_tree (gimple);
/* In builtins.c */
extern bool validate_gimple_arglist (const_gimple, ...);
/* In tree-ssa.c */
extern bool tree_ssa_useless_type_conversion (tree);
extern tree tree_ssa_strip_useless_type_conversions (tree);
extern bool useless_type_conversion_p (tree, tree);
extern bool types_compatible_p (tree, tree);
/* Return the code for GIMPLE statement G. */
static inline enum gimple_code
gimple_code (const_gimple g)
{
return g->gsbase.code;
}
/* Return the GSS code used by a GIMPLE code. */
static inline enum gimple_statement_structure_enum
gss_for_code (enum gimple_code code)
{
/* Out-of-range codes would index past the end of gss_for_code_[]. */
gcc_gimple_checking_assert ((unsigned int)code < LAST_AND_UNUSED_GIMPLE_CODE);
return gss_for_code_[code];
}
/* Return which GSS code is used by GS. */
static inline enum gimple_statement_structure_enum
gimple_statement_structure (gimple gs)
{
return gss_for_code (gimple_code (gs));
}
/* Return true if statement G has sub-statements.  This is only true
for High GIMPLE statements.  */
static inline bool
gimple_has_substatements (gimple g)
{
  enum gimple_code code = gimple_code (g);
  return (code == GIMPLE_BIND
          || code == GIMPLE_CATCH
          || code == GIMPLE_EH_FILTER
          || code == GIMPLE_EH_ELSE
          || code == GIMPLE_TRY
          || code == GIMPLE_OMP_FOR
          || code == GIMPLE_OMP_MASTER
          || code == GIMPLE_OMP_ORDERED
          || code == GIMPLE_OMP_SECTION
          || code == GIMPLE_OMP_PARALLEL
          || code == GIMPLE_OMP_TASK
          || code == GIMPLE_OMP_SECTIONS
          || code == GIMPLE_OMP_SINGLE
          || code == GIMPLE_OMP_CRITICAL
          || code == GIMPLE_WITH_CLEANUP_EXPR
          || code == GIMPLE_TRANSACTION);
}
/* Return the basic block holding statement G. */
static inline struct basic_block_def *
gimple_bb (const_gimple g)
{
return g->gsbase.bb;
}
/* Return the lexical scope block holding statement G. */
static inline tree
gimple_block (const_gimple g)
{
return g->gsbase.block;
}
/* Set BLOCK to be the lexical scope block holding statement G. */
static inline void
gimple_set_block (gimple g, tree block)
{
g->gsbase.block = block;
}
/* Return location information for statement G. */
static inline location_t
gimple_location (const_gimple g)
{
return g->gsbase.location;
}
/* Return pointer to location information for statement G. */
static inline const location_t *
gimple_location_ptr (const_gimple g)
{
return &g->gsbase.location;
}
/* Set location information for statement G. */
static inline void
gimple_set_location (gimple g, location_t location)
{
g->gsbase.location = location;
}
/* Return true if G contains location information, i.e. its location
differs from UNKNOWN_LOCATION. */
static inline bool
gimple_has_location (const_gimple g)
{
return gimple_location (g) != UNKNOWN_LOCATION;
}
/* Return the file name of the location of STMT. */
static inline const char *
gimple_filename (const_gimple stmt)
{
return LOCATION_FILE (gimple_location (stmt));
}
/* Return the line number of the location of STMT. */
static inline int
gimple_lineno (const_gimple stmt)
{
return LOCATION_LINE (gimple_location (stmt));
}
/* Determine whether SEQ holds exactly one statement.  */
static inline bool
gimple_seq_singleton_p (gimple_seq seq)
{
  gimple_seq_node first_node = gimple_seq_first (seq);
  /* A singleton has a first node that is also the last one.  */
  return first_node != NULL && first_node == gimple_seq_last (seq);
}
/* Return true if no warnings should be emitted for statement STMT. */
static inline bool
gimple_no_warning_p (const_gimple stmt)
{
return stmt->gsbase.no_warning;
}
/* Set the no_warning flag of STMT to NO_WARNING. */
static inline void
gimple_set_no_warning (gimple stmt, bool no_warning)
{
stmt->gsbase.no_warning = (unsigned) no_warning;
}
/* Set the visited status on statement STMT to VISITED_P.
Passes must clear this bit themselves before relying on it. */
static inline void
gimple_set_visited (gimple stmt, bool visited_p)
{
stmt->gsbase.visited = (unsigned) visited_p;
}
/* Return the visited status for statement STMT. */
static inline bool
gimple_visited_p (gimple stmt)
{
return stmt->gsbase.visited;
}
/* Set pass local flag PLF on statement STMT to VAL_P.  */
static inline void
gimple_set_plf (gimple stmt, enum plf_mask plf, bool val_p)
{
  unsigned int mask = (unsigned int) plf;
  if (val_p)
    stmt->gsbase.plf |= mask;
  else
    stmt->gsbase.plf &= ~mask;
}
/* Return the value of pass local flag PLF on statement STMT. */
static inline unsigned int
gimple_plf (gimple stmt, enum plf_mask plf)
{
return stmt->gsbase.plf & ((unsigned int) plf);
}
/* Set the UID of statement. */
static inline void
gimple_set_uid (gimple g, unsigned uid)
{
g->gsbase.uid = uid;
}
/* Return the UID of statement. */
static inline unsigned
gimple_uid (const_gimple g)
{
return g->gsbase.uid;
}
/* Return true if GIMPLE statement G has register or memory operands.
Relies on the ordering of the gimple_code enumeration: codes in
[GIMPLE_COND, GIMPLE_RETURN] are exactly the ones with operands. */
static inline bool
gimple_has_ops (const_gimple g)
{
return gimple_code (g) >= GIMPLE_COND && gimple_code (g) <= GIMPLE_RETURN;
}
/* Return true if GIMPLE statement G has memory operands.
Also relies on gimple_code enumeration ordering. */
static inline bool
gimple_has_mem_ops (const_gimple g)
{
return gimple_code (g) >= GIMPLE_ASSIGN && gimple_code (g) <= GIMPLE_RETURN;
}
/* Return the set of DEF operands for statement G, or NULL if G does
   not carry operands at all.  */
static inline struct def_optype_d *
gimple_def_ops (const_gimple g)
{
  if (!gimple_has_ops (g))
    return NULL;
  return g->gsops.opbase.def_ops;
}

/* Set DEF to be the set of DEF operands for statement G.  */
static inline void
gimple_set_def_ops (gimple g, struct def_optype_d *def)
{
  gcc_gimple_checking_assert (gimple_has_ops (g));
  g->gsops.opbase.def_ops = def;
}

/* Return the set of USE operands for statement G, or NULL if G does
   not carry operands at all.  */
static inline struct use_optype_d *
gimple_use_ops (const_gimple g)
{
  if (!gimple_has_ops (g))
    return NULL;
  return g->gsops.opbase.use_ops;
}

/* Set USE to be the set of USE operands for statement G.  */
static inline void
gimple_set_use_ops (gimple g, struct use_optype_d *use)
{
  gcc_gimple_checking_assert (gimple_has_ops (g));
  g->gsops.opbase.use_ops = use;
}

/* Return the VUSE operand of statement G, recognized by the first USE
   operand pointing at the statement's vuse field.  Returns
   NULL_USE_OPERAND_P when G has no memory operands or no VUSE.  */
static inline use_operand_p
gimple_vuse_op (const_gimple g)
{
  struct use_optype_d *ops;
  if (!gimple_has_mem_ops (g))
    return NULL_USE_OPERAND_P;
  ops = g->gsops.opbase.use_ops;
  if (ops
      && USE_OP_PTR (ops)->use == &g->gsmembase.vuse)
    return USE_OP_PTR (ops);
  return NULL_USE_OPERAND_P;
}

/* Return the VDEF operand of statement G, recognized by the first DEF
   operand pointing at the statement's vdef field.  Returns
   NULL_DEF_OPERAND_P when G has no memory operands or no VDEF.  */
static inline def_operand_p
gimple_vdef_op (const_gimple g)
{
  struct def_optype_d *ops;
  if (!gimple_has_mem_ops (g))
    return NULL_DEF_OPERAND_P;
  ops = g->gsops.opbase.def_ops;
  if (ops
      && DEF_OP_PTR (ops) == &g->gsmembase.vdef)
    return DEF_OP_PTR (ops);
  return NULL_DEF_OPERAND_P;
}

/* Return the single VUSE operand of statement G, or NULL_TREE if G
   cannot have memory operands.  */
static inline tree
gimple_vuse (const_gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL_TREE;
  return g->gsmembase.vuse;
}

/* Return the single VDEF operand of statement G, or NULL_TREE if G
   cannot have memory operands.  */
static inline tree
gimple_vdef (const_gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL_TREE;
  return g->gsmembase.vdef;
}

/* Return a pointer to the single VUSE operand of statement G, or NULL
   if G cannot have memory operands.  */
static inline tree *
gimple_vuse_ptr (gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL;
  return &g->gsmembase.vuse;
}

/* Return a pointer to the single VDEF operand of statement G, or NULL
   if G cannot have memory operands.  */
static inline tree *
gimple_vdef_ptr (gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL;
  return &g->gsmembase.vdef;
}

/* Set the single VUSE operand of statement G to VUSE.  */
static inline void
gimple_set_vuse (gimple g, tree vuse)
{
  gcc_gimple_checking_assert (gimple_has_mem_ops (g));
  g->gsmembase.vuse = vuse;
}

/* Set the single VDEF operand of statement G to VDEF.  */
static inline void
gimple_set_vdef (gimple g, tree vdef)
{
  gcc_gimple_checking_assert (gimple_has_mem_ops (g));
  g->gsmembase.vdef = vdef;
}
/* Return true if statement G has operands and the modified field has
   been set.  Statements without operands are never considered
   modified.  */
static inline bool
gimple_modified_p (const_gimple g)
{
  return (gimple_has_ops (g)) ? (bool) g->gsbase.modified : false;
}

/* Return the tree code for the expression computed by STMT.  This is
   only valid for GIMPLE_COND, GIMPLE_CALL and GIMPLE_ASSIGN.  For
   GIMPLE_CALL, return CALL_EXPR as the expression code for
   consistency.  This is useful when the caller needs to deal with the
   three kinds of computation that GIMPLE supports.  */
static inline enum tree_code
gimple_expr_code (const_gimple stmt)
{
  enum gimple_code code = gimple_code (stmt);
  if (code == GIMPLE_ASSIGN || code == GIMPLE_COND)
    return (enum tree_code) stmt->gsbase.subcode;
  else
    {
      /* Any other code is invalid here; only calls remain.  */
      gcc_gimple_checking_assert (code == GIMPLE_CALL);
      return CALL_EXPR;
    }
}

/* Mark statement S as modified and recompute its operand caches.
   Statements without operands are left untouched.  */
static inline void
update_stmt (gimple s)
{
  if (gimple_has_ops (s))
    {
      gimple_set_modified (s, true);
      update_stmt_operands (s);
    }
}

/* Recompute the operand caches of statement S, but only if it has
   been marked modified.  */
static inline void
update_stmt_if_modified (gimple s)
{
  if (gimple_modified_p (s))
    update_stmt_operands (s);
}
/* Return true if statement STMT contains volatile operands.  Only
   statements that can have memory operands track this flag; all
   others report false.  */
static inline bool
gimple_has_volatile_ops (const_gimple stmt)
{
  if (gimple_has_mem_ops (stmt))
    return stmt->gsbase.has_volatile_ops;
  else
    return false;
}

/* Set the HAS_VOLATILE_OPS flag of STMT to VOLATILEP.  A no-op for
   statements that cannot have memory operands.  */
static inline void
gimple_set_has_volatile_ops (gimple stmt, bool volatilep)
{
  if (gimple_has_mem_ops (stmt))
    stmt->gsbase.has_volatile_ops = (unsigned) volatilep;
}
/* Return true if basic block BB is in a transaction, i.e. when
   transactional memory support is enabled and BB carries the
   BB_IN_TRANSACTION flag.  */
static inline bool
block_in_transaction (basic_block bb)
{
  if (!flag_tm)
    return false;
  return (bb->flags & BB_IN_TRANSACTION) != 0;
}
/* Return true if statement STMT is in a transaction, i.e. its basic
   block is.  */
static inline bool
gimple_in_transaction (gimple stmt)
{
  return block_in_transaction (gimple_bb (stmt));
}

/* Return true if statement STMT may access memory, i.e. it can have
   memory operands and carries a virtual use.  */
static inline bool
gimple_references_memory_p (gimple stmt)
{
  return gimple_has_mem_ops (stmt) && gimple_vuse (stmt);
}
/* Return the subcode for OMP statement S.  Only valid for statement
   codes in the contiguous OMP range GIMPLE_OMP_ATOMIC_LOAD ..
   GIMPLE_OMP_SINGLE.  */
static inline unsigned
gimple_omp_subcode (const_gimple s)
{
  gcc_gimple_checking_assert (gimple_code (s) >= GIMPLE_OMP_ATOMIC_LOAD
	      && gimple_code (s) <= GIMPLE_OMP_SINGLE);
  return s->gsbase.subcode;
}

/* Set the subcode for OMP statement S to SUBCODE.  */
static inline void
gimple_omp_set_subcode (gimple s, unsigned int subcode)
{
  /* We only have 16 bits for the subcode.  Assert that we are not
     overflowing it.  */
  gcc_gimple_checking_assert (subcode < (1 << 16));
  s->gsbase.subcode = subcode;
}

/* Set the nowait flag on OMP_RETURN statement S.  */
static inline void
gimple_omp_return_set_nowait (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_OMP_RETURN);
  s->gsbase.subcode |= GF_OMP_RETURN_NOWAIT;
}

/* Return true if OMP return statement G has the GF_OMP_RETURN_NOWAIT
   flag set.  */
static inline bool
gimple_omp_return_nowait_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_RETURN);
  return (gimple_omp_subcode (g) & GF_OMP_RETURN_NOWAIT) != 0;
}

/* Return true if OMP section statement G has the GF_OMP_SECTION_LAST
   flag set.  */
static inline bool
gimple_omp_section_last_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
  return (gimple_omp_subcode (g) & GF_OMP_SECTION_LAST) != 0;
}

/* Set the GF_OMP_SECTION_LAST flag on section statement G.  */
static inline void
gimple_omp_section_set_last (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
  g->gsbase.subcode |= GF_OMP_SECTION_LAST;
}

/* Return true if OMP parallel statement G has the
   GF_OMP_PARALLEL_COMBINED flag set.  */
static inline bool
gimple_omp_parallel_combined_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
  return (gimple_omp_subcode (g) & GF_OMP_PARALLEL_COMBINED) != 0;
}

/* Set or clear the GF_OMP_PARALLEL_COMBINED flag on G according to
   the boolean COMBINED_P.  */
static inline void
gimple_omp_parallel_set_combined_p (gimple g, bool combined_p)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
  if (combined_p)
    g->gsbase.subcode |= GF_OMP_PARALLEL_COMBINED;
  else
    g->gsbase.subcode &= ~GF_OMP_PARALLEL_COMBINED;
}

/* Return true if OMP atomic load/store statement G has the
   GF_OMP_ATOMIC_NEED_VALUE flag set.  */
static inline bool
gimple_omp_atomic_need_value_p (const_gimple g)
{
  /* Accept either of the two atomic statement codes.  */
  if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
    GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  return (gimple_omp_subcode (g) & GF_OMP_ATOMIC_NEED_VALUE) != 0;
}

/* Set the GF_OMP_ATOMIC_NEED_VALUE flag on atomic load/store G.  */
static inline void
gimple_omp_atomic_set_need_value (gimple g)
{
  /* Accept either of the two atomic statement codes.  */
  if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
    GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  g->gsbase.subcode |= GF_OMP_ATOMIC_NEED_VALUE;
}
/* Return the number of operands for statement GS.  */
static inline unsigned
gimple_num_ops (const_gimple gs)
{
  return gs->gsbase.num_ops;
}

/* Set the number of operands for statement GS to NUM_OPS.  */
static inline void
gimple_set_num_ops (gimple gs, unsigned num_ops)
{
  gs->gsbase.num_ops = num_ops;
}

/* Return the array of operands for statement GS.  */
static inline tree *
gimple_ops (gimple gs)
{
  size_t off;
  /* All the tuples have their operand vector at the very bottom
     of the structure.  Note that those structures that do not
     have an operand vector have a zero offset.  */
  off = gimple_ops_offset_[gimple_statement_structure (gs)];
  gcc_gimple_checking_assert (off != 0);
  return (tree *) ((char *) gs + off);
}

/* Return operand I for statement GS, or NULL_TREE if GS carries no
   operands.  */
static inline tree
gimple_op (const_gimple gs, unsigned i)
{
  if (gimple_has_ops (gs))
    {
      gcc_gimple_checking_assert (i < gimple_num_ops (gs));
      return gimple_ops (CONST_CAST_GIMPLE (gs))[i];
    }
  else
    return NULL_TREE;
}

/* Return a pointer to operand I for statement GS, or NULL if GS
   carries no operands.  */
static inline tree *
gimple_op_ptr (const_gimple gs, unsigned i)
{
  if (gimple_has_ops (gs))
    {
      gcc_gimple_checking_assert (i < gimple_num_ops (gs));
      return gimple_ops (CONST_CAST_GIMPLE (gs)) + i;
    }
  else
    return NULL;
}

/* Set operand I of statement GS to OP.  */
static inline void
gimple_set_op (gimple gs, unsigned i, tree op)
{
  gcc_gimple_checking_assert (gimple_has_ops (gs) && i < gimple_num_ops (gs));
  /* Note.  It may be tempting to assert that OP matches
     is_gimple_operand, but that would be wrong.  Different tuples
     accept slightly different sets of tree operands.  Each caller
     should perform its own validation.  */
  gimple_ops (gs)[i] = op;
}
/* Return true if GS is a GIMPLE_ASSIGN.  */
static inline bool
is_gimple_assign (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_ASSIGN;
}

/* Determine the RHS class (single, unary, binary, ternary) of
   expression CODE when used on the RHS of a GIMPLE assignment, by
   table lookup.  */
static inline enum gimple_rhs_class
get_gimple_rhs_class (enum tree_code code)
{
  return (enum gimple_rhs_class) gimple_rhs_class_table[(int) code];
}
/* Return the LHS (operand 0) of assignment statement GS.  */
static inline tree
gimple_assign_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op (gs, 0);
}

/* Return a pointer to the LHS of assignment statement GS.  */
static inline tree *
gimple_assign_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 0);
}

/* Set LHS to be the LHS operand of assignment statement GS.  If LHS
   is an SSA name, GS also becomes its defining statement.  */
static inline void
gimple_assign_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 0, lhs);
  if (lhs && TREE_CODE (lhs) == SSA_NAME)
    SSA_NAME_DEF_STMT (lhs) = gs;
}

/* Return the first operand (operand 1) on the RHS of assignment
   statement GS.  */
static inline tree
gimple_assign_rhs1 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op (gs, 1);
}

/* Return a pointer to the first operand on the RHS of assignment
   statement GS.  */
static inline tree *
gimple_assign_rhs1_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 1);
}

/* Set RHS to be the first operand on the RHS of assignment statement
   GS.  */
static inline void
gimple_assign_set_rhs1 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 1, rhs);
}

/* Return the second operand on the RHS of assignment statement GS.
   If GS does not have two RHS operands, NULL is returned instead.  */
static inline tree
gimple_assign_rhs2 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  if (gimple_num_ops (gs) >= 3)
    return gimple_op (gs, 2);
  else
    return NULL_TREE;
}

/* Return a pointer to the second operand on the RHS of assignment
   statement GS.  Unlike gimple_assign_rhs2 this does not check the
   operand count; the operand must exist.  */
static inline tree *
gimple_assign_rhs2_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 2);
}

/* Set RHS to be the second operand on the RHS of assignment statement
   GS.  */
static inline void
gimple_assign_set_rhs2 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 2, rhs);
}

/* Return the third operand on the RHS of assignment statement GS.
   If GS does not have three RHS operands, NULL is returned instead.  */
static inline tree
gimple_assign_rhs3 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  if (gimple_num_ops (gs) >= 4)
    return gimple_op (gs, 3);
  else
    return NULL_TREE;
}

/* Return a pointer to the third operand on the RHS of assignment
   statement GS.  Unlike gimple_assign_rhs3 this does not check the
   operand count; the operand must exist.  */
static inline tree *
gimple_assign_rhs3_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 3);
}

/* Set RHS to be the third operand on the RHS of assignment statement
   GS.  */
static inline void
gimple_assign_set_rhs3 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 3, rhs);
}

/* A wrapper around gimple_assign_set_rhs_with_ops_1, for callers which expect
   to see only a maximum of two operands.  */
static inline void
gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *gsi, enum tree_code code,
				tree op1, tree op2)
{
  gimple_assign_set_rhs_with_ops_1 (gsi, code, op1, op2, NULL);
}
/* A wrapper around extract_ops_from_tree_1, for callers which expect
   to see only a maximum of two operands.  Asserts that EXPR did not
   yield a third operand.  */
static inline void
extract_ops_from_tree (tree expr, enum tree_code *code, tree *op0,
		       tree *op1)
{
  tree op2;
  extract_ops_from_tree_1 (expr, code, op0, op1, &op2);
  gcc_assert (op2 == NULL_TREE);
}

/* Return true if assignment GS is a nontemporal move.  */
static inline bool
gimple_assign_nontemporal_move_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gs->gsbase.nontemporal_move;
}

/* Set the nontemporal move flag of assignment GS to NONTEMPORAL.  */
static inline void
gimple_assign_set_nontemporal_move (gimple gs, bool nontemporal)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gs->gsbase.nontemporal_move = nontemporal;
}
/* Return the code of the expression computed on the rhs of assignment
   statement GS.  In case that the RHS is a single object, returns the
   tree code of the object.  */
static inline enum tree_code
gimple_assign_rhs_code (const_gimple gs)
{
  enum tree_code code;
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  code = (enum tree_code) gs->gsbase.subcode;
  /* While we initially set subcode to the TREE_CODE of the rhs for
     GIMPLE_SINGLE_RHS assigns we do not update that subcode to stay
     in sync when we rewrite stmts into SSA form or do SSA propagations.  */
  if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
    code = TREE_CODE (gimple_assign_rhs1 (gs));
  return code;
}

/* Set CODE to be the code for the expression computed on the RHS of
   assignment S.  */
static inline void
gimple_assign_set_rhs_code (gimple s, enum tree_code code)
{
  GIMPLE_CHECK (s, GIMPLE_ASSIGN);
  s->gsbase.subcode = code;
}

/* Return the gimple rhs class of the code of the expression computed on
   the rhs of assignment statement GS.
   This will never return GIMPLE_INVALID_RHS.  */
static inline enum gimple_rhs_class
gimple_assign_rhs_class (const_gimple gs)
{
  return get_gimple_rhs_class (gimple_assign_rhs_code (gs));
}

/* Return true if GS is an assignment with a singleton RHS, i.e.,
   there is no operator associated with the assignment itself.
   Unlike gimple_assign_copy_p, this predicate returns true for
   any RHS operand, including those that perform an operation
   and do not have the semantics of a copy, such as COND_EXPR.  */
static inline bool
gimple_assign_single_p (gimple gs)
{
  return (is_gimple_assign (gs)
	  && gimple_assign_rhs_class (gs) == GIMPLE_SINGLE_RHS);
}
/* Return true if S is a type-cast assignment, i.e. its RHS code is a
   conversion, a VIEW_CONVERT_EXPR or a FIX_TRUNC_EXPR.  */
static inline bool
gimple_assign_cast_p (gimple s)
{
  enum tree_code sc;

  if (!is_gimple_assign (s))
    return false;

  sc = gimple_assign_rhs_code (s);
  return (CONVERT_EXPR_CODE_P (sc)
	  || sc == VIEW_CONVERT_EXPR
	  || sc == FIX_TRUNC_EXPR);
}
/* Return true if S is a clobber statement: a single-RHS assignment
   whose RHS is a clobber constructor.  */
static inline bool
gimple_clobber_p (gimple s)
{
  return gimple_assign_single_p (s)
	 && TREE_CLOBBER_P (gimple_assign_rhs1 (s));
}

/* Return true if GS is a GIMPLE_CALL.  */
static inline bool
is_gimple_call (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_CALL;
}
/* Return the LHS (operand 0) of call statement GS.  */
static inline tree
gimple_call_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 0);
}

/* Return a pointer to the LHS of call statement GS.  */
static inline tree *
gimple_call_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 0);
}

/* Set LHS to be the LHS operand of call statement GS.  If LHS is an
   SSA name, GS also becomes its defining statement.  */
static inline void
gimple_call_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, 0, lhs);
  if (lhs && TREE_CODE (lhs) == SSA_NAME)
    SSA_NAME_DEF_STMT (lhs) = gs;
}

/* Return true if call GS calls an internal-only function, as enumerated
   by internal_fn.  */
static inline bool
gimple_call_internal_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return (gs->gsbase.subcode & GF_CALL_INTERNAL) != 0;
}

/* Return the target of internal call GS.  Only valid when
   gimple_call_internal_p holds.  */
static inline enum internal_fn
gimple_call_internal_fn (const_gimple gs)
{
  gcc_gimple_checking_assert (gimple_call_internal_p (gs));
  return gs->gimple_call.u.internal_fn;
}

/* Return the function type of the function called by GS, or NULL_TREE
   for internal calls (which have no fntype).  */
static inline tree
gimple_call_fntype (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  if (gimple_call_internal_p (gs))
    return NULL_TREE;
  return gs->gimple_call.u.fntype;
}

/* Set the type of the function called by GS to FNTYPE.  Not valid for
   internal calls, which store an internal_fn in the same slot.  */
static inline void
gimple_call_set_fntype (gimple gs, tree fntype)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  gs->gimple_call.u.fntype = fntype;
}
/* Return the tree node representing the function called by call
   statement GS (operand 1).  */
static inline tree
gimple_call_fn (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 1);
}

/* Return a pointer to the tree node representing the function called by call
   statement GS.  */
static inline tree *
gimple_call_fn_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 1);
}

/* Set FN to be the function called by call statement GS.  Not valid
   for internal calls.  */
static inline void
gimple_call_set_fn (gimple gs, tree fn)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  gimple_set_op (gs, 1, fn);
}

/* Set FNDECL to be the function called by call statement GS; the
   callee operand becomes the address of DECL.  */
static inline void
gimple_call_set_fndecl (gimple gs, tree decl)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  gimple_set_op (gs, 1, build_fold_addr_expr_loc (gimple_location (gs), decl));
}

/* Set internal function FN to be the function called by call statement GS.  */
static inline void
gimple_call_set_internal_fn (gimple gs, enum internal_fn fn)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (gimple_call_internal_p (gs));
  gs->gimple_call.u.internal_fn = fn;
}

/* Given a valid GIMPLE_CALL function address return the FUNCTION_DECL
   associated with the callee if known.  Otherwise return NULL_TREE.
   Looks through a degenerate MEM_REF of the form *&fndecl + 0.  */
static inline tree
gimple_call_addr_fndecl (const_tree fn)
{
  if (fn && TREE_CODE (fn) == ADDR_EXPR)
    {
      tree fndecl = TREE_OPERAND (fn, 0);
      if (TREE_CODE (fndecl) == MEM_REF
	  && TREE_CODE (TREE_OPERAND (fndecl, 0)) == ADDR_EXPR
	  && integer_zerop (TREE_OPERAND (fndecl, 1)))
	fndecl = TREE_OPERAND (TREE_OPERAND (fndecl, 0), 0);
      if (TREE_CODE (fndecl) == FUNCTION_DECL)
	return fndecl;
    }
  return NULL_TREE;
}

/* If a given GIMPLE_CALL's callee is a FUNCTION_DECL, return it.
   Otherwise return NULL.  This function is analogous to
   get_callee_fndecl in tree land.  */
static inline tree
gimple_call_fndecl (const_gimple gs)
{
  return gimple_call_addr_fndecl (gimple_call_fn (gs));
}
/* Return the type returned by call statement GS.  When GS has no
   function type (internal calls), the type of the LHS is used.  */
static inline tree
gimple_call_return_type (const_gimple gs)
{
  tree type = gimple_call_fntype (gs);
  if (type == NULL_TREE)
    return TREE_TYPE (gimple_call_lhs (gs));
  /* The type returned by a function is the type of its
     function type.  */
  return TREE_TYPE (type);
}

/* Return the static chain (operand 2) of call statement GS.  */
static inline tree
gimple_call_chain (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 2);
}

/* Return a pointer to the static chain of call statement GS.  */
static inline tree *
gimple_call_chain_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 2);
}

/* Set CHAIN to be the static chain of call statement GS.  */
static inline void
gimple_call_set_chain (gimple gs, tree chain)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, 2, chain);
}

/* Return the number of arguments used by call statement GS.  Operands
   0, 1 and 2 hold the LHS, the callee and the static chain, so the
   arguments are the remaining operands.  */
static inline unsigned
gimple_call_num_args (const_gimple gs)
{
  unsigned num_ops;
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  num_ops = gimple_num_ops (gs);
  return num_ops - 3;
}

/* Return the argument at position INDEX for call statement GS.  */
static inline tree
gimple_call_arg (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, index + 3);
}

/* Return a pointer to the argument at position INDEX for call
   statement GS.  */
static inline tree *
gimple_call_arg_ptr (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, index + 3);
}

/* Set ARG to be the argument at position INDEX for call statement GS.  */
static inline void
gimple_call_set_arg (gimple gs, unsigned index, tree arg)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, index + 3, arg);
}
/* If TAIL_P is true, mark call statement S as being a tail call
   (i.e., a call just before the exit of a function).  These calls are
   candidate for tail call optimization.  */
static inline void
gimple_call_set_tail (gimple s, bool tail_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (tail_p)
    s->gsbase.subcode |= GF_CALL_TAILCALL;
  else
    s->gsbase.subcode &= ~GF_CALL_TAILCALL;
}

/* Return true if GIMPLE_CALL S is marked as a tail call.  */
static inline bool
gimple_call_tail_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_TAILCALL) != 0;
}

/* If RETURN_SLOT_OPT_P is true mark GIMPLE_CALL S as valid for return
   slot optimization.  This transformation uses the target of the call
   expansion as the return slot for calls that return in memory.  */
static inline void
gimple_call_set_return_slot_opt (gimple s, bool return_slot_opt_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (return_slot_opt_p)
    s->gsbase.subcode |= GF_CALL_RETURN_SLOT_OPT;
  else
    s->gsbase.subcode &= ~GF_CALL_RETURN_SLOT_OPT;
}

/* Return true if S is marked for return slot optimization.  */
static inline bool
gimple_call_return_slot_opt_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_RETURN_SLOT_OPT) != 0;
}

/* If FROM_THUNK_P is true, mark GIMPLE_CALL S as being the jump from a
   thunk to the thunked-to function.  */
static inline void
gimple_call_set_from_thunk (gimple s, bool from_thunk_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (from_thunk_p)
    s->gsbase.subcode |= GF_CALL_FROM_THUNK;
  else
    s->gsbase.subcode &= ~GF_CALL_FROM_THUNK;
}

/* Return true if GIMPLE_CALL S is a jump from a thunk.  */
static inline bool
gimple_call_from_thunk_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_FROM_THUNK) != 0;
}

/* If PASS_ARG_PACK_P is true, GIMPLE_CALL S is a stdarg call that needs the
   argument pack in its argument list.  */
static inline void
gimple_call_set_va_arg_pack (gimple s, bool pass_arg_pack_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (pass_arg_pack_p)
    s->gsbase.subcode |= GF_CALL_VA_ARG_PACK;
  else
    s->gsbase.subcode &= ~GF_CALL_VA_ARG_PACK;
}

/* Return true if GIMPLE_CALL S is a stdarg call that needs the
   argument pack in its argument list.  */
static inline bool
gimple_call_va_arg_pack_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_VA_ARG_PACK) != 0;
}

/* Return true if S is a noreturn call, as reported by the call's ECF
   flags.  */
static inline bool
gimple_call_noreturn_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (gimple_call_flags (s) & ECF_NORETURN) != 0;
}

/* If NOTHROW_P is true, GIMPLE_CALL S is a call that is known to not throw
   even if the called function can throw in other cases.  */
static inline void
gimple_call_set_nothrow (gimple s, bool nothrow_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (nothrow_p)
    s->gsbase.subcode |= GF_CALL_NOTHROW;
  else
    s->gsbase.subcode &= ~GF_CALL_NOTHROW;
}

/* Return true if S is a nothrow call, as reported by the call's ECF
   flags.  */
static inline bool
gimple_call_nothrow_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (gimple_call_flags (s) & ECF_NOTHROW) != 0;
}

/* If FOR_VAR is true, GIMPLE_CALL S is a call to builtin_alloca that
   is known to be emitted for VLA objects.  Those are wrapped by
   stack_save/stack_restore calls and hence can't lead to unbounded
   stack growth even when they occur in loops.  */
static inline void
gimple_call_set_alloca_for_var (gimple s, bool for_var)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (for_var)
    s->gsbase.subcode |= GF_CALL_ALLOCA_FOR_VAR;
  else
    s->gsbase.subcode &= ~GF_CALL_ALLOCA_FOR_VAR;
}

/* Return true if S is a call to builtin_alloca emitted for VLA objects.  */
static inline bool
gimple_call_alloca_for_var_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_ALLOCA_FOR_VAR) != 0;
}
/* Copy all the GF_CALL_* flags from ORIG_CALL to DEST_CALL by copying
   the whole subcode field.  */
static inline void
gimple_call_copy_flags (gimple dest_call, gimple orig_call)
{
  GIMPLE_CHECK (dest_call, GIMPLE_CALL);
  GIMPLE_CHECK (orig_call, GIMPLE_CALL);
  dest_call->gsbase.subcode = orig_call->gsbase.subcode;
}

/* Return a pointer to the points-to solution for the set of call-used
   variables of the call CALL.  */
static inline struct pt_solution *
gimple_call_use_set (gimple call)
{
  GIMPLE_CHECK (call, GIMPLE_CALL);
  return &call->gimple_call.call_used;
}

/* Return a pointer to the points-to solution for the set of
   call-clobbered variables of the call CALL.  */
static inline struct pt_solution *
gimple_call_clobber_set (gimple call)
{
  GIMPLE_CHECK (call, GIMPLE_CALL);
  return &call->gimple_call.call_clobbered;
}
/* Return true if STMT is a GIMPLE_ASSIGN, or a GIMPLE_CALL whose LHS
   is non-NULL.  */
static inline bool
gimple_has_lhs (gimple stmt)
{
  if (is_gimple_assign (stmt))
    return true;
  return is_gimple_call (stmt) && gimple_call_lhs (stmt) != NULL_TREE;
}
/* Return the comparison code of the predicate computed by conditional
   statement GS.  */
static inline enum tree_code
gimple_cond_code (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return (enum tree_code) gs->gsbase.subcode;
}

/* Set CODE to be the predicate code for the conditional statement GS.  */
static inline void
gimple_cond_set_code (gimple gs, enum tree_code code)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gs->gsbase.subcode = code;
}

/* Return the LHS (operand 0) of the predicate computed by conditional
   statement GS.  */
static inline tree
gimple_cond_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 0);
}

/* Return the pointer to the LHS of the predicate computed by conditional
   statement GS.  */
static inline tree *
gimple_cond_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op_ptr (gs, 0);
}

/* Set LHS to be the LHS operand of the predicate computed by
   conditional statement GS.  */
static inline void
gimple_cond_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 0, lhs);
}

/* Return the RHS (operand 1) of the predicate computed by conditional
   statement GS.  */
static inline tree
gimple_cond_rhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 1);
}

/* Return the pointer to the RHS operand of the predicate computed by
   conditional GS.  */
static inline tree *
gimple_cond_rhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op_ptr (gs, 1);
}

/* Set RHS to be the RHS operand of the predicate computed by
   conditional statement GS.  */
static inline void
gimple_cond_set_rhs (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 1, rhs);
}

/* Return the label (operand 2) used by conditional statement GS when
   its predicate evaluates to true.  */
static inline tree
gimple_cond_true_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 2);
}

/* Set LABEL to be the label used by conditional statement GS when its
   predicate evaluates to true.  */
static inline void
gimple_cond_set_true_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 2, label);
}

/* Set LABEL to be the label used by conditional statement GS when its
   predicate evaluates to false.  */
static inline void
gimple_cond_set_false_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 3, label);
}

/* Return the label (operand 3) used by conditional statement GS when
   its predicate evaluates to false.  */
static inline tree
gimple_cond_false_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 3);
}
/* Set the conditional GS to be of the form 'if (1 == 0)', i.e.
   always false: LHS is boolean true, RHS boolean false, compared for
   equality.  */
static inline void
gimple_cond_make_false (gimple gs)
{
  gimple_cond_set_lhs (gs, boolean_true_node);
  gimple_cond_set_rhs (gs, boolean_false_node);
  gs->gsbase.subcode = EQ_EXPR;
}

/* Set the conditional GS to be of the form 'if (1 == 1)', i.e.
   always true.  */
static inline void
gimple_cond_make_true (gimple gs)
{
  gimple_cond_set_lhs (gs, boolean_true_node);
  gimple_cond_set_rhs (gs, boolean_true_node);
  gs->gsbase.subcode = EQ_EXPR;
}
/* Check whether conditional statement GS is trivially true, i.e. of
   the form 'if (1 == 1)', 'if (0 == 0)', 'if (1 != 0)' or
   'if (0 != 1)'.  */
static inline bool
gimple_cond_true_p (const_gimple gs)
{
  tree lhs = gimple_cond_lhs (gs);
  tree rhs = gimple_cond_rhs (gs);
  enum tree_code code = gimple_cond_code (gs);

  /* Both operands must be boolean constants.  */
  if (lhs != boolean_true_node && lhs != boolean_false_node)
    return false;
  if (rhs != boolean_true_node && rhs != boolean_false_node)
    return false;

  if (code == EQ_EXPR)
    return lhs == rhs;
  if (code == NE_EXPR)
    return lhs != rhs;

  /* Any other comparison code is not recognized.  */
  return false;
}
/* Check whether conditional statement GS is trivially false, i.e. of
   the form 'if (1 != 1)', 'if (0 != 0)', 'if (1 == 0)' or
   'if (0 == 1)'.  */
static inline bool
gimple_cond_false_p (const_gimple gs)
{
  tree lhs = gimple_cond_lhs (gs);
  tree rhs = gimple_cond_rhs (gs);
  enum tree_code code = gimple_cond_code (gs);

  /* Both operands must be boolean constants.  */
  if (lhs != boolean_true_node && lhs != boolean_false_node)
    return false;
  if (rhs != boolean_true_node && rhs != boolean_false_node)
    return false;

  if (code == EQ_EXPR)
    return lhs != rhs;
  if (code == NE_EXPR)
    return lhs == rhs;

  /* Any other comparison code is not recognized.  */
  return false;
}
/* Check if conditional statement GS is of the form 'if (var != 0)' or
   'if (var == 1)', i.e. a direct test of a boolean variable.  */
static inline bool
gimple_cond_single_var_p (gimple gs)
{
  if (gimple_cond_code (gs) == NE_EXPR
      && gimple_cond_rhs (gs) == boolean_false_node)
    return true;
  if (gimple_cond_code (gs) == EQ_EXPR
      && gimple_cond_rhs (gs) == boolean_true_node)
    return true;
  return false;
}

/* Set the code, LHS and RHS of GIMPLE_COND STMT from CODE, LHS and RHS.  */
static inline void
gimple_cond_set_condition (gimple stmt, enum tree_code code, tree lhs, tree rhs)
{
  gimple_cond_set_code (stmt, code);
  gimple_cond_set_lhs (stmt, lhs);
  gimple_cond_set_rhs (stmt, rhs);
}

/* Return the LABEL_DECL node used by GIMPLE_LABEL statement GS.  */
static inline tree
gimple_label_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_LABEL);
  return gimple_op (gs, 0);
}

/* Set LABEL to be the LABEL_DECL node used by GIMPLE_LABEL statement
   GS.  */
static inline void
gimple_label_set_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_LABEL);
  gimple_set_op (gs, 0, label);
}

/* Return the destination of the unconditional jump GS.  */
static inline tree
gimple_goto_dest (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_GOTO);
  return gimple_op (gs, 0);
}

/* Set DEST to be the destination of the unconditional jump GS.  */
static inline void
gimple_goto_set_dest (gimple gs, tree dest)
{
  GIMPLE_CHECK (gs, GIMPLE_GOTO);
  gimple_set_op (gs, 0, dest);
}
/* Return the chain of variables declared in the GIMPLE_BIND statement
   GS.  */
static inline tree
gimple_bind_vars (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  return gs->gimple_bind.vars;
}

/* Set VARS to be the set of variables declared in the GIMPLE_BIND
   statement GS.  */
static inline void
gimple_bind_set_vars (gimple gs, tree vars)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gs->gimple_bind.vars = vars;
}

/* Append VARS to the set of variables declared in the GIMPLE_BIND
   statement GS (chained onto the end of the existing list).  */
static inline void
gimple_bind_append_vars (gimple gs, tree vars)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gs->gimple_bind.vars = chainon (gs->gimple_bind.vars, vars);
}

/* Return the GIMPLE sequence contained in the GIMPLE_BIND statement GS.  */
static inline gimple_seq
gimple_bind_body (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  return gs->gimple_bind.body;
}

/* Set SEQ to be the GIMPLE sequence contained in the GIMPLE_BIND
   statement GS.  */
static inline void
gimple_bind_set_body (gimple gs, gimple_seq seq)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gs->gimple_bind.body = seq;
}

/* Append a statement to the end of a GIMPLE_BIND's body.  */
static inline void
gimple_bind_add_stmt (gimple gs, gimple stmt)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gimple_seq_add_stmt (&gs->gimple_bind.body, stmt);
}

/* Append a sequence of statements to the end of a GIMPLE_BIND's body.  */
static inline void
gimple_bind_add_seq (gimple gs, gimple_seq seq)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gimple_seq_add_seq (&gs->gimple_bind.body, seq);
}

/* Return the TREE_BLOCK node associated with GIMPLE_BIND statement
   GS.  This is analogous to the BIND_EXPR_BLOCK field in trees.  */
static inline tree
gimple_bind_block (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  return gs->gimple_bind.block;
}

/* Set BLOCK to be the TREE_BLOCK node associated with GIMPLE_BIND
   statement GS.  BLOCK must be NULL_TREE or an actual BLOCK node.  */
static inline void
gimple_bind_set_block (gimple gs, tree block)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gcc_gimple_checking_assert (block == NULL_TREE
	      || TREE_CODE (block) == BLOCK);
  gs->gimple_bind.block = block;
}
/* Return the number of input operands for GIMPLE_ASM GS. */
static inline unsigned
gimple_asm_ninputs (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return gs->gimple_asm.ni;
}
/* Return the number of output operands for GIMPLE_ASM GS. */
static inline unsigned
gimple_asm_noutputs (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return gs->gimple_asm.no;
}
/* Return the number of clobber operands for GIMPLE_ASM GS. */
static inline unsigned
gimple_asm_nclobbers (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return gs->gimple_asm.nc;
}
/* Return the number of label operands for GIMPLE_ASM GS. */
static inline unsigned
gimple_asm_nlabels (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return gs->gimple_asm.nl;
}
/* Return input operand INDEX of GIMPLE_ASM GS.  Inputs occupy the
   first NI slots of the operand vector.  */

static inline tree
gimple_asm_input_op (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  /* Valid inputs are 0 .. ni-1; the previous '<=' bound let an
     off-by-one index slip past checking builds.  */
  gcc_gimple_checking_assert (index < gs->gimple_asm.ni);
  return gimple_op (gs, index);
}

/* Return a pointer to input operand INDEX of GIMPLE_ASM GS.  */

static inline tree *
gimple_asm_input_op_ptr (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.ni);
  return gimple_op_ptr (gs, index);
}

/* Set IN_OP to be input operand INDEX in GIMPLE_ASM GS.  IN_OP must
   be a TREE_LIST as built by the front end for asm operands.  */

static inline void
gimple_asm_set_input_op (gimple gs, unsigned index, tree in_op)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.ni
			      && TREE_CODE (in_op) == TREE_LIST);
  gimple_set_op (gs, index, in_op);
}
/* Return output operand INDEX of GIMPLE_ASM GS.  Outputs follow the
   NI inputs in the operand vector.  */

static inline tree
gimple_asm_output_op (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  /* Valid outputs are 0 .. no-1; '<' replaces a too-permissive '<='.  */
  gcc_gimple_checking_assert (index < gs->gimple_asm.no);
  return gimple_op (gs, index + gs->gimple_asm.ni);
}

/* Return a pointer to output operand INDEX of GIMPLE_ASM GS.  */

static inline tree *
gimple_asm_output_op_ptr (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.no);
  return gimple_op_ptr (gs, index + gs->gimple_asm.ni);
}

/* Set OUT_OP to be output operand INDEX in GIMPLE_ASM GS.  OUT_OP
   must be a TREE_LIST.  */

static inline void
gimple_asm_set_output_op (gimple gs, unsigned index, tree out_op)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.no
			      && TREE_CODE (out_op) == TREE_LIST);
  gimple_set_op (gs, index + gs->gimple_asm.ni, out_op);
}
/* Return clobber operand INDEX of GIMPLE_ASM GS.  Clobbers follow the
   NI inputs and NO outputs in the operand vector.  */

static inline tree
gimple_asm_clobber_op (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  /* Valid clobbers are 0 .. nc-1; '<' replaces a too-permissive '<='.  */
  gcc_gimple_checking_assert (index < gs->gimple_asm.nc);
  return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no);
}

/* Set CLOBBER_OP to be clobber operand INDEX in GIMPLE_ASM GS.
   CLOBBER_OP must be a TREE_LIST.  */

static inline void
gimple_asm_set_clobber_op (gimple gs, unsigned index, tree clobber_op)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.nc
			      && TREE_CODE (clobber_op) == TREE_LIST);
  gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no, clobber_op);
}
/* Return label operand INDEX of GIMPLE_ASM GS (asm goto).  Labels are
   stored after the inputs, outputs and clobbers in the operand
   vector.  */

static inline tree
gimple_asm_label_op (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.nl);
  /* The offset must skip all inputs, outputs and clobbers.  The old
     formula omitted 'no', which was only harmless because an asm goto
     cannot have output operands (so no == 0 whenever nl > 0).  */
  return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no
		    + gs->gimple_asm.nc);
}

/* Set LABEL_OP to be label operand INDEX in GIMPLE_ASM GS.  LABEL_OP
   must be a TREE_LIST.  */

static inline void
gimple_asm_set_label_op (gimple gs, unsigned index, tree label_op)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.nl
			      && TREE_CODE (label_op) == TREE_LIST);
  gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no
		 + gs->gimple_asm.nc, label_op);
}
/* Return the string representing the assembly instruction in
GIMPLE_ASM GS. */
static inline const char *
gimple_asm_string (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return gs->gimple_asm.string;
}
/* Return true if GS is an asm statement marked volatile.  The flag is
stored in the statement's subcode bits. */
static inline bool
gimple_asm_volatile_p (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return (gs->gsbase.subcode & GF_ASM_VOLATILE) != 0;
}
/* If VOLATILE_P is true, mark asm statement GS as volatile; otherwise
clear the flag. */
static inline void
gimple_asm_set_volatile (gimple gs, bool volatile_p)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
if (volatile_p)
gs->gsbase.subcode |= GF_ASM_VOLATILE;
else
gs->gsbase.subcode &= ~GF_ASM_VOLATILE;
}
/* If INPUT_P is true, mark asm GS as an ASM_INPUT; otherwise clear
the flag. */
static inline void
gimple_asm_set_input (gimple gs, bool input_p)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
if (input_p)
gs->gsbase.subcode |= GF_ASM_INPUT;
else
gs->gsbase.subcode &= ~GF_ASM_INPUT;
}
/* Return true if asm GS is an ASM_INPUT. */
static inline bool
gimple_asm_input_p (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return (gs->gsbase.subcode & GF_ASM_INPUT) != 0;
}
/* Return the types handled by GIMPLE_CATCH statement GS. */
static inline tree
gimple_catch_types (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
return gs->gimple_catch.types;
}
/* Return a pointer to the types handled by GIMPLE_CATCH statement GS. */
static inline tree *
gimple_catch_types_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
return &gs->gimple_catch.types;
}
/* Return the GIMPLE sequence representing the body of the handler of
GIMPLE_CATCH statement GS. */
static inline gimple_seq
gimple_catch_handler (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
return gs->gimple_catch.handler;
}
/* Return a pointer to the GIMPLE sequence representing the body of
the handler of GIMPLE_CATCH statement GS. */
static inline gimple_seq *
gimple_catch_handler_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
return &gs->gimple_catch.handler;
}
/* Set T to be the set of types handled by GIMPLE_CATCH GS. */
static inline void
gimple_catch_set_types (gimple gs, tree t)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
gs->gimple_catch.types = t;
}
/* Set HANDLER to be the body of GIMPLE_CATCH GS. */
static inline void
gimple_catch_set_handler (gimple gs, gimple_seq handler)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
gs->gimple_catch.handler = handler;
}
/* Return the types handled by GIMPLE_EH_FILTER statement GS. */
static inline tree
gimple_eh_filter_types (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
return gs->gimple_eh_filter.types;
}
/* Return a pointer to the types handled by GIMPLE_EH_FILTER statement
GS. */
static inline tree *
gimple_eh_filter_types_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
return &gs->gimple_eh_filter.types;
}
/* Return the sequence of statements to execute when the GIMPLE_EH_FILTER
statement GS fails. */
static inline gimple_seq
gimple_eh_filter_failure (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
return gs->gimple_eh_filter.failure;
}
/* Set TYPES to be the set of types handled by GIMPLE_EH_FILTER GS. */
static inline void
gimple_eh_filter_set_types (gimple gs, tree types)
{
GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
gs->gimple_eh_filter.types = types;
}
/* Set FAILURE to be the sequence of statements to execute on failure
for GIMPLE_EH_FILTER GS. */
static inline void
gimple_eh_filter_set_failure (gimple gs, gimple_seq failure)
{
GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
gs->gimple_eh_filter.failure = failure;
}
/* Get the function decl to be called by the MUST_NOT_THROW region. */
static inline tree
gimple_eh_must_not_throw_fndecl (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW);
return gs->gimple_eh_mnt.fndecl;
}
/* Set the function decl to be called by GS to DECL. */
static inline void
gimple_eh_must_not_throw_set_fndecl (gimple gs, tree decl)
{
GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW);
gs->gimple_eh_mnt.fndecl = decl;
}
/* GIMPLE_EH_ELSE accessors.  The tuple carries two sequences, n_body
and e_body (NOTE(review): presumably the normal path and the
exception path respectively -- confirm against gimple.def). */
/* Return the n_body sequence of GIMPLE_EH_ELSE GS. */
static inline gimple_seq
gimple_eh_else_n_body (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
return gs->gimple_eh_else.n_body;
}
/* Return the e_body sequence of GIMPLE_EH_ELSE GS. */
static inline gimple_seq
gimple_eh_else_e_body (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
return gs->gimple_eh_else.e_body;
}
/* Set SEQ as the n_body sequence of GIMPLE_EH_ELSE GS. */
static inline void
gimple_eh_else_set_n_body (gimple gs, gimple_seq seq)
{
GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
gs->gimple_eh_else.n_body = seq;
}
/* Set SEQ as the e_body sequence of GIMPLE_EH_ELSE GS. */
static inline void
gimple_eh_else_set_e_body (gimple gs, gimple_seq seq)
{
GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
gs->gimple_eh_else.e_body = seq;
}
/* GIMPLE_TRY accessors. */
/* Return the kind of try block represented by GIMPLE_TRY GS. This is
either GIMPLE_TRY_CATCH or GIMPLE_TRY_FINALLY. */
static inline enum gimple_try_flags
gimple_try_kind (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
return (enum gimple_try_flags) (gs->gsbase.subcode & GIMPLE_TRY_KIND);
}
/* Set the kind of try block represented by GIMPLE_TRY GS.
Note: assigning KIND overwrites the entire subcode, which also clears
the GIMPLE_TRY_CATCH_IS_CLEANUP flag. */
static inline void
gimple_try_set_kind (gimple gs, enum gimple_try_flags kind)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
gcc_gimple_checking_assert (kind == GIMPLE_TRY_CATCH
|| kind == GIMPLE_TRY_FINALLY);
if (gimple_try_kind (gs) != kind)
gs->gsbase.subcode = (unsigned int) kind;
}
/* Return the GIMPLE_TRY_CATCH_IS_CLEANUP flag.  Only valid for
GIMPLE_TRY_CATCH statements. */
static inline bool
gimple_try_catch_is_cleanup (const_gimple gs)
{
gcc_gimple_checking_assert (gimple_try_kind (gs) == GIMPLE_TRY_CATCH);
return (gs->gsbase.subcode & GIMPLE_TRY_CATCH_IS_CLEANUP) != 0;
}
/* Return the sequence of statements used as the body for GIMPLE_TRY GS. */
static inline gimple_seq
gimple_try_eval (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
return gs->gimple_try.eval;
}
/* Return the sequence of statements used as the cleanup body for
GIMPLE_TRY GS. */
static inline gimple_seq
gimple_try_cleanup (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
return gs->gimple_try.cleanup;
}
/* Set the GIMPLE_TRY_CATCH_IS_CLEANUP flag.  Only valid for
GIMPLE_TRY_CATCH statements. */
static inline void
gimple_try_set_catch_is_cleanup (gimple g, bool catch_is_cleanup)
{
gcc_gimple_checking_assert (gimple_try_kind (g) == GIMPLE_TRY_CATCH);
if (catch_is_cleanup)
g->gsbase.subcode |= GIMPLE_TRY_CATCH_IS_CLEANUP;
else
g->gsbase.subcode &= ~GIMPLE_TRY_CATCH_IS_CLEANUP;
}
/* Set EVAL to be the sequence of statements to use as the body for
GIMPLE_TRY GS. */
static inline void
gimple_try_set_eval (gimple gs, gimple_seq eval)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
gs->gimple_try.eval = eval;
}
/* Set CLEANUP to be the sequence of statements to use as the cleanup
body for GIMPLE_TRY GS. */
static inline void
gimple_try_set_cleanup (gimple gs, gimple_seq cleanup)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
gs->gimple_try.cleanup = cleanup;
}
/* Return the cleanup sequence for cleanup statement GS. */
static inline gimple_seq
gimple_wce_cleanup (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
return gs->gimple_wce.cleanup;
}
/* Set CLEANUP to be the cleanup sequence for GS. */
static inline void
gimple_wce_set_cleanup (gimple gs, gimple_seq cleanup)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
gs->gimple_wce.cleanup = cleanup;
}
/* Return the CLEANUP_EH_ONLY flag for a WCE tuple.  The flag is stored
directly in the subcode. */
static inline bool
gimple_wce_cleanup_eh_only (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
return gs->gsbase.subcode != 0;
}
/* Set the CLEANUP_EH_ONLY flag for a WCE tuple. */
static inline void
gimple_wce_set_cleanup_eh_only (gimple gs, bool eh_only_p)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
gs->gsbase.subcode = (unsigned int) eh_only_p;
}
/* Return the maximum number of arguments supported by GIMPLE_PHI GS. */
static inline unsigned
gimple_phi_capacity (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
return gs->gimple_phi.capacity;
}
/* Return the number of arguments in GIMPLE_PHI GS. This must always
be exactly the number of incoming edges for the basic block holding
GS.  NARGS is at most CAPACITY. */
static inline unsigned
gimple_phi_num_args (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
return gs->gimple_phi.nargs;
}
/* Return the SSA name created by GIMPLE_PHI GS. */
static inline tree
gimple_phi_result (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
return gs->gimple_phi.result;
}
/* Return a pointer to the SSA name created by GIMPLE_PHI GS. */
static inline tree *
gimple_phi_result_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
return &gs->gimple_phi.result;
}
/* Set RESULT to be the SSA name created by GIMPLE_PHI GS. */
static inline void
gimple_phi_set_result (gimple gs, tree result)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
gs->gimple_phi.result = result;
}
/* Return the PHI argument corresponding to incoming edge INDEX for
   GIMPLE_PHI GS.  */

static inline struct phi_arg_d *
gimple_phi_arg (gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_PHI);
  /* Valid slots are 0 .. capacity-1; the previous '<=' bound allowed
     addressing one element past the argument array.  */
  gcc_gimple_checking_assert (index < gs->gimple_phi.capacity);
  return &(gs->gimple_phi.args[index]);
}

/* Set PHIARG to be the argument corresponding to incoming edge INDEX
   for GIMPLE_PHI GS.  PHIARG is copied by value.  */

static inline void
gimple_phi_set_arg (gimple gs, unsigned index, struct phi_arg_d * phiarg)
{
  GIMPLE_CHECK (gs, GIMPLE_PHI);
  /* Only the first NARGS slots are in use; '<' replaces a
     too-permissive '<='.  */
  gcc_gimple_checking_assert (index < gs->gimple_phi.nargs);
  gs->gimple_phi.args[index] = *phiarg;
}
/* GIMPLE_RESX and GIMPLE_EH_DISPATCH share the gimple_eh_ctrl layout;
both carry only an EH region number. */
/* Return the region number for GIMPLE_RESX GS. */
static inline int
gimple_resx_region (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_RESX);
return gs->gimple_eh_ctrl.region;
}
/* Set REGION to be the region number for GIMPLE_RESX GS. */
static inline void
gimple_resx_set_region (gimple gs, int region)
{
GIMPLE_CHECK (gs, GIMPLE_RESX);
gs->gimple_eh_ctrl.region = region;
}
/* Return the region number for GIMPLE_EH_DISPATCH GS. */
static inline int
gimple_eh_dispatch_region (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH);
return gs->gimple_eh_ctrl.region;
}
/* Set REGION to be the region number for GIMPLE_EH_DISPATCH GS. */
static inline void
gimple_eh_dispatch_set_region (gimple gs, int region)
{
GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH);
gs->gimple_eh_ctrl.region = region;
}
/* Return the number of labels associated with the switch statement GS.
Operand 0 is the index expression, so labels are num_ops - 1. */
static inline unsigned
gimple_switch_num_labels (const_gimple gs)
{
unsigned num_ops;
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
num_ops = gimple_num_ops (gs);
gcc_gimple_checking_assert (num_ops > 1);
return num_ops - 1;
}
/* Set NLABELS to be the number of labels for the switch statement GS. */
static inline void
gimple_switch_set_num_labels (gimple g, unsigned nlabels)
{
GIMPLE_CHECK (g, GIMPLE_SWITCH);
gimple_set_num_ops (g, nlabels + 1);
}
/* Return the index variable used by the switch statement GS. */
static inline tree
gimple_switch_index (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
return gimple_op (gs, 0);
}
/* Return a pointer to the index variable for the switch statement GS. */
static inline tree *
gimple_switch_index_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
return gimple_op_ptr (gs, 0);
}
/* Set INDEX to be the index variable for switch statement GS.  INDEX
must be an SSA variable or a constant. */
static inline void
gimple_switch_set_index (gimple gs, tree index)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
gcc_gimple_checking_assert (SSA_VAR_P (index) || CONSTANT_CLASS_P (index));
gimple_set_op (gs, 0, index);
}
/* Return the label numbered INDEX. The default label is 0, followed by any
labels in a switch statement. */
static inline tree
gimple_switch_label (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1);
return gimple_op (gs, index + 1);
}
/* Set the label number INDEX to LABEL. 0 is always the default label.
LABEL may be NULL_TREE or a CASE_LABEL_EXPR. */
static inline void
gimple_switch_set_label (gimple gs, unsigned index, tree label)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1
&& (label == NULL_TREE
|| TREE_CODE (label) == CASE_LABEL_EXPR));
gimple_set_op (gs, index + 1, label);
}
/* Return the default label for a switch statement. */
static inline tree
gimple_switch_default_label (const_gimple gs)
{
return gimple_switch_label (gs, 0);
}
/* Set the default label for a switch statement. */
static inline void
gimple_switch_set_default_label (gimple gs, tree label)
{
gimple_switch_set_label (gs, 0, label);
}
/* Return true if GS is a GIMPLE_DEBUG statement. */
static inline bool
is_gimple_debug (const_gimple gs)
{
return gimple_code (gs) == GIMPLE_DEBUG;
}
/* Return true if S is a GIMPLE_DEBUG BIND statement.  The subkind of
a debug statement is stored in its subcode. */
static inline bool
gimple_debug_bind_p (const_gimple s)
{
if (is_gimple_debug (s))
return s->gsbase.subcode == GIMPLE_DEBUG_BIND;
return false;
}
/* Return the variable bound in a GIMPLE_DEBUG bind statement
(operand 0). */
static inline tree
gimple_debug_bind_get_var (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
return gimple_op (dbg, 0);
}
/* Return the value bound to the variable in a GIMPLE_DEBUG bind
statement (operand 1). */
static inline tree
gimple_debug_bind_get_value (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
return gimple_op (dbg, 1);
}
/* Return a pointer to the value bound to the variable in a
GIMPLE_DEBUG bind statement. */
static inline tree *
gimple_debug_bind_get_value_ptr (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
return gimple_op_ptr (dbg, 1);
}
/* Set the variable bound in a GIMPLE_DEBUG bind statement. */
static inline void
gimple_debug_bind_set_var (gimple dbg, tree var)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
gimple_set_op (dbg, 0, var);
}
/* Set the value bound to the variable in a GIMPLE_DEBUG bind
statement. */
static inline void
gimple_debug_bind_set_value (gimple dbg, tree value)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
gimple_set_op (dbg, 1, value);
}
/* The second operand of a GIMPLE_DEBUG_BIND, when the value was
optimized away. */
#define GIMPLE_DEBUG_BIND_NOVALUE NULL_TREE /* error_mark_node */
/* Remove the value bound to the variable in a GIMPLE_DEBUG bind
statement, marking the binding as optimized away. */
static inline void
gimple_debug_bind_reset_value (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
gimple_set_op (dbg, 1, GIMPLE_DEBUG_BIND_NOVALUE);
}
/* Return true if the GIMPLE_DEBUG bind statement is bound to a
value. */
static inline bool
gimple_debug_bind_has_value_p (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
return gimple_op (dbg, 1) != GIMPLE_DEBUG_BIND_NOVALUE;
}
#undef GIMPLE_DEBUG_BIND_NOVALUE
/* Return true if S is a GIMPLE_DEBUG SOURCE BIND statement. */
static inline bool
gimple_debug_source_bind_p (const_gimple s)
{
if (is_gimple_debug (s))
return s->gsbase.subcode == GIMPLE_DEBUG_SOURCE_BIND;
return false;
}
/* Return the variable bound in a GIMPLE_DEBUG source bind statement. */
static inline tree
gimple_debug_source_bind_get_var (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
return gimple_op (dbg, 0);
}
/* Return the value bound to the variable in a GIMPLE_DEBUG source bind
statement. */
static inline tree
gimple_debug_source_bind_get_value (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
return gimple_op (dbg, 1);
}
/* Return a pointer to the value bound to the variable in a
GIMPLE_DEBUG source bind statement. */
static inline tree *
gimple_debug_source_bind_get_value_ptr (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
return gimple_op_ptr (dbg, 1);
}
/* Set the variable bound in a GIMPLE_DEBUG source bind statement. */
static inline void
gimple_debug_source_bind_set_var (gimple dbg, tree var)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
gimple_set_op (dbg, 0, var);
}
/* Set the value bound to the variable in a GIMPLE_DEBUG source bind
statement. */
static inline void
gimple_debug_source_bind_set_value (gimple dbg, tree value)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
gimple_set_op (dbg, 1, value);
}
/* Return the body for the OMP statement GS.  Works for any tuple with
the omp base layout; note that no code check is performed here. */
static inline gimple_seq
gimple_omp_body (gimple gs)
{
return gs->omp.body;
}
/* Set BODY to be the body for the OMP statement GS.  No code check is
performed. */
static inline void
gimple_omp_set_body (gimple gs, gimple_seq body)
{
gs->omp.body = body;
}
/* Return the name associated with OMP_CRITICAL statement GS. */
static inline tree
gimple_omp_critical_name (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
return gs->gimple_omp_critical.name;
}
/* Return a pointer to the name associated with OMP critical statement GS. */
static inline tree *
gimple_omp_critical_name_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
return &gs->gimple_omp_critical.name;
}
/* Set NAME to be the name associated with OMP critical statement GS. */
static inline void
gimple_omp_critical_set_name (gimple gs, tree name)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
gs->gimple_omp_critical.name = name;
}
/* Return the clauses associated with OMP_FOR GS. */
static inline tree
gimple_omp_for_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
return gs->gimple_omp_for.clauses;
}
/* Return a pointer to the clauses associated with OMP_FOR GS. */
static inline tree *
gimple_omp_for_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
return &gs->gimple_omp_for.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_FOR GS. */
static inline void
gimple_omp_for_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gs->gimple_omp_for.clauses = clauses;
}
/* Get the collapse count of OMP_FOR GS.  This is the number of
iteration dimensions; all the per-dimension accessors below take an
index I with I < collapse. */
static inline size_t
gimple_omp_for_collapse (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
return gs->gimple_omp_for.collapse;
}
/* Return the index variable for dimension I of OMP_FOR GS. */
static inline tree
gimple_omp_for_index (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].index;
}
/* Return a pointer to the index variable for dimension I of OMP_FOR GS. */
static inline tree *
gimple_omp_for_index_ptr (gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
return &gs->gimple_omp_for.iter[i].index;
}
/* Set INDEX to be the index variable for dimension I of OMP_FOR GS. */
static inline void
gimple_omp_for_set_index (gimple gs, size_t i, tree index)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].index = index;
}
/* Return the initial value for dimension I of OMP_FOR GS. */
static inline tree
gimple_omp_for_initial (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].initial;
}
/* Return a pointer to the initial value for dimension I of OMP_FOR GS. */
static inline tree *
gimple_omp_for_initial_ptr (gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
return &gs->gimple_omp_for.iter[i].initial;
}
/* Set INITIAL to be the initial value for dimension I of OMP_FOR GS. */
static inline void
gimple_omp_for_set_initial (gimple gs, size_t i, tree initial)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].initial = initial;
}
/* Return the final value for dimension I of OMP_FOR GS. */
static inline tree
gimple_omp_for_final (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].final;
}
/* Return a pointer to the final value for dimension I of OMP_FOR GS. */
static inline tree *
gimple_omp_for_final_ptr (gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
return &gs->gimple_omp_for.iter[i].final;
}
/* Set FINAL to be the final value for dimension I of OMP_FOR GS. */
static inline void
gimple_omp_for_set_final (gimple gs, size_t i, tree final)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].final = final;
}
/* Return the increment value for dimension I of OMP_FOR GS. */
static inline tree
gimple_omp_for_incr (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].incr;
}
/* Return a pointer to the increment value for dimension I of OMP_FOR GS. */
static inline tree *
gimple_omp_for_incr_ptr (gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
return &gs->gimple_omp_for.iter[i].incr;
}
/* Set INCR to be the increment value for dimension I of OMP_FOR GS. */
static inline void
gimple_omp_for_set_incr (gimple gs, size_t i, tree incr)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].incr = incr;
}
/* Return the sequence of statements to execute before the OMP_FOR
statement GS starts. */
static inline gimple_seq
gimple_omp_for_pre_body (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
return gs->gimple_omp_for.pre_body;
}
/* Set PRE_BODY to be the sequence of statements to execute before the
OMP_FOR statement GS starts. */
static inline void
gimple_omp_for_set_pre_body (gimple gs, gimple_seq pre_body)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gs->gimple_omp_for.pre_body = pre_body;
}
/* Return the clauses associated with OMP_PARALLEL GS. */
static inline tree
gimple_omp_parallel_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return gs->gimple_omp_parallel.clauses;
}
/* Return a pointer to the clauses associated with OMP_PARALLEL GS. */
static inline tree *
gimple_omp_parallel_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return &gs->gimple_omp_parallel.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL
GS. */
static inline void
gimple_omp_parallel_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
gs->gimple_omp_parallel.clauses = clauses;
}
/* Return the child function used to hold the body of OMP_PARALLEL GS. */
static inline tree
gimple_omp_parallel_child_fn (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return gs->gimple_omp_parallel.child_fn;
}
/* Return a pointer to the child function used to hold the body of
OMP_PARALLEL GS. */
static inline tree *
gimple_omp_parallel_child_fn_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return &gs->gimple_omp_parallel.child_fn;
}
/* Set CHILD_FN to be the child function for OMP_PARALLEL GS. */
static inline void
gimple_omp_parallel_set_child_fn (gimple gs, tree child_fn)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
gs->gimple_omp_parallel.child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_PARALLEL GS. */
static inline tree
gimple_omp_parallel_data_arg (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return gs->gimple_omp_parallel.data_arg;
}
/* Return a pointer to the data argument for OMP_PARALLEL GS. */
static inline tree *
gimple_omp_parallel_data_arg_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return &gs->gimple_omp_parallel.data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_PARALLEL GS. */
static inline void
gimple_omp_parallel_set_data_arg (gimple gs, tree data_arg)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
gs->gimple_omp_parallel.data_arg = data_arg;
}
/* OMP_TASK accessors.  GIMPLE_OMP_TASK reuses the gimple_omp_parallel
field layout for clauses, child_fn and data_arg, as the direct union
member accesses below show. */
/* Return the clauses associated with OMP_TASK GS. */
static inline tree
gimple_omp_task_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.clauses;
}
/* Return a pointer to the clauses associated with OMP_TASK GS. */
static inline tree *
gimple_omp_task_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_TASK
GS. */
static inline void
gimple_omp_task_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.clauses = clauses;
}
/* Return the child function used to hold the body of OMP_TASK GS. */
static inline tree
gimple_omp_task_child_fn (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.child_fn;
}
/* Return a pointer to the child function used to hold the body of
OMP_TASK GS. */
static inline tree *
gimple_omp_task_child_fn_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.child_fn;
}
/* Set CHILD_FN to be the child function for OMP_TASK GS. */
static inline void
gimple_omp_task_set_child_fn (gimple gs, tree child_fn)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_TASK GS. */
static inline tree
gimple_omp_task_data_arg (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.data_arg;
}
/* Return a pointer to the data argument for OMP_TASK GS. */
static inline tree *
gimple_omp_task_data_arg_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_TASK GS. */
static inline void
gimple_omp_task_set_data_arg (gimple gs, tree data_arg)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.data_arg = data_arg;
}
/* "taskreg" accessors accept either GIMPLE_OMP_PARALLEL or
GIMPLE_OMP_TASK: the guard below only checks for OMP_TASK when the
statement is not an OMP_PARALLEL. */
/* Return the clauses associated with OMP_PARALLEL or OMP_TASK GS. */
static inline tree
gimple_omp_taskreg_clauses (const_gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.clauses;
}
/* Return a pointer to the clauses associated with OMP_PARALLEL or
OMP_TASK GS. */
static inline tree *
gimple_omp_taskreg_clauses_ptr (gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL
or OMP_TASK GS. */
static inline void
gimple_omp_taskreg_set_clauses (gimple gs, tree clauses)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.clauses = clauses;
}
/* Return the child function used to hold the body of OMP_PARALLEL or
OMP_TASK GS. */
static inline tree
gimple_omp_taskreg_child_fn (const_gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.child_fn;
}
/* Return a pointer to the child function used to hold the body of
OMP_PARALLEL or OMP_TASK GS. */
static inline tree *
gimple_omp_taskreg_child_fn_ptr (gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.child_fn;
}
/* Set CHILD_FN to be the child function for OMP_PARALLEL or OMP_TASK
GS. */
static inline void
gimple_omp_taskreg_set_child_fn (gimple gs, tree child_fn)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_PARALLEL or OMP_TASK
GS. */
static inline tree
gimple_omp_taskreg_data_arg (const_gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.data_arg;
}
/* Return a pointer to the data argument for OMP_PARALLEL or OMP_TASK
GS. */
static inline tree *
gimple_omp_taskreg_data_arg_ptr (gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_PARALLEL or OMP_TASK
GS. */
static inline void
gimple_omp_taskreg_set_data_arg (gimple gs, tree data_arg)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.data_arg = data_arg;
}
/* Return the copy function used to hold the body of OMP_TASK GS.
This field is specific to gimple_omp_task (not shared with
OMP_PARALLEL). */
static inline tree
gimple_omp_task_copy_fn (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_task.copy_fn;
}
/* Return a pointer to the copy function used to hold the body of
OMP_TASK GS. */
static inline tree *
gimple_omp_task_copy_fn_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_task.copy_fn;
}
/* Set CHILD_FN to be the copy function for OMP_TASK GS. */
static inline void
gimple_omp_task_set_copy_fn (gimple gs, tree copy_fn)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_task.copy_fn = copy_fn;
}
/* Return size of the data block in bytes in OMP_TASK GS. */
static inline tree
gimple_omp_task_arg_size (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_task.arg_size;
}
/* Return a pointer to the data block size for OMP_TASK GS. */
static inline tree *
gimple_omp_task_arg_size_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_task.arg_size;
}
/* Set ARG_SIZE to be the data block size for OMP_TASK GS. */
static inline void
gimple_omp_task_set_arg_size (gimple gs, tree arg_size)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_task.arg_size = arg_size;
}
/* Return align of the data block in bytes in OMP_TASK GS. */
static inline tree
gimple_omp_task_arg_align (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_task.arg_align;
}
/* Return a pointer to the data block align for OMP_TASK GS. */
static inline tree *
gimple_omp_task_arg_align_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_task.arg_align;
}
/* Set ARG_SIZE to be the data block align for OMP_TASK GS. */
static inline void
gimple_omp_task_set_arg_align (gimple gs, tree arg_align)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_task.arg_align = arg_align;
}
/* Return the clauses associated with OMP_SINGLE GS. */
static inline tree
gimple_omp_single_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
return gs->gimple_omp_single.clauses;
}
/* Return a pointer to the clauses associated with OMP_SINGLE GS. */
static inline tree *
gimple_omp_single_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
return &gs->gimple_omp_single.clauses;
}
/* Set CLAUSES to be the clauses associated with OMP_SINGLE GS. */
static inline void
gimple_omp_single_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
gs->gimple_omp_single.clauses = clauses;
}
/* Return the clauses associated with OMP_SECTIONS GS. */
static inline tree
gimple_omp_sections_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
return gs->gimple_omp_sections.clauses;
}
/* Return a pointer to the clauses associated with OMP_SECTIONS GS. */
static inline tree *
gimple_omp_sections_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
return &gs->gimple_omp_sections.clauses;
}
/* Set CLAUSES to be the set of clauses associated with OMP_SECTIONS
GS. */
static inline void
gimple_omp_sections_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
gs->gimple_omp_sections.clauses = clauses;
}
/* Return the control variable associated with the GIMPLE_OMP_SECTIONS
in GS. */
static inline tree
gimple_omp_sections_control (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
return gs->gimple_omp_sections.control;
}
/* Return a pointer to the clauses associated with the GIMPLE_OMP_SECTIONS
GS. */
static inline tree *
gimple_omp_sections_control_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
return &gs->gimple_omp_sections.control;
}
/* Set CONTROL to be the set of clauses associated with the
GIMPLE_OMP_SECTIONS in GS. */
static inline void
gimple_omp_sections_set_control (gimple gs, tree control)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
gs->gimple_omp_sections.control = control;
}
/* Set COND to be the condition code for OMP_FOR GS. */
static inline void
gimple_omp_for_set_cond (gimple gs, size_t i, enum tree_code cond)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (TREE_CODE_CLASS (cond) == tcc_comparison
&& i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].cond = cond;
}
/* Return the condition code associated with OMP_FOR GS. */
static inline enum tree_code
gimple_omp_for_cond (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].cond;
}
/* Set the value being stored in an atomic store. */
static inline void
gimple_omp_atomic_store_set_val (gimple g, tree val)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
g->gimple_omp_atomic_store.val = val;
}
/* Return the value being stored in an atomic store. */
static inline tree
gimple_omp_atomic_store_val (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
return g->gimple_omp_atomic_store.val;
}
/* Return a pointer to the value being stored in an atomic store. */
static inline tree *
gimple_omp_atomic_store_val_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
return &g->gimple_omp_atomic_store.val;
}
/* Set the LHS of an atomic load. */
static inline void
gimple_omp_atomic_load_set_lhs (gimple g, tree lhs)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
g->gimple_omp_atomic_load.lhs = lhs;
}
/* Get the LHS of an atomic load. */
static inline tree
gimple_omp_atomic_load_lhs (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
return g->gimple_omp_atomic_load.lhs;
}
/* Return a pointer to the LHS of an atomic load. */
static inline tree *
gimple_omp_atomic_load_lhs_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
return &g->gimple_omp_atomic_load.lhs;
}
/* Set the RHS of an atomic load. */
static inline void
gimple_omp_atomic_load_set_rhs (gimple g, tree rhs)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
g->gimple_omp_atomic_load.rhs = rhs;
}
/* Get the RHS of an atomic load. */
static inline tree
gimple_omp_atomic_load_rhs (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
return g->gimple_omp_atomic_load.rhs;
}
/* Return a pointer to the RHS of an atomic load. */
static inline tree *
gimple_omp_atomic_load_rhs_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
return &g->gimple_omp_atomic_load.rhs;
}
/* Get the definition of the control variable in a GIMPLE_OMP_CONTINUE. */
static inline tree
gimple_omp_continue_control_def (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
return g->gimple_omp_continue.control_def;
}
/* The same as above, but return the address. */
static inline tree *
gimple_omp_continue_control_def_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
return &g->gimple_omp_continue.control_def;
}
/* Set the definition of the control variable in a GIMPLE_OMP_CONTINUE. */
static inline void
gimple_omp_continue_set_control_def (gimple g, tree def)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
g->gimple_omp_continue.control_def = def;
}
/* Get the use of the control variable in a GIMPLE_OMP_CONTINUE. */
static inline tree
gimple_omp_continue_control_use (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
return g->gimple_omp_continue.control_use;
}
/* The same as above, but return the address. */
static inline tree *
gimple_omp_continue_control_use_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
return &g->gimple_omp_continue.control_use;
}
/* Set the use of the control variable in a GIMPLE_OMP_CONTINUE. */
static inline void
gimple_omp_continue_set_control_use (gimple g, tree use)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
g->gimple_omp_continue.control_use = use;
}
/* Return the body for the GIMPLE_TRANSACTION statement GS. */
static inline gimple_seq
gimple_transaction_body (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
return gs->gimple_transaction.body;
}
/* Return the label associated with a GIMPLE_TRANSACTION. */
static inline tree
gimple_transaction_label (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
return gs->gimple_transaction.label;
}
static inline tree *
gimple_transaction_label_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
return &gs->gimple_transaction.label;
}
/* Return the subcode associated with a GIMPLE_TRANSACTION. */
static inline unsigned int
gimple_transaction_subcode (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
return gs->gsbase.subcode;
}
/* Set BODY to be the body for the GIMPLE_TRANSACTION statement GS. */
static inline void
gimple_transaction_set_body (gimple gs, gimple_seq body)
{
GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
gs->gimple_transaction.body = body;
}
/* Set the label associated with a GIMPLE_TRANSACTION. */
static inline void
gimple_transaction_set_label (gimple gs, tree label)
{
GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
gs->gimple_transaction.label = label;
}
/* Set the subcode associated with a GIMPLE_TRANSACTION. */
static inline void
gimple_transaction_set_subcode (gimple gs, unsigned int subcode)
{
GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
gs->gsbase.subcode = subcode;
}
/* Return a pointer to the return value for GIMPLE_RETURN GS. */
static inline tree *
gimple_return_retval_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_RETURN);
return gimple_op_ptr (gs, 0);
}
/* Return the return value for GIMPLE_RETURN GS. */
static inline tree
gimple_return_retval (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_RETURN);
return gimple_op (gs, 0);
}
/* Set RETVAL to be the return value for GIMPLE_RETURN GS. */
static inline void
gimple_return_set_retval (gimple gs, tree retval)
{
GIMPLE_CHECK (gs, GIMPLE_RETURN);
gimple_set_op (gs, 0, retval);
}
/* Returns true when the gimple statment STMT is any of the OpenMP types. */
#define CASE_GIMPLE_OMP \
case GIMPLE_OMP_PARALLEL: \
case GIMPLE_OMP_TASK: \
case GIMPLE_OMP_FOR: \
case GIMPLE_OMP_SECTIONS: \
case GIMPLE_OMP_SECTIONS_SWITCH: \
case GIMPLE_OMP_SINGLE: \
case GIMPLE_OMP_SECTION: \
case GIMPLE_OMP_MASTER: \
case GIMPLE_OMP_ORDERED: \
case GIMPLE_OMP_CRITICAL: \
case GIMPLE_OMP_RETURN: \
case GIMPLE_OMP_ATOMIC_LOAD: \
case GIMPLE_OMP_ATOMIC_STORE: \
case GIMPLE_OMP_CONTINUE
static inline bool
is_gimple_omp (const_gimple stmt)
{
switch (gimple_code (stmt))
{
CASE_GIMPLE_OMP:
return true;
default:
return false;
}
}
/* Returns TRUE if statement G is a GIMPLE_NOP. */
static inline bool
gimple_nop_p (const_gimple g)
{
return gimple_code (g) == GIMPLE_NOP;
}
/* Return true if GS is a GIMPLE_RESX. */
static inline bool
is_gimple_resx (const_gimple gs)
{
return gimple_code (gs) == GIMPLE_RESX;
}
/* Return the predictor of GIMPLE_PREDICT statement GS. */
static inline enum br_predictor
gimple_predict_predictor (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
return (enum br_predictor) (gs->gsbase.subcode & ~GF_PREDICT_TAKEN);
}
/* Set the predictor of GIMPLE_PREDICT statement GS to PREDICT. */
static inline void
gimple_predict_set_predictor (gimple gs, enum br_predictor predictor)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
gs->gsbase.subcode = (gs->gsbase.subcode & GF_PREDICT_TAKEN)
| (unsigned) predictor;
}
/* Return the outcome of GIMPLE_PREDICT statement GS. */
static inline enum prediction
gimple_predict_outcome (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
return (gs->gsbase.subcode & GF_PREDICT_TAKEN) ? TAKEN : NOT_TAKEN;
}
/* Set the outcome of GIMPLE_PREDICT statement GS to OUTCOME. */
static inline void
gimple_predict_set_outcome (gimple gs, enum prediction outcome)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
if (outcome == TAKEN)
gs->gsbase.subcode |= GF_PREDICT_TAKEN;
else
gs->gsbase.subcode &= ~GF_PREDICT_TAKEN;
}
/* Return the type of the main expression computed by STMT. Return
void_type_node if the statement computes nothing. */
static inline tree
gimple_expr_type (const_gimple stmt)
{
enum gimple_code code = gimple_code (stmt);
if (code == GIMPLE_ASSIGN || code == GIMPLE_CALL)
{
tree type;
/* In general we want to pass out a type that can be substituted
for both the RHS and the LHS types if there is a possibly
useless conversion involved. That means returning the
original RHS type as far as we can reconstruct it. */
if (code == GIMPLE_CALL)
type = gimple_call_return_type (stmt);
else
switch (gimple_assign_rhs_code (stmt))
{
case POINTER_PLUS_EXPR:
type = TREE_TYPE (gimple_assign_rhs1 (stmt));
break;
default:
/* As fallback use the type of the LHS. */
type = TREE_TYPE (gimple_get_lhs (stmt));
break;
}
return type;
}
else if (code == GIMPLE_COND)
return boolean_type_node;
else
return void_type_node;
}
/* Return true if TYPE is a suitable type for a scalar register variable. */
static inline bool
is_gimple_reg_type (tree type)
{
return !AGGREGATE_TYPE_P (type);
}
/* Return a new iterator pointing to GIMPLE_SEQ's first statement. */
static inline gimple_stmt_iterator
gsi_start (gimple_seq seq)
{
gimple_stmt_iterator i;
i.ptr = gimple_seq_first (seq);
i.seq = seq;
i.bb = (i.ptr && i.ptr->stmt) ? gimple_bb (i.ptr->stmt) : NULL;
return i;
}
/* Return a new iterator pointing to the first statement in basic block BB. */
static inline gimple_stmt_iterator
gsi_start_bb (basic_block bb)
{
gimple_stmt_iterator i;
gimple_seq seq;
seq = bb_seq (bb);
i.ptr = gimple_seq_first (seq);
i.seq = seq;
i.bb = bb;
return i;
}
/* Return a new iterator initially pointing to GIMPLE_SEQ's last statement. */
static inline gimple_stmt_iterator
gsi_last (gimple_seq seq)
{
gimple_stmt_iterator i;
i.ptr = gimple_seq_last (seq);
i.seq = seq;
i.bb = (i.ptr && i.ptr->stmt) ? gimple_bb (i.ptr->stmt) : NULL;
return i;
}
/* Return a new iterator pointing to the last statement in basic block BB. */
static inline gimple_stmt_iterator
gsi_last_bb (basic_block bb)
{
gimple_stmt_iterator i;
gimple_seq seq;
seq = bb_seq (bb);
i.ptr = gimple_seq_last (seq);
i.seq = seq;
i.bb = bb;
return i;
}
/* Return true if I is at the end of its sequence. */
static inline bool
gsi_end_p (gimple_stmt_iterator i)
{
return i.ptr == NULL;
}
/* Return true if I is one statement before the end of its sequence. */
static inline bool
gsi_one_before_end_p (gimple_stmt_iterator i)
{
return i.ptr != NULL && i.ptr->next == NULL;
}
/* Advance the iterator to the next gimple statement. */
static inline void
gsi_next (gimple_stmt_iterator *i)
{
i->ptr = i->ptr->next;
}
/* Advance the iterator to the previous gimple statement. */
static inline void
gsi_prev (gimple_stmt_iterator *i)
{
i->ptr = i->ptr->prev;
}
/* Return the current stmt. */
static inline gimple
gsi_stmt (gimple_stmt_iterator i)
{
return i.ptr->stmt;
}
/* Return a block statement iterator that points to the first non-label
statement in block BB. */
static inline gimple_stmt_iterator
gsi_after_labels (basic_block bb)
{
gimple_stmt_iterator gsi = gsi_start_bb (bb);
while (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL)
gsi_next (&gsi);
return gsi;
}
/* Advance the iterator to the next non-debug gimple statement. */
static inline void
gsi_next_nondebug (gimple_stmt_iterator *i)
{
do
{
gsi_next (i);
}
while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i)));
}
/* Advance the iterator to the next non-debug gimple statement. */
static inline void
gsi_prev_nondebug (gimple_stmt_iterator *i)
{
do
{
gsi_prev (i);
}
while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i)));
}
/* Return a new iterator pointing to the first non-debug statement in
basic block BB. */
static inline gimple_stmt_iterator
gsi_start_nondebug_bb (basic_block bb)
{
gimple_stmt_iterator i = gsi_start_bb (bb);
if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i)))
gsi_next_nondebug (&i);
return i;
}
/* Return a new iterator pointing to the last non-debug statement in
basic block BB. */
static inline gimple_stmt_iterator
gsi_last_nondebug_bb (basic_block bb)
{
gimple_stmt_iterator i = gsi_last_bb (bb);
if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i)))
gsi_prev_nondebug (&i);
return i;
}
/* Return a pointer to the current stmt.
NOTE: You may want to use gsi_replace on the iterator itself,
as this performs additional bookkeeping that will not be done
if you simply assign through a pointer returned by gsi_stmt_ptr. */
static inline gimple *
gsi_stmt_ptr (gimple_stmt_iterator *i)
{
return &i->ptr->stmt;
}
/* Return the basic block associated with this iterator. */
static inline basic_block
gsi_bb (gimple_stmt_iterator i)
{
return i.bb;
}
/* Return the sequence associated with this iterator. */
static inline gimple_seq
gsi_seq (gimple_stmt_iterator i)
{
return i.seq;
}
enum gsi_iterator_update
{
GSI_NEW_STMT, /* Only valid when single statement is added, move
iterator to it. */
GSI_SAME_STMT, /* Leave the iterator at the same statement. */
GSI_CONTINUE_LINKING /* Move iterator to whatever position is suitable
for linking other statements in the same
direction. */
};
/* In gimple-iterator.c */
gimple_stmt_iterator gsi_start_phis (basic_block);
gimple_seq gsi_split_seq_after (gimple_stmt_iterator);
gimple_seq gsi_split_seq_before (gimple_stmt_iterator *);
void gsi_replace (gimple_stmt_iterator *, gimple, bool);
void gsi_insert_before (gimple_stmt_iterator *, gimple,
enum gsi_iterator_update);
void gsi_insert_before_without_update (gimple_stmt_iterator *, gimple,
enum gsi_iterator_update);
void gsi_insert_seq_before (gimple_stmt_iterator *, gimple_seq,
enum gsi_iterator_update);
void gsi_insert_seq_before_without_update (gimple_stmt_iterator *, gimple_seq,
enum gsi_iterator_update);
void gsi_insert_after (gimple_stmt_iterator *, gimple,
enum gsi_iterator_update);
void gsi_insert_after_without_update (gimple_stmt_iterator *, gimple,
enum gsi_iterator_update);
void gsi_insert_seq_after (gimple_stmt_iterator *, gimple_seq,
enum gsi_iterator_update);
void gsi_insert_seq_after_without_update (gimple_stmt_iterator *, gimple_seq,
enum gsi_iterator_update);
void gsi_remove (gimple_stmt_iterator *, bool);
gimple_stmt_iterator gsi_for_stmt (gimple);
void gsi_move_after (gimple_stmt_iterator *, gimple_stmt_iterator *);
void gsi_move_before (gimple_stmt_iterator *, gimple_stmt_iterator *);
void gsi_move_to_bb_end (gimple_stmt_iterator *, struct basic_block_def *);
void gsi_insert_on_edge (edge, gimple);
void gsi_insert_seq_on_edge (edge, gimple_seq);
basic_block gsi_insert_on_edge_immediate (edge, gimple);
basic_block gsi_insert_seq_on_edge_immediate (edge, gimple_seq);
void gsi_commit_one_edge_insert (edge, basic_block *);
void gsi_commit_edge_inserts (void);
gimple gimple_call_copy_skip_args (gimple, bitmap);
/* Convenience routines to walk all statements of a gimple function.
Note that this is useful exclusively before the code is converted
into SSA form. Once the program is in SSA form, the standard
operand interface should be used to analyze/modify statements. */
struct walk_stmt_info
{
/* Points to the current statement being walked. */
gimple_stmt_iterator gsi;
/* Additional data that the callback functions may want to carry
through the recursion. */
void *info;
/* Pointer map used to mark visited tree nodes when calling
walk_tree on each operand. If set to NULL, duplicate tree nodes
will be visited more than once. */
struct pointer_set_t *pset;
/* Operand returned by the callbacks. This is set when calling
walk_gimple_seq. If the walk_stmt_fn or walk_tree_fn callback
returns non-NULL, this field will contain the tree returned by
the last callback. */
tree callback_result;
/* Indicates whether the operand being examined may be replaced
with something that matches is_gimple_val (if true) or something
slightly more complicated (if false). "Something" technically
means the common subset of is_gimple_lvalue and is_gimple_rhs,
but we never try to form anything more complicated than that, so
we don't bother checking.
Also note that CALLBACK should update this flag while walking the
sub-expressions of a statement. For instance, when walking the
statement 'foo (&var)', the flag VAL_ONLY will initially be set
to true, however, when walking &var, the operand of that
ADDR_EXPR does not need to be a GIMPLE value. */
BOOL_BITFIELD val_only : 1;
/* True if we are currently walking the LHS of an assignment. */
BOOL_BITFIELD is_lhs : 1;
/* Optional. Set to true by the callback functions if they made any
changes. */
BOOL_BITFIELD changed : 1;
/* True if we're interested in location information. */
BOOL_BITFIELD want_locations : 1;
/* True if we've removed the statement that was processed. */
BOOL_BITFIELD removed_stmt : 1;
};
/* Callback for walk_gimple_stmt. Called for every statement found
during traversal. The first argument points to the statement to
walk. The second argument is a flag that the callback sets to
'true' if it the callback handled all the operands and
sub-statements of the statement (the default value of this flag is
'false'). The third argument is an anonymous pointer to data
to be used by the callback. */
typedef tree (*walk_stmt_fn) (gimple_stmt_iterator *, bool *,
struct walk_stmt_info *);
gimple walk_gimple_seq (gimple_seq, walk_stmt_fn, walk_tree_fn,
struct walk_stmt_info *);
tree walk_gimple_stmt (gimple_stmt_iterator *, walk_stmt_fn, walk_tree_fn,
struct walk_stmt_info *);
tree walk_gimple_op (gimple, walk_tree_fn, struct walk_stmt_info *);
#ifdef GATHER_STATISTICS
/* Enum and arrays used for allocation stats. Keep in sync with
gimple.c:gimple_alloc_kind_names. */
enum gimple_alloc_kind
{
gimple_alloc_kind_assign, /* Assignments. */
gimple_alloc_kind_phi, /* PHI nodes. */
gimple_alloc_kind_cond, /* Conditionals. */
gimple_alloc_kind_seq, /* Sequences. */
gimple_alloc_kind_rest, /* Everything else. */
gimple_alloc_kind_all
};
extern int gimple_alloc_counts[];
extern int gimple_alloc_sizes[];
/* Return the allocation kind for a given stmt CODE. */
static inline enum gimple_alloc_kind
gimple_alloc_kind (enum gimple_code code)
{
switch (code)
{
case GIMPLE_ASSIGN:
return gimple_alloc_kind_assign;
case GIMPLE_PHI:
return gimple_alloc_kind_phi;
case GIMPLE_COND:
return gimple_alloc_kind_cond;
default:
return gimple_alloc_kind_rest;
}
}
#endif /* GATHER_STATISTICS */
extern void dump_gimple_statistics (void);
/* In gimple-fold.c. */
void gimplify_and_update_call_from_tree (gimple_stmt_iterator *, tree);
tree gimple_fold_builtin (gimple);
bool fold_stmt (gimple_stmt_iterator *);
bool fold_stmt_inplace (gimple_stmt_iterator *);
tree get_symbol_constant_value (tree);
tree canonicalize_constructor_val (tree);
extern tree maybe_fold_and_comparisons (enum tree_code, tree, tree,
enum tree_code, tree, tree);
extern tree maybe_fold_or_comparisons (enum tree_code, tree, tree,
enum tree_code, tree, tree);
bool gimple_val_nonnegative_real_p (tree);
#endif /* GCC_GIMPLE_H */
|
rgb_matrix.h | #ifndef RGB_MATRIX_H
#define RGB_MATRIX_H
#include <stdio.h>
#include <math.h>
#define PIX_MAX_NORM 1
#define PIX_MIN_NORM 0
/*
* Represents symetric 2x2 matrix that contains RGB values of the given pixel.
* Top left element represents R value, bottom left B value and other 2 elements G value.
*/
template<typename T>
struct RGBMatrix {
T r;
T g;
T b;
//Constructor of struct RGBmatrix
__host__ __device__ RGBMatrix(T r, T g, T b) : r(r), g(g), b(b) {}
//Default constructor of struct RGBMatrix. All values are set to 0.0
__host__ __device__ RGBMatrix() : RGBMatrix(0.0, 0.0, 0.0) {}
/*
* Converts the image vector containing rgb values to the vector containing
* rgb matrices. Memory for the new vector needs to be allocated and passed
* as a vector argument. It should be array of RGBMatrix values which size is equal
* as the size of the image. Pointers r, g and b are the pointers to the values
* of the red, green and blue
*/
static void __host__ __device__ rgb2matrix(T *r, T *g, T *b, RGBMatrix<T> *vector, int size);
/*
* Converts the vector containing RGBMatrices to vector containina RGB values.
*/
static void __host__ __device__ matrix2rgb(RGBMatrix<T> *vector, T *r, T *g, T *b, int size);
//Prints RGBMatrix
void __host__ __device__ print();
//Returns minimal RGBMatrix
static RGBMatrix __host__ __device__ min();
//Return maximal RGBMatrix
static RGBMatrix __host__ __device__ max();
//Represents operator <. Matrices are ordered in terms of lexicographic order due to the RGB values
template<typename U>
friend bool __host__ __device__ operator<(RGBMatrix<U> &a, RGBMatrix<U> &b);
//Represents operator >. Matrices are ordered in terms of lexicographic order due to the RGB values
template<typename U>
friend bool __host__ __device__ operator>(RGBMatrix<U> &a, RGBMatrix<U> &b);
};
template<typename T>
void __host__ __device__ RGBMatrix<T>::print() {
printf("(%f, %f, %f)", r, g, b);
}
template<typename T>
void __host__ __device__ RGBMatrix<T>::rgb2matrix(T *r, T *g, T *b, RGBMatrix<T> *vector, int size) {
#pragma omp parallel for
for (int i = 0; i < size; i++) {
RGBMatrix *temp = vector + i;
temp->r = r[i];
temp->g = g[i];
temp->b = b[i];
}
}
template<typename T>
void __host__ __device__ RGBMatrix<T>::matrix2rgb(RGBMatrix<T> *vector, T *r, T *g, T *b, int size) {
#pragma omp parallel for
for (int i = 0; i < size; i++) {
RGBMatrix *temp = vector + i;
r[i] = temp->r;
g[i] = temp->g;
b[i] = temp->b;
}
}
template<typename T>
bool __host__ __device__ operator<(RGBMatrix<T> &a, RGBMatrix<T> &b) {
if (a.r < b.r) {
return true;
} else if (a.r == b.r) {
if (a.g < b.g) {
return true;
} else if (a.g == b.g) {
return (a.b < b.b);
} else {
return false;
}
} else {
return false;
}
}
template<typename T>
bool __host__ __device__ operator>(RGBMatrix<T> &a, RGBMatrix<T> &b) {
if (a.r > b.r) {
return true;
} else if (a.r == b.r) {
if (a.g > b.g) {
return true;
} else if (a.g == b.g) {
return (a.b > b.b);
} else {
return false;
}
} else {
return false;
}
}
template<typename T>
RGBMatrix<T> __host__ __device__ RGBMatrix<T>::max() {
return RGBMatrix<T>(PIX_MAX_NORM, PIX_MAX_NORM, PIX_MAX_NORM);
}
template<typename T>
RGBMatrix<T> __host__ __device__ RGBMatrix<T>::min() {
return RGBMatrix<T>(PIX_MIN_NORM, PIX_MIN_NORM, PIX_MIN_NORM);
}
#endif
|
theta2h.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "compearth.h"
/*!
* @brief Computes h from dip angle theta as in Equation 24c of
* Tape and Tape, 2015.
*
* @param[in] n Number of points in array
* @param[in] theta Dip angle \f$ \theta \in [0, \pi/2] \f$. This
* is an array of dimension [n].
*
* @param[out] h h of Equation 24c \f$ h \in [0,1] \f$. This
* is an array of dimension [n].
*
* @copyright MIT
*
*/
void compearth_theta2h(const int n, const double *__restrict__ theta,
double *__restrict__ h)
{
int i;
#pragma omp simd
for (i=0; i<n; i++)
{
h[i] = cos(theta[i]);
}
return;
}
|
mkl_util.h | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#define TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#ifdef INTEL_MKL
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#if defined(INTEL_MKL_ML_ONLY) || defined(INTEL_MKL_DNN_ONLY)
#ifndef INTEL_MKL
#error "INTEL_MKL_{ML,DNN}_ONLY require INTEL_MKL"
#endif
#endif
#if defined(INTEL_MKL_ML_ONLY) && defined(INTEL_MKL_DNN_ONLY)
#error "at most one of INTEL_MKL_ML_ONLY and INTEL_MKL_DNN_ONLY may be defined"
#endif
#ifdef INTEL_MKL_ML_ONLY
#error \
"Compiling for INTEL MKL ML only is no longer supported.Please use MKL DNN (the default option for --config=mkl)"
#endif
#ifdef INTEL_MKL_ML_ONLY
#include "mkl_dnn.h"
#include "mkl_dnn_types.h"
#include "mkl_service.h"
#include "mkl_trans.h"
#endif
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/env_var.h"
#include "tensorflow/core/util/padding.h"
#include "tensorflow/core/util/tensor_format.h"
#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
#include "tensorflow/core/lib/core/stringpiece.h"
using mkldnn::engine;
using mkldnn::memory;
using mkldnn::padding_kind;
using mkldnn::primitive;
using mkldnn::reorder;
#endif
#ifdef _WIN32
typedef unsigned int uint;
#endif
namespace tensorflow {
// The file contains a number of utility classes and functions used by MKL
// enabled kernels
// This class encapsulates all the meta data that is associated with an MKL
// tensor. A tensor is an MKL tensor if it was created as the result of an
// MKL operation, and did not go through a conversion to a standard
// Tensorflow tensor.
// For use with MKL ML, has been deprecated
typedef enum { W = 0, H = 1, C = 2, N = 3 } MklDims;
// The dimensions order that MKL DNN internally uses for 2D activations
// [Batch, Channel, Height, Width] and
// for 2D filters [Out_Channel, In_Channel, Height, Width].
typedef enum {
Dim_N = 0,
Dim_C = 1,
Dim_H = 2,
Dim_W = 3,
Dim_O = 0,
Dim_I = 1
} MklDnnDims;
// The dimensions order that MKL DNN internally uses for 3D activations
// [Batch, Channel, Depth, Height, Width] and
// for 3D filters [Out_Channel, In_Channel, Depth, Height, Width].
typedef enum {
Dim3d_N = 0,
Dim3d_C = 1,
Dim3d_D = 2,
Dim3d_H = 3,
Dim3d_W = 4,
Dim3d_O = 0,
Dim3d_I = 1
} MklDnnDims3D;
// Enum for the order of dimensions of a TF 2D filter with shape [filter_height,
// filter_width, in_channels, out_channels]
typedef enum {
TF_2DFILTER_DIM_H = 0,
TF_2DFILTER_DIM_W = 1,
TF_2DFILTER_DIM_I = 2,
TF_2DFILTER_DIM_O = 3
} TFFilterDims2d;
// Enum for the order of dimensions of a TF 3D filter with shape [filter_depth,
// filter_height, filter_width, in_channels, out_channels]
typedef enum {
TF_3DFILTER_DIM_P = 0,
TF_3DFILTER_DIM_H = 1,
TF_3DFILTER_DIM_W = 2,
TF_3DFILTER_DIM_I = 3,
TF_3DFILTER_DIM_O = 4
} TFFilterDims3d;
// The dimensions order that MKL DNN requires for the filter in a grouped
// convolution (2D only)
typedef enum {
MKL_GROUP_FILTER_DIM_G = 0,
MKL_GROUP_FILTER_DIM_O = 1,
MKL_GROUP_FILTER_DIM_I = 2,
MKL_GROUP_FILTER_DIM_H = 3,
MKL_GROUP_FILTER_DIM_W = 4
} MklDnnFilterGroupDims;
// Enum used to templatize MklOp kernel implementations
// that support both fp32 and int8 versions.
// Template selector for kernels that ship both an int8 (quantized) and a
// fp32 implementation.
enum class MklQuantization {
  QUANTIZED_VERSION,
  FP_VERSION,
};
// Batch sizes at or below this value count as "small".
// NOTE(review): consumers of this threshold are outside this view -- confirm.
static const int kSmallBatchSize = 32;
#ifdef INTEL_MKL_ML_ONLY
// Deprecated (MKL-ML path): encapsulates the MKL layout meta-data that
// travels with a tensor produced by an MKL-ML op, plus helpers to
// serialize/deserialize that meta-data into a companion uint8 buffer.
class MklShape {
 public:
  MklShape() {}
  TF_DISALLOW_COPY_AND_ASSIGN(MklShape);  // Cannot copy

  ~MklShape() {
    // Release only what was actually allocated; MKL layout handles must be
    // destroyed through the dnn API, heap arrays via delete[].
    if (sizes_) delete[] sizes_;
    if (strides_) delete[] strides_;
    if (mklLayout_) CHECK_EQ(dnnLayoutDelete_F32(mklLayout_), E_SUCCESS);
    if (tfLayout_) CHECK_EQ(dnnLayoutDelete_F32(tfLayout_), E_SUCCESS);
    if (tf_to_mkl_dim_map_) delete[] tf_to_mkl_dim_map_;
  }

  const bool IsMklTensor() const { return isMklTensor_; }
  void SetMklTensor(const bool isMklTensor) { isMklTensor_ = isMklTensor; }
  void SetDimensions(const size_t dimension) { dimension_ = dimension; }
  // Takes over an already-constructed MKL layout handle.
  void SetMklLayout(dnnLayout_t mklLayout) { mklLayout_ = mklLayout; }
  // Derives the MKL layout from the given primitive/resource pair.
  void SetMklLayout(const void* primitive, size_t resourceType) {
    CHECK_EQ(
        dnnLayoutCreateFromPrimitive_F32(&mklLayout_, (dnnPrimitive_t)primitive,
                                         (dnnResourceType_t)resourceType),
        E_SUCCESS);
  }
  // Copies the TF-side sizes/strides and creates the matching TF layout.
  // NOTE(review): a second call would leak the previously allocated
  // sizes_/strides_ arrays -- confirm callers invoke this only once.
  void SetTfLayout(const size_t dimension, const size_t* sizes,
                   const size_t* strides) {
    dimension_ = dimension;
    if (dimension > 0) {  // MKl doesn't support zero dimension tensors
      sizes_ = new size_t[dimension];
      strides_ = new size_t[dimension];
      for (int ii = 0; ii < dimension; ii++) {
        sizes_[ii] = sizes[ii];
        strides_[ii] = strides[ii];
      }
      CHECK_EQ(dnnLayoutCreate_F32(&tfLayout_, dimension, sizes, strides),
               E_SUCCESS);
    }
  }

  // Default case - MKL dim ordering is opposite of TF dim ordering
  // MKL -> (DIMS-1)...0 where (DIMS-1) is outermost dim and 0 is innermost dim
  // TF -> 0...(DIMS-1) where 0 is outermost dim and (DIMS-1) is innermost dim
  // For layers that rely on data_format semantics (conv, pooling etc.)
  // or operate only on certain dimensions (relu, concat, split etc.),
  // Mkl APIs might require us to reorder these dimensions. In such cases,
  // kernels should explicitly set this map
  void SetTfDimOrder(const size_t dimension) {
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    for (size_t ii = 0; ii < dimension; ii++) {
      tf_to_mkl_dim_map_[ii] = dimension - (ii + 1);
    }
  }

  // Uses a caller-provided TF->MKL dimension map.
  void SetTfDimOrder(const size_t dimension, const size_t* tf_to_mkl_dim_map) {
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    for (size_t ii = 0; ii < dimension; ii++) {
      tf_to_mkl_dim_map_[ii] = tf_to_mkl_dim_map[ii];
    }
  }

  // Builds the map from a 4-D TensorFormat (NHWC/NCHW).
  void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
    CHECK_EQ(dimension, 4);
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDims::W;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDims::H;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDims::C;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDims::N;
  }

  const dnnLayout_t GetMklLayout() const { return mklLayout_; }
  const dnnLayout_t GetTfLayout() const { return tfLayout_; }
  // Layout describing the tensor's current in-memory arrangement.
  const dnnLayout_t GetCurLayout() const {
    return isMklTensor_ ? mklLayout_ : tfLayout_;
  }
  size_t GetDimension() const { return dimension_; }
  const size_t* GetSizes() const { return sizes_; }
  int64 dim_size(int index) const { return sizes_[index]; }
  // Size of TF dimension `index`, looked up through the TF->MKL map.
  int64 tf_dim_size(int index) const {
    return sizes_[tf_to_mkl_dim_map_[index]];
  }
  const size_t* GetStrides() const { return strides_; }
  const size_t* GetTfToMklDimMap() const { return tf_to_mkl_dim_map_; }
  size_t tf_dim_idx(int index) const { return tf_to_mkl_dim_map_[index]; }

  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Channel dimension.
  bool IsMklChannelDim(int d) const { return tf_dim_idx(d) == MklDims::C; }
  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Batch dimension.
  bool IsMklBatchDim(int d) const { return tf_dim_idx(d) == MklDims::N; }
  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Width dimension.
  bool IsMklWidthDim(int d) const { return tf_dim_idx(d) == MklDims::W; }
  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Height dimension.
  bool IsMklHeightDim(int d) const { return tf_dim_idx(d) == MklDims::H; }

  // Check if the TF-Mkl dimension ordering map specifies if the input
  // tensor is in NCHW format.
  bool IsTensorInNCHWFormat() const {
    TensorFormat data_format = FORMAT_NCHW;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  // Check if the TF-Mkl dimension ordering map specifies if the input
  // tensor is in NHWC format.
  bool IsTensorInNHWCFormat() const {
    TensorFormat data_format = FORMAT_NHWC;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  // Converts the flat data in `input` (laid out per the current layout)
  // into `output` laid out per `targetLayout`; the MKL conversion
  // primitive is created and destroyed on the spot.
  void GetConvertedFlatData(dnnLayout_t targetLayout, void* input,
                            void* output) const {
    dnnLayout_t curLayout;
    if (isMklTensor_)
      curLayout = mklLayout_;
    else
      curLayout = tfLayout_;
    dnnPrimitive_t convert;
    CHECK_EQ(dnnConversionCreate_F32(&convert, curLayout, targetLayout),
             E_SUCCESS);
    CHECK_EQ(dnnConversionExecute_F32(convert, input, output), E_SUCCESS);
    CHECK_EQ(dnnDelete_F32(convert), E_SUCCESS);
  }

  // The following methods are used for serializing and de-serializing the
  // contents of the mklshape object.
  // The data is serialized in this order
  // isMklTensor_
  // dimension_
  // sizes_
  // strides_
  // mklLayout_
  // tfLayout_
  // tf_to_mkl_dim_map_

#define SIZE_OF_MKL_DNN_BUF \
  (dnnLayoutSerializationBufferSize_F32())  // Size of buffer needed to
                                            // serialize dnn_layout pointer

  // Size of buffer to hold the serialized object, the size is computed as
  // follows sizeof(isMklTensor_) + sizeof(dimension_) + sizeof(sizes_) +
  // sizeof(strides_)
  // + sizeof(mklLayout_ buffer) + sizeof(tfLayout_ buffer)
  // + sizeof(tf_to_mkl_dim_map_)
#define SIZE_OF_MKL_SERIAL_DATA(dims) \
  (2 * sizeof(size_t) + 3 * dims * sizeof(size_t) + 2 * SIZE_OF_MKL_DNN_BUF)

  // First we need to define some macro for offsets into the serial buffer where
  // different elements of Mklshape is written/read from
#define IS_MKL_TENSOR_OFFSET 0
// Location from start of buffer where isMklTensor_ is serialized
#define DIMS_OFFSET \
  (IS_MKL_TENSOR_OFFSET + sizeof(size_t))  // Location of dimension_
// Location of sizes. Note dim is not used here, left here
// to make macros consistent.
#define SIZES_OFFSET(dims) (DIMS_OFFSET + sizeof(size_t))
#define STRIDES_OFFSET(dims) \
  (SIZES_OFFSET(dims) + dims * sizeof(size_t))  // Location of strides
#define MKL_LAYOUT_OFFSET(dims) \
  (STRIDES_OFFSET(dims) + dims * sizeof(size_t))  // Location of mklLayout_
#define TF_LAYOUT_OFFSET(dims) \
  (MKL_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)  // Location of tfLayout_
// Location of tf_to_mkl_dim_map_
#define TF_TO_MKL_DIM_MAP_OFFSET(dims) \
  (TF_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)

  // TODO(agramesh1) make sure to create a const to share with rewrite pass
  // for min size of MKL metadata tensor.
  void DeSerializeMklShape(const unsigned char* buf, size_t buf_size) {
    CHECK(buf_size >= sizeof(size_t)) << "Bufsize too small in DeSerialize";
    // Make sure buffer holds at least isMklTensor_
    isMklTensor_ =
        *reinterpret_cast<const size_t*>(buf + IS_MKL_TENSOR_OFFSET) != 0;

    if (isMklTensor_) {  // If it is an MKL Tensor then read the rest
      dimension_ = *(reinterpret_cast<const size_t*>(buf + DIMS_OFFSET));
      CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
          << "Bufsize too small in DeSerialize";
      sizes_ = new size_t[dimension_];
      strides_ = new size_t[dimension_];
      tf_to_mkl_dim_map_ = new size_t[dimension_];
      for (int i = 0; i < dimension_; i++) {
        sizes_[i] =
            reinterpret_cast<const size_t*>(buf + SIZES_OFFSET(dimension_))[i];
        strides_[i] = reinterpret_cast<const size_t*>(
            buf + STRIDES_OFFSET(dimension_))[i];
        tf_to_mkl_dim_map_[i] = reinterpret_cast<const size_t*>(
            buf + TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i];
      }
      CHECK_EQ(dnnLayoutDeserialize_F32(&mklLayout_,
                                        buf + MKL_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
      CHECK_EQ(dnnLayoutDeserialize_F32(&tfLayout_,
                                        buf + TF_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
    }
  }

  // Serializes this object into `buf` using the offsets documented above.
  void SerializeMklShape(unsigned char* buf, size_t buf_size) const {
    CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
        << "Bufsize too small to Serialize";
    *reinterpret_cast<size_t*>(buf + IS_MKL_TENSOR_OFFSET) =
        isMklTensor_ ? 1 : 0;
    if (isMklTensor_) {
      *(reinterpret_cast<size_t*>(buf + DIMS_OFFSET)) = dimension_;
      for (int i = 0; i < dimension_; i++) {
        reinterpret_cast<size_t*>(buf + SIZES_OFFSET(dimension_))[i] =
            sizes_[i];
        reinterpret_cast<size_t*>(buf + STRIDES_OFFSET(dimension_))[i] =
            strides_[i];
        reinterpret_cast<size_t*>(buf +
                                  TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i] =
            tf_to_mkl_dim_map_[i];
      }
      CHECK_EQ(dnnLayoutSerialize_F32(mklLayout_,
                                      buf + MKL_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
      CHECK_EQ(
          dnnLayoutSerialize_F32(tfLayout_, buf + TF_LAYOUT_OFFSET(dimension_)),
          E_SUCCESS);
    }
  }

 private:
  bool isMklTensor_ =
      false;  // Flag to indicate if the tensor is an MKL tensor or not
  dnnLayout_t mklLayout_ = nullptr;  // Pointer to the MKL layout
  dnnLayout_t tfLayout_ = nullptr;   // Pointer to layout of corresponding
  // Tensorflow tensor, used when conversion from MKL to standard tensor
  size_t dimension_ = 0;
  size_t* sizes_ = nullptr;    // Required by MKL for conversions
  size_t* strides_ = nullptr;  // Required by MKL for conversions
  size_t* tf_to_mkl_dim_map_ =
      nullptr;  // TF dimension corresponding to this MKL dimension
};
#else
// Forward decl
TensorFormat MklDnn3DDataFormatToTFDataFormat(memory::format format);
TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format);
memory::dims CalculateTFStrides(const memory::dims& dims_tf_order);
memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
const memory::dims& strides,
memory::data_type dtype);
// MKL-DNN replacement for MklShape: carries the MKL-DNN layout meta-data
// (sizes, TF<->MKL dimension map, element type and memory descriptor) for
// a tensor, serialized as one trivially-copyable struct.
class MklDnnShape {
 private:
  typedef struct {
    /// Flag to indicate if the tensor is an MKL tensor or not
    bool is_mkl_tensor_ = false;
    /// Number of dimensions in Tensorflow format
    size_t dimension_ = 0;
    /// Required by MKLDNN for conversions
    mkldnn_dims_t sizes_;  // Required by MKL for conversions
    memory::format tf_data_format_ = memory::format::format_undef;
    memory::data_type T_ = memory::data_type::data_undef;
    // MKL layout
    mkldnn_memory_desc_t mkl_md_;
    /// TF dimension corresponding to this MKL dimension
    mkldnn_dims_t map_;
  } MklShapeData;
  MklShapeData data_;

  typedef std::remove_extent<mkldnn_dims_t>::type mkldnn_dim_t;

#define INVALID_DIM_SIZE -1

 public:
  MklDnnShape() {
    // Mark every size/map slot invalid until SetTfLayout/SetTfDimOrder
    // fill them in.
    for (size_t i = 0; i < sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
         ++i) {
      data_.sizes_[i] = -1;
    }
    for (size_t i = 0; i < sizeof(data_.map_) / sizeof(data_.map_[0]); ++i) {
      data_.map_[i] = -1;
    }
  }

  ~MklDnnShape() {}
  TF_DISALLOW_COPY_AND_ASSIGN(MklDnnShape);  // Cannot copy

  /// Helper function to compare memory::desc objects for MklDnn.
  /// May be this should go into MklDnn directly.
  inline bool CompareMklDnnLayouts(const memory::desc& md1,
                                   const memory::desc& md2) const {
    // Byte-wise comparison of the underlying C descriptor structs.
    mkldnn_memory_desc_t mdd1 = md1.data;
    mkldnn_memory_desc_t mdd2 = md2.data;
    const char* d1 = reinterpret_cast<const char*>(&mdd1);
    const char* d2 = reinterpret_cast<const char*>(&mdd2);

    size_t md_size = sizeof(mdd1);
    for (size_t i = 0; i < md_size; i++) {
      if (*d1++ != *d2++) {
        return false;
      }
    }
    return true;
  }

  /// Equality function for MklDnnShape objects
  /// @return true if both are equal; false otherwise.
  inline bool operator==(const MklDnnShape& input_shape) const {
    if (this->IsMklTensor() != input_shape.IsMklTensor()) {
      return false;
    }

    // If input tensors are in Mkl layout, then we check for dimensions and
    // sizes.
    if (this->IsMklTensor()) {
      return this->GetTfShape() == input_shape.GetTfShape() &&
             CompareMklDnnLayouts(this->GetMklLayout(),
                                  input_shape.GetMklLayout());
    }

    return true;
  }

  /// Equality operator for MklDnnShape and TFShape.
  /// Returns: true if TF shapes for both are the same, false otherwise
  // NOTE(review): when this object is NOT an MKL tensor the result is
  // unconditionally false rather than a plain shape comparison -- confirm
  // callers rely on that.
  inline bool operator==(const TensorShape& input_shape) const {
    if (!this->IsMklTensor()) {
      return false;
    }
    return this->GetTfShape() == input_shape;
  }

  inline const bool IsMklTensor() const { return data_.is_mkl_tensor_; }
  inline void SetMklTensor(bool is_mkl_tensor) {
    data_.is_mkl_tensor_ = is_mkl_tensor;
  }

  inline void SetDimensions(const size_t dimension) {
    data_.dimension_ = dimension;
  }

  // Size of the dimension identified by character ('N','C','H','W').
  inline size_t GetDimension(char dimension) const {
    int index = GetMklDnnTensorDimIndex(dimension);
    CHECK(index >= 0 && index < this->GetDimension())
        << "Invalid index from the dimension: " << index << ", " << dimension;
    return this->DimSize(index);
  }

  // 3D variant of GetDimension (adds 'D' for depth).
  inline size_t GetDimension3D(char dimension) const {
    int index = GetMklDnnTensor3DDimIndex(dimension);
    CHECK(index >= 0 && index < this->GetDimension())
        << "Invalid index from the dimension: " << index << ", " << dimension;
    return this->DimSize(index);
  }

  inline int32 GetMklDnnTensorDimIndex(char dimension) const {
    switch (dimension) {
      case 'N':
        return MklDnnDims::Dim_N;
      case 'C':
        return MklDnnDims::Dim_C;
      case 'H':
        return MklDnnDims::Dim_H;
      case 'W':
        return MklDnnDims::Dim_W;
      default:
        LOG(FATAL) << "Invalid dimension: " << dimension;
        return -1;  // Avoid compiler warning about missing return value
    }
  }

  inline int32 GetMklDnnTensor3DDimIndex(char dimension) const {
    switch (dimension) {
      case 'N':
        return MklDnnDims3D::Dim3d_N;
      case 'C':
        return MklDnnDims3D::Dim3d_C;
      case 'D':
        return MklDnnDims3D::Dim3d_D;
      case 'H':
        return MklDnnDims3D::Dim3d_H;
      case 'W':
        return MklDnnDims3D::Dim3d_W;
      default:
        LOG(FATAL) << "Invalid dimension: " << dimension;
        return -1;  // Avoid compiler warning about missing return value
    }
  }

  inline size_t GetDimension() const { return data_.dimension_; }
  inline const int* GetSizes() const {
    return reinterpret_cast<const int*>(&data_.sizes_[0]);
  }

  // Returns an mkldnn::memory::dims object that contains the sizes of this
  // MklDnnShape object.
  inline memory::dims GetSizesAsMklDnnDims() const {
    memory::dims retVal;
    if (data_.is_mkl_tensor_) {
      size_t dimensions = sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
      for (size_t i = 0; i < dimensions; i++) {
        if (data_.sizes_[i] != INVALID_DIM_SIZE)
          retVal.push_back(data_.sizes_[i]);
      }
    } else {
      // Calling this on a non-MKL tensor is a hard error.
      CHECK_EQ(data_.is_mkl_tensor_, true);
    }
    return retVal;
  }

  inline int64 DimSize(int index) const {
    CHECK_LT(index, sizeof(data_.sizes_) / sizeof(data_.sizes_[0]));
    return data_.sizes_[index];
  }

  /// Return TensorShape that describes the Tensorflow shape of the tensor
  /// represented by this MklShape.
  inline TensorShape GetTfShape() const {
    CHECK_EQ(data_.is_mkl_tensor_, true);

    std::vector<int32> shape(data_.dimension_, -1);
    if (data_.tf_data_format_ != memory::format::blocked) {
      for (size_t idx = 0; idx < data_.dimension_; ++idx) {
        shape[idx] = data_.sizes_[TfDimIdx(idx)];
      }
    } else {
      // If Tensorflow shape is in Blocked format, then we don't have dimension
      // map for it. So we just create Tensorflow shape from sizes in the
      // specified order.
      for (size_t idx = 0; idx < data_.dimension_; ++idx) {
        shape[idx] = data_.sizes_[idx];
      }
    }

    TensorShape ts;
    bool ret = TensorShapeUtils::MakeShape(shape, &ts).ok();
    CHECK_EQ(ret, true);
    return ts;
  }

  inline void SetElemType(memory::data_type dt) { data_.T_ = dt; }
  inline const memory::data_type GetElemType() { return data_.T_; }

  inline void SetMklLayout(memory::primitive_desc* pd) {
    CHECK_NOTNULL(pd);
    data_.mkl_md_ = pd->desc().data;
  }

  inline void SetMklLayout(memory::desc* md) {
    CHECK_NOTNULL(md);
    data_.mkl_md_ = md->data;
  }

  inline const memory::desc GetMklLayout() const {
    return memory::desc(data_.mkl_md_);
  }

  inline memory::format GetTfDataFormat() const {
    return data_.tf_data_format_;
  }

  /// We don't create primitive_descriptor for TensorFlow layout now.
  /// We use lazy evaluation and create it only when needed. Input format can
  /// also be Blocked format.
  inline void SetTfLayout(size_t dims, const memory::dims& sizes,
                          memory::format format) {
    CHECK_EQ(dims, sizes.size());
    data_.dimension_ = dims;
    for (size_t ii = 0; ii < dims; ii++) {
      data_.sizes_[ii] = sizes[ii];
    }
    data_.tf_data_format_ = format;
    if (format != memory::format::blocked) {
      SetTfDimOrder(dims, format);
    }
  }

  inline const memory::desc GetTfLayout() const {
    memory::dims dims;
    for (size_t ii = 0; ii < data_.dimension_; ii++) {
      dims.push_back(data_.sizes_[ii]);
    }

    // Create Blocked memory desc if input TF format was set like that.
    if (data_.tf_data_format_ == memory::format::blocked) {
      auto strides = CalculateTFStrides(dims);
      return CreateBlockedMemDescHelper(dims, strides, data_.T_);
    } else {
      return memory::desc(dims, data_.T_, data_.tf_data_format_);
    }
  }

  inline const memory::desc GetCurLayout() const {
    return IsMklTensor() ? GetMklLayout() : GetTfLayout();
  }

  // nhasabni - I've removed SetTfDimOrder that was setting default order in
  // case of MKL-ML. We don't need a case of default dimension order because
  // when an operator that does not get data_format attribute gets all inputs
  // in Tensorflow format, it will produce output in Tensorflow format.
  inline void SetTfDimOrder(const size_t dimension, const mkldnn_dims_t map) {
    CHECK(dimension == data_.dimension_);
    for (size_t ii = 0; ii < dimension; ii++) {
      data_.map_[ii] = map[ii];
    }
  }

  inline void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
    if (dimension == 5) {
      CHECK(dimension == data_.dimension_);
      data_.map_[GetTensorDimIndex<3>(data_format, '0')] =
          MklDnnDims3D::Dim3d_D;
      data_.map_[GetTensorDimIndex<3>(data_format, '1')] =
          MklDnnDims3D::Dim3d_H;
      data_.map_[GetTensorDimIndex<3>(data_format, '2')] =
          MklDnnDims3D::Dim3d_W;
      data_.map_[GetTensorDimIndex<3>(data_format, 'C')] =
          MklDnnDims3D::Dim3d_C;
      data_.map_[GetTensorDimIndex<3>(data_format, 'N')] =
          MklDnnDims3D::Dim3d_N;
    } else {
      CHECK_EQ(dimension, 4);
      CHECK(dimension == data_.dimension_);
      data_.map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDnnDims::Dim_W;
      data_.map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDnnDims::Dim_H;
      data_.map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDnnDims::Dim_C;
      data_.map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDnnDims::Dim_N;
    }
  }

  inline void SetTfDimOrder(const size_t dimension, memory::format format) {
    TensorFormat data_format = MklDnnDataFormatToTFDataFormat(format);
    SetTfDimOrder(dimension, data_format);
  }

  inline const mkldnn_dim_t* GetTfToMklDimMap() const { return &data_.map_[0]; }
  inline size_t TfDimIdx(int index) const { return data_.map_[index]; }
  inline int64 TfDimSize(int index) const {
    return data_.sizes_[TfDimIdx(index)];
  }

  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Channel dimension.
  inline bool IsMklChannelDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_C;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Batch dimension.
  inline bool IsMklBatchDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_N;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Width dimension.
  inline bool IsMklWidthDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_W;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Height dimension.
  inline bool IsMklHeightDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_H;
  }

  /// Check if the TF-Mkl dimension ordering map specifies if the input
  /// tensor is in NCHW format.
  inline bool IsTensorInNCHWFormat() const {
    TensorFormat data_format = FORMAT_NCHW;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  /// Check if the TF-Mkl dimension ordering map specifies if the input
  /// tensor is in NHWC format.
  inline bool IsTensorInNHWCFormat() const {
    TensorFormat data_format = FORMAT_NHWC;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  /// The following methods are used for serializing and de-serializing the
  /// contents of the mklshape object.
  /// The data is serialized in this order
  /// is_mkl_tensor_ : dimension_ : sizes_ : map_: format_ : T_ : mkl_pd_;

  /// Size of buffer to hold the serialized object, the size is computed by
  /// following above mentioned order
  inline size_t GetSerializeBufferSize() const { return sizeof(MklShapeData); }

  void SerializeMklDnnShape(unsigned char* buf, size_t buf_size) const {
    CHECK(buf_size >= GetSerializeBufferSize())
        << "Buffer size is too small to SerializeMklDnnShape";
    // MklShapeData holds no pointers, so a struct copy serializes it fully.
    *reinterpret_cast<MklShapeData*>(buf) = data_;
  }

  void DeSerializeMklDnnShape(const unsigned char* buf, size_t buf_size) {
    // Make sure buffer holds at least is_mkl_tensor_.
    CHECK(buf_size >= sizeof(data_.is_mkl_tensor_))
        << "Buffer size is too small in DeSerializeMklDnnShape";

    const bool is_mkl_tensor = *reinterpret_cast<const bool*>(buf);
    if (is_mkl_tensor) {  // If it is an MKL Tensor then read the rest
      CHECK(buf_size >= GetSerializeBufferSize())
          << "Buffer size is too small in DeSerializeMklDnnShape";
      data_ = *reinterpret_cast<const MklShapeData*>(buf);
    }
  }
};
#endif
// List of MklShape objects. Used in Concat/Split layers.
#ifndef INTEL_MKL_ML_ONLY
typedef std::vector<MklDnnShape> MklDnnShapeList;
#else
typedef std::vector<MklShape> MklShapeList;
#endif
#ifdef INTEL_MKL_ML_ONLY
// Check if all tensors specified by MklShapes are MKL tensors.
// Returns true iff every shape in `shapes` reports itself as an MKL
// tensor (i.e. carries MKL layout meta-data).
inline bool AreAllMklTensors(const MklShapeList& shapes) {
  for (const auto& shape : shapes) {
    if (!shape.IsMklTensor()) return false;
  }
  return true;
}
// Converts a tensor carrying MKL-ML layout meta-data into a plain
// Tensorflow tensor by reordering its flat data from the MKL layout into
// the TF layout. Returns a freshly allocated tensor; `mkl_tensor` is not
// modified.
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
                             const MklShape& mkl_shape) {
  Tensor output_tensor;
  TensorShape output_shape;
  for (size_t j = 0; j < mkl_shape.GetDimension(); j++) {
    // Outermost to innermost dimension
    output_shape.AddDim(mkl_shape.GetSizes()[mkl_shape.tf_dim_idx(j)]);
  }
  // Allocate output tensor. Fail hard if allocation does not succeed;
  // previously the returned Status was silently discarded and a failure
  // would surface later as a crash on the unallocated buffer.
  CHECK(context
            ->allocate_temp(DataTypeToEnum<T>::v(), output_shape,
                            &output_tensor)
            .ok());
  dnnLayout_t output_layout = static_cast<dnnLayout_t>(mkl_shape.GetTfLayout());
  void* input_buffer = const_cast<T*>(mkl_tensor.flat<T>().data());
  void* output_buffer = const_cast<T*>(output_tensor.flat<T>().data());
  if (mkl_tensor.NumElements() != 0) {
    mkl_shape.GetConvertedFlatData(output_layout, input_buffer, output_buffer);
  }
  return output_tensor;
}
#else
using mkldnn::stream;
template <typename T>
class MklDnnData;
// Converts a tensor carrying MKL-DNN layout meta-data into a plain
// Tensorflow tensor, issuing an MKL-DNN reorder when the layouts differ.
// If the input is already a TF tensor it is returned as-is.
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
                             const MklDnnShape& mkl_shape) {
  Tensor output_tensor;
  try {
    if (!mkl_shape.IsMklTensor())
      return mkl_tensor;  // return input since it is already TF tensor

    TensorShape output_shape = mkl_shape.GetTfShape();
    // (Removed a stray empty statement ';' that followed the line above.)
    // Allocate output tensor; fail hard on allocation failure instead of
    // silently discarding the Status as before.
    CHECK(context
              ->allocate_temp(DataTypeToEnum<T>::v(), output_shape,
                              &output_tensor)
              .ok());

    auto cpu_engine = engine(engine::cpu, 0);
    MklDnnData<T> input(&cpu_engine);

    // Get Mkl layout of input tensor.
    auto input_mkl_md = mkl_shape.GetMklLayout();
    auto output_tf_md = mkl_shape.GetTfLayout();
    auto output_tf_pd = memory::primitive_desc(output_tf_md, cpu_engine);
    input.SetUsrMem(input_mkl_md, &mkl_tensor);

    // Reorder from the MKL layout into the TF layout when needed.
    if (input.IsReorderNeeded(output_tf_pd)) {
      std::vector<primitive> net;
      CHECK_EQ(input.CheckReorderToOpMem(output_tf_pd, &output_tensor, &net),
               true);
      stream(stream::kind::eager).submit(net).wait();
    } else {
      // If not, just forward input tensor to output tensor.
      CHECK(output_tensor.CopyFrom(mkl_tensor, output_shape));
    }
  } catch (mkldnn::error& e) {
    string error_msg = "Status: " + std::to_string(e.status) +
                       ", message: " + string(e.message) + ", in file " +
                       string(__FILE__) + ":" + std::to_string(__LINE__);
    LOG(FATAL) << "Operation received an exception: " << error_msg;
  }
  return output_tensor;
}
#endif
// Get the MKL shape from the second string tensor
#ifdef INTEL_MKL_ML_ONLY
// Deserializes the MKL-ML shape meta-data of input `n` from its companion
// meta-data tensor into `mklshape`.
inline void GetMklShape(OpKernelContext* ctext, int n, MklShape* mklshape) {
  // Fetch the meta-data tensor once (the original called ctext->input()
  // twice for the same index).
  const Tensor& meta =
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()));
  mklshape->DeSerializeMklShape(meta.flat<uint8>().data(),
                                meta.flat<uint8>().size() * sizeof(uint8));
}
#else
// Deserializes the MKL-DNN shape meta-data of input `n` from its companion
// meta-data tensor into `mklshape`.
inline void GetMklShape(OpKernelContext* ctext, int n, MklDnnShape* mklshape) {
  // Fetch the meta-data tensor once (the original called ctext->input()
  // twice for the same index).
  const Tensor& meta =
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()));
  mklshape->DeSerializeMklDnnShape(meta.flat<uint8>().data(),
                                   meta.flat<uint8>().size() * sizeof(uint8));
}
#endif
// Gets the actual input
// Returns the data tensor (as opposed to the meta-data tensor) for
// logical input index `n`.
inline const Tensor& MklGetInput(OpKernelContext* ctext, int n) {
  const int data_idx = GetTensorDataIndex(n, ctext->num_inputs());
  return ctext->input(data_idx);
}
// Retrieves the list-valued input registered under `name` into
// `input_tensors`; `input_tensors` must be non-null.
inline void GetMklInputList(OpKernelContext* ctext, StringPiece name,
                            OpInputList* input_tensors) {
  CHECK_NOTNULL(input_tensors);
  ctext->input_list(name, input_tensors);
}
#ifdef INTEL_MKL_ML_ONLY
// Deserializes the MKL-ML shapes of the list input `name` into
// `mkl_shapes`, which must already be sized to hold one entry per tensor.
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklShapeList* mkl_shapes) {
  OpInputList input_mkl_tensors;
  GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors);

  const int num_tensors = input_mkl_tensors.size();
  for (int idx = 0; idx < num_tensors; ++idx) {
    const auto serialized = input_mkl_tensors[idx].flat<uint8>();
    (*mkl_shapes)[idx].DeSerializeMklShape(serialized.data(),
                                           serialized.size() * sizeof(uint8));
  }
}
#else
// Deserializes the MKL-DNN shapes of the list input `name` into
// `mkl_shapes`, which must already be sized to hold one entry per tensor.
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklDnnShapeList* mkl_shapes) {
  OpInputList input_mkl_tensors;
  GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors);

  const int num_tensors = input_mkl_tensors.size();
  for (int idx = 0; idx < num_tensors; ++idx) {
    const auto serialized = input_mkl_tensors[idx].flat<uint8>();
    (*mkl_shapes)[idx].DeSerializeMklDnnShape(
        serialized.data(), serialized.size() * sizeof(uint8));
  }
}
#endif
#ifndef INTEL_MKL_ML_ONLY
/// Get shape of input tensor pointed by 'input_idx' in TensorShape format.
/// If the input tensor is in MKL layout, then obtains TensorShape from
/// MklShape.
/// Get shape of input tensor pointed by 'input_idx' in TensorShape format.
/// If the input tensor is in MKL layout, then obtains TensorShape from
/// MklShape.
inline TensorShape GetTfShape(OpKernelContext* context, size_t input_idx) {
  // Sanity check.
  CHECK_NOTNULL(context);
  CHECK_LT(input_idx, context->num_inputs());

  MklDnnShape input_mkl_shape;
  GetMklShape(context, input_idx, &input_mkl_shape);
  if (!input_mkl_shape.IsMklTensor()) {
    // Plain TF tensor: use its own shape.
    return MklGetInput(context, input_idx).shape();
  }
  return input_mkl_shape.GetTfShape();
}
#endif
#ifdef INTEL_MKL_ML_ONLY
// Allocate the second output tensor that will contain
// the MKL shape serialized
// Allocates the meta-data output tensor for output `n` and serializes
// `mkl_shape` (MKL-ML flavor) into it.
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklShape& mkl_shape) {
  TensorShape meta_shape;
  meta_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
  Tensor* meta_tensor = nullptr;
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            meta_shape, &meta_tensor));
  mkl_shape.SerializeMklShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#else
// Allocate the second output tensor that will contain
// the MKL shape serialized
// Allocates the meta-data output tensor for output `n` and serializes
// `mkl_shape` (MKL-DNN flavor) into it.
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklDnnShape& mkl_shape) {
  TensorShape meta_shape;
  meta_shape.AddDim(mkl_shape.GetSerializeBufferSize());
  Tensor* meta_tensor = nullptr;
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            meta_shape, &meta_tensor));
  mkl_shape.SerializeMklDnnShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif
#ifdef INTEL_MKL_ML_ONLY
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
// Allocates both the data output (with `tf_shape`) and its companion
// meta-data output, serializing `mkl_shape` (MKL-ML flavor) into the
// latter.
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklShape& mkl_shape) {
  // Data output first.
  OP_REQUIRES_OK(
      ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()),
                                    tf_shape, output));
  // Then the serialized meta-data output.
  TensorShape meta_shape;
  meta_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
  Tensor* meta_tensor = nullptr;
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            meta_shape, &meta_tensor));
  mkl_shape.SerializeMklShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#else
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
// Allocates both the data output (with `tf_shape`) and its companion
// meta-data output, serializing `mkl_shape` (MKL-DNN flavor) into the
// latter.
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklDnnShape& mkl_shape) {
  // Data output first.
  OP_REQUIRES_OK(
      ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()),
                                    tf_shape, output));
  // Then the serialized meta-data output.
  TensorShape meta_shape;
  meta_shape.AddDim(mkl_shape.GetSerializeBufferSize());
  Tensor* meta_tensor = nullptr;
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            meta_shape, &meta_tensor));
  mkl_shape.SerializeMklDnnShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif
// Allocates a temp tensor and returns the data buffer for temporary storage.
// Currently
#ifndef INTEL_MKL_ML_ONLY
// Allocates a temp tensor big enough to back the given MKL-DNN primitive
// descriptor and returns its raw buffer in `*buf_out`.
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           const memory::primitive_desc& pd, void** buf_out) {
  // +1 element pads the integer division of the byte size by sizeof(T).
  const size_t num_elems = pd.get_size() / sizeof(T) + 1;
  TensorShape tf_shape;
  tf_shape.AddDim(num_elems);
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
                                                 tf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<T>().data());
}
#else
// MKL-ML variant: allocates a float temp tensor big enough to back the
// given MKL layout and returns its raw buffer in `*buf_out`.
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           dnnLayout_t lt_buff, void** buf_out) {
  // +1 element pads the integer division of the byte size by sizeof(float).
  const size_t num_elems =
      dnnLayoutGetMemorySize_F32(static_cast<dnnLayout_t>(lt_buff)) /
          sizeof(float) +
      1;
  TensorShape tf_shape;
  tf_shape.AddDim(num_elems);
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<float>::v(),
                                                 tf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<float>().data());
}
#endif
// Allocates a temp tensor of type T with the given TF shape; no raw
// buffer pointer is handed back by this overload.
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           TensorShape tf_shape) {
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
                                                 tf_shape, tensor_out));
}
// Fills `strides` (4 entries, MKL/NCHW stride order) from the 4 entries
// of `sizes`, honoring whether the data is NHWC or NCHW.
inline void GetStridesFromSizes(TensorFormat data_format, size_t* strides,
                                const size_t* sizes) {
  // MKL requires strides in NCHW
  const size_t s01 = sizes[0] * sizes[1];
  const size_t s012 = s01 * sizes[2];
  if (data_format == FORMAT_NHWC) {
    strides[0] = sizes[2];
    strides[1] = sizes[0] * sizes[2];
    strides[2] = 1;
    strides[3] = s012;
  } else {
    strides[0] = 1;
    strides[1] = sizes[0];
    strides[2] = s01;
    strides[3] = s012;
  }
}
#ifdef INTEL_MKL_ML_ONLY
// Converts the 4-D sizes stored in MKL-ML order into a Tensorflow
// TensorShape honoring `data_format_`. Raises InvalidArgument for any
// other rank.
inline void MklSizesToTFSizes(OpKernelContext* context,
                              TensorFormat data_format_,
                              const MklShape& mkl_shape,
                              TensorShape* tf_shape) {
  const size_t mkl_dim = mkl_shape.GetDimension();
  const size_t* mkl_sizes = mkl_shape.GetSizes();

  OP_REQUIRES(context, mkl_dim == 4,
              errors::InvalidArgument("MKLSizesToTFSizes: size must be 4-dim"));

  std::vector<int32> sizes;
  sizes.push_back(mkl_sizes[3]);  // batch dimension comes first either way
  if (data_format_ == FORMAT_NHWC) {
    sizes.push_back(mkl_sizes[1]);
    sizes.push_back(mkl_sizes[0]);
    sizes.push_back(mkl_sizes[2]);
  } else {
    sizes.push_back(mkl_sizes[2]);
    sizes.push_back(mkl_sizes[1]);
    sizes.push_back(mkl_sizes[0]);
  }
  OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(sizes, tf_shape));
}
#endif
// Maps a dimension character ('N', 'C', 'H', 'W') to its MKL-ML index;
// any other character is fatal.
inline int32 GetMklTensorDimIndex(char dimension) {
  if (dimension == 'N') return MklDims::N;
  if (dimension == 'C') return MklDims::C;
  if (dimension == 'H') return MklDims::H;
  if (dimension == 'W') return MklDims::W;
  LOG(FATAL) << "Invalid dimension: " << dimension;
  return -1;  // Avoid compiler warning about missing return value
}
#ifdef INTEL_MKL_ML_ONLY
// Returns the size of the dimension identified by character `dimension`
// within `mkl_shape` (MKL-ML flavor).
inline int64 GetMklTensorDim(const MklShape& mkl_shape, char dimension) {
  const int index = GetMklTensorDimIndex(dimension);
  CHECK(index >= 0 && index < mkl_shape.GetDimension())
      << "Invalid index from the dimension: " << index << ", " << dimension;
  return mkl_shape.dim_size(index);
}
// Copies both the data tensor and its MKL metadata tensor from input slot
// `idx_in` to output slot `idx_out`. Tensor::CopyFrom shares the underlying
// buffer, so this is a shallow forward rather than a deep copy.
inline void CopyMklTensorInToOut(OpKernelContext* context, int idx_in,
                                 int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  // An MKL tensor occupies two slots: a data tensor plus a metadata tensor.
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);
  const Tensor& data = context->input(idx_data_in);
  const Tensor& meta = context->input(idx_meta_in);
  Tensor output(data.dtype());
  Tensor meta_output(meta.dtype());
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, data.shape()));
  CHECK(meta_output.CopyFrom(meta, meta.shape()));
  context->set_output(idx_data_out, output);
  context->set_output(idx_meta_out, meta_output);
}
#ifdef INTEL_MKL_ML_ONLY
// Copies the data tensor from input slot `idx_in` to output slot `idx_out`,
// reinterpreted with the given `shape`, and marks the output's MKL shape as
// a plain (non-MKL) TF tensor. CopyFrom shares the buffer (shallow copy).
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  const Tensor& data = context->input(idx_data_in);
  // Publish a dummy (non-MKL) shape on the output's metadata slot.
  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
  Tensor output(data.dtype());
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, shape));
  context->set_output(idx_data_out, output);
}
#else
// MKL-DNN variant of CopyTfTensorInToOutWithShape: identical behavior except
// the output metadata is an MklDnnShape instead of the legacy MklShape.
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  const Tensor& data = context->input(idx_data_in);
  // Publish a dummy (non-MKL) shape on the output's metadata slot.
  MklDnnShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
  Tensor output(data.dtype());
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, shape));
  context->set_output(idx_data_out, output);
}
#endif
#ifdef INTEL_MKL_ML_ONLY
// Forwards the data tensor from input slot `idx_in` to output slot `idx_out`
// without copying, and marks the output's MKL shape as a plain TF tensor.
// Ref-typed inputs are forwarded as refs.
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#else
// MKL-DNN variant of ForwardTfTensorInToOut: identical behavior except the
// output metadata is an MklDnnShape instead of the legacy MklShape.
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  MklDnnShape dnn_shape_output;
  dnn_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, dnn_shape_output);
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#endif
// Forwards both the data tensor and its MKL metadata tensor from input slot
// `idx_in` to output slot `idx_out` without copying. Ref-typed inputs are
// forwarded as refs; plain tensors are re-published on the output slots.
inline void ForwardMklTensorInToOut(OpKernelContext* context, int idx_in,
                                    int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
    context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
    context->set_output(idx_meta_out, context->input(idx_meta_in));
  }
}
#ifndef INTEL_MKL_ML_ONLY
// Set a dummy MKLDNN shape (called when the output is in TF format), i.e.
// publish an MklDnnShape with the MKL flag cleared on the output's metadata
// slot so downstream ops treat the output as a plain TF tensor.
inline void SetDummyMklDnnShapeOutput(OpKernelContext* context,
                                      uint32 idx_data_out) {
  MklDnnShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output);
}
// Forwards the data tensor from input slot `idx_in` to output slot `idx_out`
// without copying, publishing the caller-supplied `mkl_shape` as the output's
// MKL metadata. Ref-typed inputs are forwarded as refs.
inline void ForwardMklTensorInToOutWithMklShape(OpKernelContext* context,
                                                int idx_in, int idx_out,
                                                const MklDnnShape& mkl_shape) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape);
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#endif
// Forward the MKL shape ONLY (used in elementwise and other ops where
// we call the eigen implementation and MKL shape is not used).
// Only the metadata slot is forwarded; the data tensor is left to the caller.
inline void ForwardMklMetaDataInToOut(OpKernelContext* context,
                                      uint32 idx_data_in,
                                      uint32_t idx_data_out) {
  uint32 idx_meta_in =
      GetTensorMetaDataIndex(idx_data_in, context->num_inputs());
  uint32 idx_meta_out =
      GetTensorMetaDataIndex(idx_data_out, context->num_outputs());
  // Ref-typed inputs must be forwarded as refs; plain tensors can simply be
  // re-published on the output slot.
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
  } else {
    context->set_output(idx_meta_out, context->input(idx_meta_in));
  }
}
#ifdef INTEL_MKL_ML_ONLY
// Set a dummy MKL shape (called when the output is in TF format): publishes
// an MklShape with the MKL flag cleared on the output's metadata slot.
inline void SetDummyMklShapeOutput(OpKernelContext* context,
                                   uint32 idx_data_out) {
  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output);
}
// We don't need these functions in MKLDNN. We have defined equality operator
// on MklDnnShape class directly.

// Checks if the TF shape for both MKL tensors is the same or not
// Returns: true if both TF shapes are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
                             const MklShape* input_shape_1) {
  // Check for number of dimensions
  if (input_shape_0->GetDimension() != input_shape_1->GetDimension()) {
    return false;
  }

  // Check size of each dimension
  size_t ndims = input_shape_0->GetDimension();
  for (size_t i = 0; i < ndims; i++) {
    if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
      return false;
    }
  }

  return true;
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
// Note: compares the MklShape's TF-order dim sizes (tf_dim_size) against the
// TensorShape's dim sizes.
inline bool MklCompareShapes(const MklShape* input_shape_0,
                             const TensorShape* input_shape_1) {
  // Check for number of dimensions
  if (input_shape_0->GetDimension() != input_shape_1->dims()) {
    return false;
  }

  // Check size of each dimension
  size_t ndims = input_shape_0->GetDimension();
  for (size_t i = 0; i < ndims; i++) {
    if (input_shape_0->tf_dim_size(i) != input_shape_1->dim_size(i)) {
      return false;
    }
  }

  return true;
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
// Thin wrapper that delegates to the (MklShape*, TensorShape*) overload with
// the arguments swapped; shape equality is symmetric.
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const MklShape* input_shape_1) {
  return MklCompareShapes(input_shape_1, input_shape_0);
}
// Checks whether two TensorShapes are identical: same rank and the same
// size in every dimension. Returns true when they match, false otherwise.
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const TensorShape* input_shape_1) {
  const int ndims = input_shape_0->dims();
  if (ndims != input_shape_1->dims()) return false;
  for (int i = 0; i < ndims; i++) {
    if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
      return false;
    }
  }
  return true;
}
// These functions do not compile with MKL-DNN since mkl.h is missing.
// We may need to remove them later.
// TODO(intel_tf): Remove this routine when faster MKL layout conversion is
// out.

// Transposes an NHWC float tensor into NCHW layout, one batch image at a
// time, using MKL's out-of-place matrix transpose (mkl_somatcopy).
inline void MklNHWCToNCHW(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();

  int64 N = input.dim_size(0);
  int64 H = input.dim_size(1);
  int64 W = input.dim_size(2);
  int64 C = input.dim_size(3);
  int64 stride_n = H * W * C;  // elements per batch image
// For each image, transpose the (H*W) x C row-major matrix into C x (H*W).
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    mkl_somatcopy('R', 'T', H * W, C, 1, buf_in + n * stride_n, C,
                  buf_out + n * stride_n, H * W);
  }
}
// Transposes an NCHW float tensor into NHWC layout, one batch image at a
// time, using MKL's out-of-place matrix transpose (mkl_somatcopy).
// Note: dimensions are read from the OUTPUT tensor (which is NHWC).
inline void MklNCHWToNHWC(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();

  int64 N = (*output)->dim_size(0);
  int64 H = (*output)->dim_size(1);
  int64 W = (*output)->dim_size(2);
  int64 C = (*output)->dim_size(3);
  int64 stride_n = H * W * C;  // elements per batch image
// For each image, transpose the C x (H*W) row-major matrix into (H*W) x C.
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    mkl_somatcopy('R', 'T', C, H * W, 1, buf_in + n * stride_n, H * W,
                  buf_out + n * stride_n, C);
  }
}

#endif
// -------------------------------------------------------------------

#ifndef INTEL_MKL_ML_ONLY

/// Return MKL-DNN data type (memory::data_type) for input type T
///
/// @input None
/// @return memory::data_type corresponding to type T
template <typename T>
static memory::data_type MklDnnType();

/// Instantiation for float type. Add similar instantiations for other
/// type if needed.
template <>
memory::data_type MklDnnType<float>() {
  return memory::data_type::f32;
}

/// Instantiation for quint8 (quantized unsigned 8-bit).
template <>
memory::data_type MklDnnType<quint8>() {
  return memory::data_type::u8;
}

/// Instantiation for qint8 (quantized signed 8-bit).
template <>
memory::data_type MklDnnType<qint8>() {
  return memory::data_type::s8;
}

/// Instantiation for qint32 (quantized signed 32-bit).
template <>
memory::data_type MklDnnType<qint32>() {
  return memory::data_type::s32;
}
/// Map TensorFlow's data format into MKL-DNN 3D data format
/// @input: TensorFlow data format
/// @return: memory::format corresponding to TensorFlow data format;
///          Fails with an error if invalid data format.
inline memory::format TFDataFormatToMklDnn3DDataFormat(TensorFormat format) {
  switch (format) {
    case FORMAT_NHWC:
      return memory::format::ndhwc;
    case FORMAT_NCHW:
      return memory::format::ncdhw;
    default:
      TF_CHECK_OK(
          Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
      return memory::format::format_undef;
  }
}
/// Map TensorFlow's data format into MKL-DNN data format
///
/// @input: TensorFlow data format
/// @return: memory::format corresponding to TensorFlow data format;
///          Fails with an error if invalid data format.
inline memory::format TFDataFormatToMklDnnDataFormat(TensorFormat format) {
  switch (format) {
    case FORMAT_NHWC:
      return memory::format::nhwc;
    case FORMAT_NCHW:
      return memory::format::nchw;
    default:
      TF_CHECK_OK(
          Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
      return memory::format::format_undef;
  }
}
/// Map MKL-DNN data format to TensorFlow's data format
///
/// @input: memory::format
/// @return: Tensorflow data format corresponding to memory::format
///          Fails with an error if invalid data format.
inline TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format) {
  switch (format) {
    case memory::format::nhwc:
    case memory::format::ndhwc:
      return FORMAT_NHWC;
    case memory::format::nchw:
    case memory::format::ncdhw:
      return FORMAT_NCHW;
    default:
      TF_CHECK_OK(
          Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
      // Unreachable when TF_CHECK_OK aborts; present only to silence
      // missing-return warnings.
      return FORMAT_NHWC;
  }
}
/// Map TensorShape object into memory::dims required by MKL-DNN
///
/// This function will simply map input TensorShape into MKL-DNN dims
/// naively. So it will preserve the order of dimensions. E.g., if
/// input tensor is in NHWC format, then dims will be in NHWC format
/// also.
///
/// @input TensorShape object in shape
/// @return memory::dims corresponding to TensorShape
inline memory::dims TFShapeToMklDnnDims(const TensorShape& shape) {
  const int rank = shape.dims();
  memory::dims dims;
  dims.reserve(rank);
  for (int d = 0; d < rank; ++d) {
    dims.push_back(shape.dim_size(d));
  }
  return dims;
}
/// Map TensorShape object into memory::dims in NCHW format required by MKL-DNN
///
/// Unlike TFShapeToMklDnnDims, this does NOT preserve dimension order: it
/// extracts N, C, H and W from `shape` (interpreted per `format`) and always
/// returns them in NCHW order.
///
/// @input TensorShape object in shape
/// @return memory::dims in MKL-DNN required NCHW format
inline memory::dims TFShapeToMklDnnDimsInNCHW(const TensorShape& shape,
                                              TensorFormat format) {
  // Validate the data format before indexing into the shape.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  const int batch = shape.dim_size(GetTensorDimIndex(format, 'N'));
  const int channels = shape.dim_size(GetTensorDimIndex(format, 'C'));
  const int height = shape.dim_size(GetTensorDimIndex(format, 'H'));
  const int width = shape.dim_size(GetTensorDimIndex(format, 'W'));

  // MKL-DNN requires dimensions in NCHW order.
  return memory::dims({batch, channels, height, width});
}
/// 3D (5-dimensional tensor) counterpart of TFShapeToMklDnnDimsInNCHW:
/// extracts N, C, D, H and W from `shape` (interpreted per `format`) and
/// returns them in NCDHW order, as required by MKL-DNN.
inline memory::dims TFShapeToMklDnnDimsInNCDHW(const TensorShape& shape,
                                               TensorFormat format) {
  // Validate the data format before indexing into the shape.
  CHECK_NE(TFDataFormatToMklDnn3DDataFormat(format),
           memory::format::format_undef);

  const int batch = shape.dim_size(GetTensorDimIndex<3>(format, 'N'));
  const int channels = shape.dim_size(GetTensorDimIndex<3>(format, 'C'));
  const int depth = shape.dim_size(GetTensorDimIndex<3>(format, '0'));
  const int height = shape.dim_size(GetTensorDimIndex<3>(format, '1'));
  const int width = shape.dim_size(GetTensorDimIndex<3>(format, '2'));

  // MKL-DNN requires dimensions in NCDHW order.
  return memory::dims({batch, channels, depth, height, width});
}
/// Overloaded version of TFShapeToMklDnnDimsInNCHW that takes the input
/// dimensions as memory::dims (in `format` order) instead of a TensorShape,
/// and returns them reordered into NCHW.
inline memory::dims MklDnnDimsInNCHW(const memory::dims& in_dims,
                                     TensorFormat format) {
  // Validate the data format before indexing into the dims.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  const int batch = in_dims[GetTensorDimIndex(format, 'N')];
  const int channels = in_dims[GetTensorDimIndex(format, 'C')];
  const int height = in_dims[GetTensorDimIndex(format, 'H')];
  const int width = in_dims[GetTensorDimIndex(format, 'W')];

  // MKL-DNN requires dimensions in NCHW order.
  return memory::dims({batch, channels, height, width});
}
/// Map MklDnn memory::dims object into TensorShape object.
///
/// Preserves dimension order: dims[i] becomes dimension i of the returned
/// TensorShape. CHECK-fails if the dims cannot form a valid shape.
///
/// @input MKL-DNN memory::dims object
/// @output TensorShape corresponding to memory::dims
inline TensorShape MklDnnDimsToTFShape(const memory::dims& dims) {
  std::vector<int32> shape;
  shape.reserve(dims.size());
  for (const auto& dim : dims) {
    shape.push_back(dim);
  }

  TensorShape ret;
  CHECK_EQ(TensorShapeUtils::MakeShape(shape, &ret).ok(), true);
  return ret;
}
/// Function to calculate strides given tensor shape in Tensorflow order
/// E.g., if dims_tf_order is {1, 2, 3, 4}, then as per Tensorflow convention,
/// dimension with size 1 is outermost dimension; while dimension with size 4
/// is innermost dimension. So strides for this tensor would be
/// {4 * 3 * 2, 4 * 3, 4, 1}, i.e., {24, 12, 4, 1}.
///
/// @input Tensorflow shape in memory::dims type
/// @return memory::dims containing strides for the tensor.
inline memory::dims CalculateTFStrides(const memory::dims& dims_tf_order) {
  CHECK_GT(dims_tf_order.size(), 0);
  const int rank = dims_tf_order.size();
  memory::dims strides(rank);
  // Walk from the innermost dimension outwards, carrying a running product
  // of the sizes already visited.
  int stride = 1;
  for (int d = rank - 1; d >= 0; d--) {
    strides[d] = stride;
    stride *= dims_tf_order[d];
  }
  return strides;
}
// Maps a TF Padding value to an MKL-DNN padding_kind. The argument is
// intentionally ignored: MKL-DNN only supports zero padding.
inline padding_kind TFPaddingToMklDnnPadding(Padding pad) {
  // MKL-DNN only supports zero padding.
  return padding_kind::zero;
}
/// Helper function to create memory descriptor in Blocked format
///
/// @input: Tensor dimensions
/// @input: strides corresponding to dimensions. One can use utility
///         function such as CalculateTFStrides to compute strides
///         for given dimensions.
/// @return: memory::desc object corresponding to blocked memory format
///          for given dimensions and strides.
inline memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
                                               const memory::dims& strides,
                                               memory::data_type dtype) {
  CHECK_EQ(dim.size(), strides.size());
  // We have to construct memory descriptor in a C style. This is not at all
  // ideal but MKLDNN does not offer any API to construct descriptor in
  // blocked format except a copy constructor that accepts
  // mkldnn_memory_desc_t.
  mkldnn_memory_desc_t md;
  md.primitive_kind = mkldnn_memory;
  md.ndims = dim.size();
  md.format = mkldnn_blocked;
  md.data_type = memory::convert_to_c(dtype);

  for (size_t i = 0; i < dim.size(); i++) {
    // Block size 1 in every dimension: a plain strided layout with no tiling.
    md.layout_desc.blocking.block_dims[i] = 1;
    // strides[1] holds within-block strides (trivially 1 for block size 1);
    // strides[0] holds between-block strides, i.e. the caller's strides.
    md.layout_desc.blocking.strides[1][i] = 1;
    md.layout_desc.blocking.strides[0][i] = strides[i];
    // No padding: padded dims equal the logical dims, offsets are zero.
    md.layout_desc.blocking.padding_dims[i] = dim[i];
    md.layout_desc.blocking.offset_padding_to_data[i] = 0;
    md.dims[i] = dim[i];
  }
  md.layout_desc.blocking.offset_padding = 0;

  return memory::desc(md);
}
// Forward declaration of the reorder-primitive cache helper used by
// MklDnnData below; returns a (possibly cached) reorder from `from` to `to`.
template <typename T>
inline primitive FindOrCreateReorder(const memory* from, const memory* to);
/*
* Class to represent all the resources corresponding to a tensor in TensorFlow
* that are required to execute an operation (such as Convolution).
*/
template <typename T>
class MklDnnData {
private:
/// MKL-DNN memory primitive for input user memory
memory* user_memory_;
/// MKL-DNN memory primitive in case input or output reorder is needed.
memory* reorder_memory_;
/// Operations memory descriptor
memory::desc* op_md_;
// flat to indicate if data is 3D or not.
bool bIs3D;
/// Operations temp buffer
void* allocated_buffer_;
/// CPU engine on which operation will be executed
const engine* cpu_engine_;
public:
explicit MklDnnData(const engine* e)
: user_memory_(nullptr),
reorder_memory_(nullptr),
op_md_(nullptr),
allocated_buffer_(nullptr),
cpu_engine_(e) {}
~MklDnnData() {
if (allocated_buffer_ != nullptr) {
cpu_allocator()->DeallocateRaw(allocated_buffer_);
}
cpu_engine_ = nullptr; // We don't own this.
delete (user_memory_);
delete (reorder_memory_);
delete (op_md_);
}
inline void* GetTensorBuffer(const Tensor* tensor) const {
CHECK_NOTNULL(tensor);
return const_cast<void*>(
static_cast<const void*>(tensor->flat<T>().data()));
}
void SetIs3DData(bool bIs3D_) { bIs3D = bIs3D_; }
bool GetIs3D() { return bIs3D; }
/// Set user memory primitive using specified dimensions, memory format and
/// data_buffer. Function automatically uses element data type by using
/// input type T used for creating call object.
///
/// In a nutshell, function allows user to describe the input tensor to
/// an operation. E.g., filter of Conv2D is of shape {1, 2, 3, 4}, and
/// memory format HWIO, and the buffer that contains actual values is
/// pointed by data_buffer.
inline void SetUsrMem(const memory::dims& dim, memory::format fm,
void* data_buffer = nullptr) {
auto md = memory::desc(dim, MklDnnType<T>(), fm);
SetUsrMem(md, data_buffer);
}
inline void SetUsrMem(const memory::dims& dim, memory::format fm,
const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(dim, fm, GetTensorBuffer(tensor));
}
/// Helper function to create memory descriptor in Blocked format
///
/// @input: Tensor dimensions
/// @input: strides corresponding to dimensions. One can use utility
/// function such as CalculateTFStrides to compute strides
/// for given dimensions.
/// @return: memory::desc object corresponding to blocked memory format
/// for given dimensions and strides.
static inline memory::desc CreateBlockedMemDesc(const memory::dims& dim,
const memory::dims& strides) {
return CreateBlockedMemDescHelper(dim, strides, MklDnnType<T>());
}
/// A version of SetUsrMem call that allows user to create memory in blocked
/// format. So in addition to accepting dimensions, it also accepts strides.
/// This allows user to create memory for tensor in a format that is not
/// supported by MKLDNN. E.g., MKLDNN does not support tensor format for 6
/// dimensional tensor as a native format. But by using blocked format, a user
/// can create memory for 6D tensor.
inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
void* data_buffer = nullptr) {
CHECK_EQ(dim.size(), strides.size());
auto blocked_md = MklDnnData<T>::CreateBlockedMemDesc(dim, strides);
SetUsrMem(blocked_md, data_buffer);
}
inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(dim, strides, GetTensorBuffer(tensor));
}
/// A version of function to set user memory primitive that accepts memory
/// descriptor directly, instead of accepting dimensions and format. This
/// function is more generic that the one above, but the function above is
/// sufficient in most cases.
inline void SetUsrMem(const memory::desc& md, void* data_buffer = nullptr) {
auto pd = memory::primitive_desc(md, *cpu_engine_);
SetUsrMem(pd, data_buffer);
}
/// A version of SetUsrMem with memory descriptor and tensor
inline void SetUsrMem(const memory::desc& md, const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(md, GetTensorBuffer(tensor));
}
/// A version of function to set user memory primitive that accepts primitive
/// descriptor directly, instead of accepting dimensions and format. This
/// function is more generic that the one above, but the function above is
/// sufficient in most cases.
inline void SetUsrMem(const memory::primitive_desc& pd,
void* data_buffer = nullptr) {
CHECK_NOTNULL(cpu_engine_);
if (user_memory_) delete user_memory_;
// TODO(nhasabni): can we remove dynamic memory allocation?
if (data_buffer) {
user_memory_ = new memory(pd, data_buffer);
} else {
user_memory_ = new memory(pd);
}
}
/// A version of SetUsrMem with primitive descriptor and tensor
inline void SetUsrMem(const memory::primitive_desc& pd,
const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(pd, GetTensorBuffer(tensor));
}
/// Get function for user memory primitive.
inline const memory* GetUsrMem() const { return user_memory_; }
/// Get function for primitive descriptor of user memory primitive.
inline const memory::primitive_desc GetUsrMemPrimDesc() const {
CHECK_NOTNULL(user_memory_);
return user_memory_->get_primitive_desc();
}
/// Get function for descriptor of user memory.
inline memory::desc GetUsrMemDesc() {
// This is ugly. Why MKL-DNN does not provide desc() method of const type??
const memory::primitive_desc pd = GetUsrMemPrimDesc();
return const_cast<memory::primitive_desc*>(&pd)->desc();
}
/// Get function for data buffer of user memory primitive.
inline void* GetUsrMemDataHandle() const {
CHECK_NOTNULL(user_memory_);
return user_memory_->get_data_handle();
}
/// Set function for data buffer of user memory primitive.
inline void SetUsrMemDataHandle(void* data_buffer) {
CHECK_NOTNULL(user_memory_);
CHECK_NOTNULL(data_buffer);
user_memory_->set_data_handle(data_buffer);
}
/// Set function for data buffer of user memory primitive.
inline void SetUsrMemDataHandle(const Tensor* tensor) {
CHECK_NOTNULL(user_memory_);
CHECK_NOTNULL(tensor);
user_memory_->set_data_handle(GetTensorBuffer(tensor));
}
/// allocate function for data buffer
inline void AllocateBuffer(size_t size) {
const int64 kMemoryAlginment = 64; // For AVX512 memory alignment.
allocated_buffer_ = cpu_allocator()->AllocateRaw(kMemoryAlginment, size);
}
inline void* GetAllocatedBuffer() { return allocated_buffer_; }
/// Get the memory primitive for input and output of an op. If inputs
/// to an op require reorders, then this function returns memory primitive
/// for reorder. Otherwise, it will return memory primitive for user memory.
///
/// E.g., Conv2D(I, F) is a primitive with I and F being inputs. Then to
/// execute Conv2D, we need memory primitive for I and F. Buf if reorder is
/// required for I and F (say I_r is reorder primitive for I; F_r is reorder
/// primitive for F), then we need I_r and F_r to perform Conv2D.
inline const memory& GetOpMem() const {
return reorder_memory_ ? *reorder_memory_ : *user_memory_;
}
/// Set memory descriptor of an operation in terms of dimensions and memory
/// format. E.g., For Conv2D, the dimensions would be same as user dimensions
/// but memory::format would be mkldnn::any because we want MKL-DNN to choose
/// best layout/format for given input dimensions.
inline void SetOpMemDesc(const memory::dims& dim, memory::format fm) {
// TODO(nhasabni): can we remove dynamic memory allocation?
op_md_ = new memory::desc(dim, MklDnnType<T>(), fm);
}
/// Get function for memory descriptor for an operation
inline const memory::desc& GetOpMemDesc() const { return *op_md_; }
/// Predicate that checks if we need to reorder user's memory into memory
/// pointed by op_pd.
///
/// @input: op_pd - memory primitive descriptor of the given input of an
/// operation
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool IsReorderNeeded(const memory::primitive_desc& op_pd) const {
CHECK_NOTNULL(user_memory_);
return op_pd != user_memory_->get_primitive_desc();
}
/// Predicate that checks if we need to reorder user's memory into memory
/// based on the provided format.
///
/// @input: target_format - memory format of the given input of an
/// operation
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool IsReorderNeeded(const memory::format& target_format) const {
CHECK_NOTNULL(user_memory_);
return target_format !=
user_memory_->get_primitive_desc().desc().data.format;
}
/// Function to create a reorder from memory pointed by from to memory pointed
/// by to. Returns created primitive.
inline primitive CreateReorder(const memory* from, const memory* to) const {
CHECK_NOTNULL(from);
CHECK_NOTNULL(to);
return reorder(*from, *to);
}
/// Function to handle input reordering
///
/// Check if we need to reorder this input of an operation.
/// Return true and allocate reorder memory primitive if reorder is needed.
/// Otherwise, return false and do not allocate reorder memory primitive.
///
/// To check if reorder is needed, this function compares memory primitive
/// descriptor of an operation (op_pd) for the given input with the
/// user-specified memory primitive descriptor.
///
/// @input: op_pd - memory primitive descriptor of the given input of an
/// operation
/// @input: net - net to which to add reorder primitive in case it is needed.
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
std::vector<primitive>* net) {
CHECK_NOTNULL(net);
CHECK_NOTNULL(user_memory_);
if (IsReorderNeeded(op_pd)) {
// TODO(nhasabni): can we remove dynamic memory allocation?
reorder_memory_ = new memory(op_pd);
net->push_back(CreateReorder(user_memory_, reorder_memory_));
return true;
}
return false;
}
/// TODO: this is a faster path with reorder primitive cache compared with
/// CheckReorderToOpMem(..., std::vector<primitive>* net), will remove
/// slow path in the future
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd) {
CHECK_NOTNULL(user_memory_);
if (IsReorderNeeded(op_pd)) {
// TODO(nhasabni): can we remove dynamic memory allocation?
// primitive reuse don't allow two same reorder prim in
// one stream, so submit it immediately
reorder_memory_ = new memory(op_pd);
std::vector<primitive> net;
net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_));
stream(stream::kind::eager).submit(net).wait();
return true;
}
return false;
}
/// Overloaded version of above function that accepts memory buffer
/// where output of reorder needs to be stored.
///
/// @input: op_pd - memory primitive descriptor of the given input of an
/// operation
/// @reorder_data_handle - memory buffer where output of reorder needs to be
/// stored. Primitive does not check if buffer is
/// enough size to write.
/// @input: net - net to which to add reorder primitive in case it is needed.
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
void* reorder_data_handle,
std::vector<primitive>* net) {
CHECK_NOTNULL(net);
CHECK_NOTNULL(reorder_data_handle);
CHECK_NOTNULL(user_memory_);
if (IsReorderNeeded(op_pd)) {
// TODO(nhasabni): can we remove dynamic memory allocation?
reorder_memory_ = new memory(op_pd, reorder_data_handle);
net->push_back(CreateReorder(user_memory_, reorder_memory_));
return true;
}
return false;
}
/// TODO: this is a faster path with reorder primitive cache compared with
/// CheckReorderToOpMem(..., std::vector<primitive>* net), will remove
/// slow path in the future
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
void* reorder_data_handle) {
CHECK_NOTNULL(reorder_data_handle);
CHECK_NOTNULL(user_memory_);
if (IsReorderNeeded(op_pd)) {
// TODO(nhasabni): can we remove dynamic memory allocation?
// primitive reuse don't allow two same reorder prim in
// one stream, so submit it immediately
std::vector<primitive> net;
reorder_memory_ = new memory(op_pd, reorder_data_handle);
net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_));
stream(stream::kind::eager).submit(net).wait();
return true;
}
return false;
}
/// Another overloaded version of CheckReorderToOpMem that accepts Tensor
/// where output of reorder needs to be stored.
///
/// @input: op_pd - memory primitive descriptor of the given input of an
/// operation
/// @reorder_tensor - Tensor whose buffer is to be used to store output of
/// reorder. Primitive does not check if buffer is
/// enough size to write.
/// @input: net - net to which to add reorder primitive in case it is needed.
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
Tensor* reorder_tensor,
std::vector<primitive>* net) {
CHECK_NOTNULL(net);
CHECK_NOTNULL(reorder_tensor);
return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor), net);
}
/// TODO: this is a faster path with reorder primitive cache compared with
/// CheckReorderToOpMem(..., std::vector<primitive>* net), will remove
/// slow path in the future
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
Tensor* reorder_tensor) {
CHECK_NOTNULL(reorder_tensor);
return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor));
}
/// Function to handle output reorder
///
/// This function performs very similar functionality as input reordering
/// function above. The only difference is that this function does not add
/// reorder primitive to the net. The reason for this is: the reorder
/// primitive for output needs to be added to the list only after operation
/// has executed. But we need to prepare a temporary buffer in case output
/// reorder is needed. And this temporary buffer will hold the output of
/// an operation before it is fed to reorder primitive.
///
/// @input memory primitive descriptor for the given output of an operation
/// @return: true in case reorder of output is needed; false, otherwise.
inline bool PrepareReorderToUserMemIfReq(
const memory::primitive_desc& op_pd) {
CHECK_NOTNULL(user_memory_);
if (IsReorderNeeded(op_pd)) {
// TODO(nhasabni): can we remove dynamic memory allocation?
reorder_memory_ = new memory(op_pd);
return true;
}
return false;
}
/// Function to actually insert reorder primitive in the net
///
/// This function completes remaining part of output reordering. It inserts
/// a reordering primitive from the temporary buffer that holds the output
/// to the user-specified output buffer.
///
/// @input: net - net to which to add reorder primitive
inline void InsertReorderToUserMem(std::vector<primitive>* net) {
CHECK_NOTNULL(net);
CHECK_NOTNULL(user_memory_);
CHECK_NOTNULL(reorder_memory_);
// Reorder from the temporary output buffer into the user-provided memory.
net->push_back(CreateReorder(reorder_memory_, user_memory_));
}
/// TODO: this is a faster path with reorder primitive cache compared with
/// InsertReorderToUserMem(std::vector<primitive>* net); the slow path
/// will be removed in the future.
inline void InsertReorderToUserMem() {
CHECK_NOTNULL(user_memory_);
CHECK_NOTNULL(reorder_memory_);
// Primitive reuse doesn't allow two identical reorder primitives in one
// stream, so submit it immediately on a fresh eager stream and wait.
std::vector<primitive> net;
net.push_back(FindOrCreateReorder<T>(reorder_memory_, user_memory_));
stream(stream::kind::eager).submit(net).wait();
}
};
/// Base class for operations with reuse of primitives
///
/// Cached primitives are constructed against the placeholder DummyData
/// handle and rebound to real buffers before execution (see
/// MklReorderPrimitive::SetMemory below).
class MklPrimitive {
public:
virtual ~MklPrimitive() {}
// Dummy data which MKL DNN never operates on; placeholder data handle
// used when building cacheable primitives.
unsigned char* DummyData = nullptr;
};
const mkldnn::memory::dims NONE_DIMS = {};
template <typename T>
class MklPrimitiveFactory {
public:
MklPrimitiveFactory() {}
~MklPrimitiveFactory() {}
// Looks up a cached primitive by key; returns nullptr when absent.
MklPrimitive* GetOp(const string& key) {
auto& map = MklPrimitiveFactory<T>::GetHashMap();
auto stream_iter = map.find(key);
if (stream_iter == map.end()) {
return nullptr;
} else {
CHECK(stream_iter->second != nullptr) << "nullptr present in map";
return stream_iter->second;
}
}
// Registers `op` under `key`. CHECK-fails if the key already exists, so
// callers must GetOp() first and only SetOp() on a miss.
void SetOp(const string& key, MklPrimitive* op) {
auto& map = MklPrimitiveFactory<T>::GetHashMap();
auto stream_iter = map.find(key);
CHECK(stream_iter == map.end());
map[key] = op;
}
/// Function to decide whether HW has AVX512 or AVX2
/// For those legacy devices (w/o AVX512 and AVX2),
/// MKL-DNN GEMM will be used.
static inline bool IsLegacyPlatform() {
return (!port::TestCPUFeature(port::CPUFeature::AVX512F) &&
!port::TestCPUFeature(port::CPUFeature::AVX2));
}
/// Function to check whether primitive memory optimization is enabled
/// (env var TF_MKL_OPTIMIZE_PRIMITIVE_MEMUSE, defaults to true).
static inline bool IsPrimitiveMemOptEnabled() {
bool is_primitive_mem_opt_enabled = true;
TF_CHECK_OK(ReadBoolFromEnvVar("TF_MKL_OPTIMIZE_PRIMITIVE_MEMUSE", true,
&is_primitive_mem_opt_enabled));
return is_primitive_mem_opt_enabled;
}
private:
// Per-thread cache: thread_local avoids locking, at the cost of not
// sharing primitives across threads.
static inline std::unordered_map<string, MklPrimitive*>& GetHashMap() {
static thread_local std::unordered_map<string, MklPrimitive*> map_;
return map_;
}
};
// Utility class for creating keys of the MKL primitive pool.
// Keys are built by appending the raw bytes of each field separated by
// 'x'; they are only meaningful as exact-match hash keys.
class FactoryKeyCreator {
public:
FactoryKeyCreator() { key_.reserve(kMaxKeyLength); }
~FactoryKeyCreator() {}
void AddAsKey(const string& str) { Append(str); }
// Appends each dimension as its binary int representation.
void AddAsKey(const mkldnn::memory::dims& dims) {
for (unsigned int i = 0; i < dims.size(); i++) {
AddAsKey<int>(dims[i]);
}
}
// Appends the raw bytes of any trivially copyable value.
template <typename T>
void AddAsKey(const T data) {
auto buffer = reinterpret_cast<const char*>(&data);
Append(StringPiece(buffer, sizeof(T)));
}
string GetKey() { return key_; }
private:
string key_;
const char delimiter = 'x';
const int kMaxKeyLength = 256;  // reserve() hint only; keys may grow past it
void Append(StringPiece s) {
key_.append(string(s));
key_.append(1, delimiter);
}
};
// Chooses the blocked memory format best suited to the CPU's vector ISA
// for a tensor with `channel` channels (2-D spatial by default, 3-D when
// is_2d == false).
static inline memory::format get_desired_format(int channel,
bool is_2d = true) {
memory::format fmt_desired = memory::format::any;
if (port::TestCPUFeature(port::CPUFeature::AVX512F)) {
// NOTE(review): unlike the AVX2 branch below, there is no
// (channel % 16) == 0 guard here — confirm that is intentional.
fmt_desired = is_2d ? memory::format::nChw16c : memory::format::nCdhw16c;
} else if (port::TestCPUFeature(port::CPUFeature::AVX2) &&
(channel % 8) == 0) {
fmt_desired = is_2d ? memory::format::nChw8c
: memory::format::ncdhw; // no avx2 support for 3d yet.
} else {
// Plain (non-blocked) layouts for legacy hardware or odd channel counts.
fmt_desired = is_2d ? memory::format::nchw : memory::format::ncdhw;
}
return fmt_desired;
}
// Cacheable reorder primitive. It is built once against placeholder
// (DummyData) buffers; SetMemory() rebinds it to the caller's real
// buffers before each execution.
class MklReorderPrimitive : public MklPrimitive {
public:
explicit MklReorderPrimitive(const memory* from, const memory* to) {
Setup(from, to);
}
~MklReorderPrimitive() {}
std::shared_ptr<primitive> GetPrimitive() { return context_.reorder_prim; }
// Point the cached primitive at the actual source/destination buffers.
// The layouts must match those used at construction time.
void SetMemory(const memory* from, const memory* to) {
context_.src_mem->set_data_handle(from->get_data_handle());
context_.dst_mem->set_data_handle(to->get_data_handle());
}
private:
// Holds the two memory endpoints and the reorder that connects them.
struct ReorderContext {
std::shared_ptr<mkldnn::memory> src_mem;
std::shared_ptr<mkldnn::memory> dst_mem;
std::shared_ptr<primitive> reorder_prim;
ReorderContext()
: src_mem(nullptr), dst_mem(nullptr), reorder_prim(nullptr) {}
} context_;
engine cpu_engine_ = engine(engine::cpu, 0);
// Create src/dst memories with dummy data handles and build the reorder.
void Setup(const memory* from, const memory* to) {
context_.src_mem.reset(new memory(
{from->get_primitive_desc().desc(), cpu_engine_}, DummyData));
context_.dst_mem.reset(
new memory({to->get_primitive_desc().desc(), cpu_engine_}, DummyData));
context_.reorder_prim = std::make_shared<mkldnn::reorder>(
reorder(*context_.src_mem, *context_.dst_mem));
}
};
template <typename T>
class MklReorderPrimitiveFactory : public MklPrimitiveFactory<T> {
public:
// Returns a (possibly cached) reorder primitive for from->to, already
// rebound to the data handles of `from` and `to`.
static MklReorderPrimitive* Get(const memory* from, const memory* to) {
auto reorderPrim = static_cast<MklReorderPrimitive*>(
MklReorderPrimitiveFactory<T>::GetInstance().GetReorder(from, to));
if (reorderPrim == nullptr) {
reorderPrim = new MklReorderPrimitive(from, to);
MklReorderPrimitiveFactory<T>::GetInstance().SetReorder(from, to,
reorderPrim);
}
reorderPrim->SetMemory(from, to);
return reorderPrim;
}
static MklReorderPrimitiveFactory& GetInstance() {
static MklReorderPrimitiveFactory instance_;
return instance_;
}
private:
MklReorderPrimitiveFactory() {}
~MklReorderPrimitiveFactory() {}
// Builds a cache key covering format, data type, dims, and the
// first-level blocking strides of both endpoints, so two reorders share
// a primitive only when both layouts match exactly.
static string CreateKey(const memory* from, const memory* to) {
string prefix = "reorder";
FactoryKeyCreator key_creator;
auto const& from_desc = from->get_primitive_desc().desc().data;
auto const& to_desc = to->get_primitive_desc().desc().data;
const int KIdxFirstStride = 0;
memory::dims from_dims(from_desc.dims, &from_desc.dims[from_desc.ndims]);
memory::dims to_dims(to_desc.dims, &to_desc.dims[to_desc.ndims]);
memory::dims from_strides(
from_desc.layout_desc.blocking.strides[KIdxFirstStride],
&from_desc.layout_desc.blocking
.strides[KIdxFirstStride][from_desc.ndims]);
memory::dims to_strides(
to_desc.layout_desc.blocking.strides[KIdxFirstStride],
&to_desc.layout_desc.blocking.strides[KIdxFirstStride][to_desc.ndims]);
key_creator.AddAsKey(prefix);
key_creator.AddAsKey(static_cast<int>(from_desc.format));
key_creator.AddAsKey(static_cast<int>(from_desc.data_type));
key_creator.AddAsKey(from_dims);
key_creator.AddAsKey(from_strides);
key_creator.AddAsKey(static_cast<int>(to_desc.format));
key_creator.AddAsKey(static_cast<int>(to_desc.data_type));
key_creator.AddAsKey(to_dims);
key_creator.AddAsKey(to_strides);
return key_creator.GetKey();
}
MklPrimitive* GetReorder(const memory* from, const memory* to) {
string key = CreateKey(from, to);
return this->GetOp(key);
}
void SetReorder(const memory* from, const memory* to, MklPrimitive* op) {
string key = CreateKey(from, to);
this->SetOp(key, op);
}
};
/// Function to find (or create) a reorder from the memory pointed to by
/// `from` to the memory pointed to by `to`. The primitive is created on
/// first use and fetched from the per-thread pool when already cached.
/// Returns the primitive.
template <typename T>
inline primitive FindOrCreateReorder(const memory* from, const memory* to) {
CHECK_NOTNULL(from);
CHECK_NOTNULL(to);
// The factory rebinds the cached primitive to the data handles of
// `from`/`to` before returning it.
MklReorderPrimitive* reorder_prim =
MklReorderPrimitiveFactory<T>::Get(from, to);
return *reorder_prim->GetPrimitive();
}
// utility function to determine if it is conv 1x1 and stride != 1
// for purpose of temporarily disabling primitive reuse
inline bool IsConv1x1StrideNot1(memory::dims filter_dims,
memory::dims strides) {
if (filter_dims.size() != 4 || strides.size() != 2) return false;
return ((filter_dims[2] == 1) && (filter_dims[3] == 1) &&
((strides[0] != 1) || (strides[1] != 1)));
}
#endif // INTEL_MKL_DNN
} // namespace tensorflow
#endif // INTEL_MKL
#endif // TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
|
mixed_tentusscher_myo_epi_2004_S2_10.c | // Scenario 1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S2_10.h"
// Fills the solver's cell-model descriptor on request: the resting
// membrane potential and/or the number of ODE state variables.
GET_CELL_MODEL_DATA(init_cell_model_data)
{
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
// Loads the initial state vector for one cell. The per-cell tissue type is
// taken from the mapping array passed in extra_data: 0 selects the
// myocardium steady state, anything else the epicardium steady state.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
static bool first_call = true;
if(first_call)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
first_call = false;
}
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
// Aborts the run: the mixed model cannot work without the mask.
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
// Initial conditions for TenTusscher myocardium
if (mapping[sv_id] == 0)
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions (same variable order as above)
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
// Initial conditions for TenTusscher epicardium
else
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions (same variable order as above)
real sv_sst[]={-86.6190487792098,0.00127615275701324,0.780956535094998,0.780847063341448,0.000173448982750495,0.485618885325203,0.00292967767959199,0.999998364838194,1.91717709945144e-08,1.87830179933651e-05,0.999774468479350,1.00704023911659,0.999994520006527,4.64680265082926e-05,0.570657756232895,10.2852308259172,139.261705705374};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
}
// Advances every requested cell by num_steps explicit steps of size dt,
// dispatching each cell to the myocardium or epicardium RHS according to
// the per-cell mask passed in extra_data. Cells are solved in parallel.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
uint32_t sv_id;
int i;
// sv_id must be private: each thread resolves its own cell index.
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++)
{
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = (uint32_t )i;
for (int j = 0; j < num_steps; ++j)
{
// NOTE(review): the mask is indexed with the loop index `i`, while the
// state vector uses sv_id = cells_to_solve[i]. The initial-condition
// function above indexes the mask with sv_id — confirm which is intended
// when cells_to_solve is non-NULL.
if (mapping[i] == 0)
solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
else
solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
// Advances the myocardium state vector sv in place by one step of size dt.
// RHS_cpu_myo returns the already-updated state in `next` (not raw
// derivatives), so it is copied straight back over sv.
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
    real state[NEQ], next[NEQ];

    for (int k = 0; k < NEQ; k++)
        state[k] = sv[k];

    RHS_cpu_myo(state, next, stim_current, dt);

    for (int k = 0; k < NEQ; k++)
        sv[k] = next[k];
}
// Right-hand side of the TenTusscher 2004 myocardium cell model.
// NOTE: despite the name, rDY_ receives UPDATED state values rather than
// time derivatives: gate variables are advanced with the Rush-Larsen
// exponential scheme, the membrane potential with explicit Euler, and the
// ionic concentrations are already integrated by dt here. The caller
// (solve_model_ode_cpu_myo) copies rDY_ back into sv.
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents (reversal potentials via Nernst equation)
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations (analytic buffering, explicit Euler in time)
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates (Rush-Larsen: exact exponential relaxation toward X_INF)
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// FCa and G gates may not increase while the cell is depolarized
// (above -37 mV): keep the old value in that case.
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage (explicit Euler; sItot is the total inward-positive sum)
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
// Advances the epicardium state vector sv in place by one step of size dt.
// RHS_cpu_epi returns the already-updated state in `next` (not raw
// derivatives), so it is copied straight back over sv.
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
    real state[NEQ], next[NEQ];

    for (int k = 0; k < NEQ; k++)
        state[k] = sv[k];

    RHS_cpu_epi(state, next, stim_current, dt);

    for (int k = 0; k < NEQ; k++)
        sv[k] = next[k];
}
// Right-hand side of the TenTusscher 2004 epicardium cell model (S2
// scenario). As in RHS_cpu_myo, rDY_ receives UPDATED state values:
// Rush-Larsen for gates, explicit Euler for V, concentrations already
// advanced by dt. Note that the default conductances declared below are
// overridden by the fitted `parameters[]` set before any current is
// computed.
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// S2 parameter set: overrides the defaults above. arel/crel/Vleak replace
// the hard-coded Irel/Ileak constants used in the myocardium RHS.
real parameters []={14.4075043407048,2.30190614350519e-05,0.000132186955734266,0.000460438593474590,0.230805741240155,0.128769301850520,0.167089340366410,4.76580224949500,0.0120157545262487,1.45704463630229,1089.95375481761,0.000516367300199849,0.468938665984628,0.0163624321716470,0.00234457045494790,4.25912616439814e-05}; GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents (reversal potentials via Nernst equation)
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations (analytic buffering, explicit Euler in time)
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates (Rush-Larsen: exact exponential relaxation toward X_INF)
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// FCa and G gates may not increase while the cell is depolarized
// (above -37 mV): keep the old value in that case.
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage (explicit Euler; sItot is the total inward-positive sum)
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
omp_red_bench.c | /*
* Copyright (c) 2013-2016, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
* ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
#include <omp.h>
#include "measurement_framework.h"
#include "placement.h"
#include <sched.h>
#include <pthread.h>
/*-----------------------------------------------------------*/
/* reduceKernel                                              */
/*                                                           */
/* Main kernel for the OpenMP reduction benchmark.           */
/* Each OpenMP thread pins itself to a core chosen by the    */
/* placement policy, contributes to a reduction(+) variable, */
/* and the cost of the reduction/join phase is recorded with */
/* the sk_m_* measurement framework.                         */
/*-----------------------------------------------------------*/
/*
 * Runs `totalReps` timed iterations of an OpenMP reduction(+) across all
 * threads. `fill` selects the thread-placement policy (dense fill vs
 * round-robin) and the output file name. Returns 0 on success, -1 on
 * allocation failure. Measurement buffers stay allocated until process
 * exit (they back the printed results), which is acceptable for a
 * benchmark.
 */
int reduceKernel(int totalReps, int fill){
    int repIter;
    struct sk_measurement *mes;
    char outname[512];
    int i;
    /* int, not char: char overflowed after ~127 accumulated increments
     * (totalReps * nthreads additions). */
    int address = 1;
    /* Ensure the master thread runs on core 0. */
    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(0, &mask);
    pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &mask);
    /* Bounded formatting instead of sprintf. */
    if (fill) {
        snprintf(outname, sizeof(outname), "barriers_omp_fill%d",
                 omp_get_max_threads());
    } else {
        snprintf(outname, sizeof(outname), "barriers_omp_rr%d",
                 omp_get_max_threads());
    }
    mes = malloc(sizeof(struct sk_measurement) * omp_get_max_threads());
    if (mes == NULL)
        return -1;
    for (i = 0; i < omp_get_max_threads(); i++) {
        uint64_t *buf = malloc(sizeof(uint64_t) * totalReps);
        if (buf == NULL)
            return -1;
        sk_m_init(&(mes[i]), totalReps, outname, buf);
    }
    uint32_t *cores = placement(omp_get_max_threads(), fill);
    for (repIter = 0; repIter < totalReps; repIter++) {
        /* Open the parallel region */
        #pragma omp parallel default(none) \
            private(mask) \
            shared(mes, cores) \
            reduction(+:address)
        {
            int tid = omp_get_thread_num();
            /* Pin this thread to its assigned core. */
            CPU_ZERO(&mask);
            CPU_SET(cores[tid], &mask);
            pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &mask);
            address += 1;
            #pragma omp barrier
            #pragma omp barrier
            /* Start the clock just before the implicit reduction/join. */
            sk_m_restart_tsc(&(mes[tid]));
        }
        /* Record the elapsed reduction/join time. The original code used
         * `tid` here, which is not in scope outside the parallel region
         * (compile error); thread 0's timer matches the mes[0] printed
         * below. */
        sk_m_add(&(mes[0]));
    }
    sk_m_print(&(mes[0]), 0);
    return 0;
}
// Runs the reduction benchmark twice with repsToDo repetitions each: once
// with fill=1 and once with fill=0 (the two placement policies — the
// "fill" vs "rr" output files of reduceKernel). Always returns 0.
int reduceDriver(int totalReps){
/* initialise repsToDo to defaultReps */
int repsToDo = totalReps;
// int warmUpIters = 10;
printf("Entering\n");fflush(stdout);
/* perform warm-up for benchmark */
// barrierKernel(warmUpIters,1);
// printf("Warmup done\n");fflush(stdout);
/* Initialise the benchmark */
reduceKernel(repsToDo, 1);
reduceKernel(repsToDo, 0);
return 0;
}
/* Entry point: run the reduce benchmark with the default repetition
 * count. Command-line arguments are accepted but unused. */
int main(int argc, char *argv[]){
    (void)argc;
    (void)argv;
    int totalReps = 1000;
    printf("Calling Reduce driver\n");
    fflush(stdout);
    reduceDriver(totalReps);
    return 0;  /* explicit, matching the other int-returning functions */
}
|
backprop.c | /*
******************************************************************
* HISTORY
* 15-Oct-94 Jeff Shufelt (js), Carnegie Mellon University
* Prepared for 15-681, Fall 1994.
* Modified by Shuai Che
******************************************************************
*/
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
#include "backprop.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define ERROR_THRESHOLD 0.00
#define ABS(x) (((x) > 0.0) ? (x) : (-(x)))
#define fastcopy(to, from, len) \
{ \
register char *_to, *_from; \
register int _i, _l; \
_to = (char *)(to); \
_from = (char *)(from); \
_l = (len); \
for (_i = 0; _i < _l; _i++) \
*_to++ = *_from++; \
}
/*** Return random number between 0.0 and 1.0 (rand() scaled by BIGRND,
     defined in backprop.h) ***/
float drnd() { return ((float)rand() / (float)BIGRND); }
/*** Return random number between -1.0 and 1.0 (affine rescale of drnd) ***/
float dpn1() { return ((drnd() * 2.0) - 1.0); }
/*** The squashing function. Currently, it's a sigmoid. ***/
float squash(x) float x;
{
float m;
// x = -x;
// m = 1 + x + x*x/2 + x*x*x/6 + x*x*x*x/24 + x*x*x*x*x/120;
// return(1.0 / (1.0 + m));
return (1.0 / (1.0 + exp(-x)));
}
/*** Allocate 1d array of n floats. Returns NULL (after printing a
     diagnostic) on allocation failure. The caller owns the memory and
     frees it with free(). ***/
float *alloc_1d_dbl(int n)
{
  /* Idiomatic C: no malloc cast, size tied to the pointee type. */
  float *new = malloc((size_t)n * sizeof *new);
  if (new == NULL) {
    printf("ALLOC_1D_DBL: Couldn't allocate array of floats\n");
    return (NULL);
  }
  return (new);
}
/*** Allocate 2d array of floats (m row pointers, each row n floats).
     Returns NULL on failure. Unlike the original, a failed row
     allocation now frees the partial structure and returns NULL instead
     of silently leaving NULL rows for later code to dereference. ***/
float **alloc_2d_dbl(int m, int n)
{
  float **new = malloc((size_t)m * sizeof *new);
  if (new == NULL) {
    printf("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n");
    return (NULL);
  }
  for (int i = 0; i < m; i++) {
    new[i] = alloc_1d_dbl(n);
    if (new[i] == NULL) {
      /* Roll back the rows allocated so far. */
      while (i-- > 0)
        free(new[i]);
      free(new);
      return (NULL);
    }
  }
  return (new);
}
/*** Fill the (m+1) x (n+1) weight matrix w with uniform random values in
     [0, 1]. Note the inclusive bounds: row m and column n are written
     (presumably the extra slot is the bias/threshold unit — confirm
     against backprop.h). Converted from an implicit-int K&R definition
     (invalid since C99) to a prototyped void function; no caller used the
     return value. ***/
void bpnn_randomize_weights(float **w, int m, int n)
{
  for (int i = 0; i <= m; i++) {
    for (int j = 0; j <= n; j++) {
      w[i][j] = (float)rand() / RAND_MAX;
    }
  }
}
/* Set every entry of the (m+1)-element row to the constant 0.1 (the random
 * variant stays intentionally disabled).  Return type fixed from implicit
 * int (no value was ever returned -- UB if used) to void. */
void bpnn_randomize_row(float *w, int m)
{
  int i;
  for (i = 0; i <= m; i++) {
    // w[i] = (float) rand()/RAND_MAX;
    w[i] = 0.1;
  }
}
/* Zero the (m+1) x (n+1) weight matrix.  Return type fixed from implicit
 * int (no value was returned -- UB if used) to void. */
void bpnn_zero_weights(float **w, int m, int n)
{
  int i, j;
  for (i = 0; i <= m; i++) {
    for (j = 0; j <= n; j++) {
      w[i][j] = 0.0;
    }
  }
}
/* Seed the C library PRNG used by drnd()/bpnn_randomize_weights().
 * The parameter previously had no type (implicit int, removed from the
 * language in C99); it is now declared explicitly. */
void bpnn_initialize(int seed) {
  printf("Random number generator seed: %d\n", seed);
  srand(seed);
}
/* Allocate a BPNN with the given layer sizes and all working storage
 * (unit activations, deltas, targets, weight and momentum matrices).
 * Each array carries one extra slot: index 0 is the bias/threshold unit.
 * Returns NULL only if the BPNN struct itself cannot be allocated.
 * NOTE(review): the member allocations below are not checked -- a failed
 * alloc_1d_dbl/alloc_2d_dbl leaves a NULL member that will crash later. */
BPNN *bpnn_internal_create(n_in, n_hidden, n_out) int n_in, n_hidden, n_out;
{
  BPNN *newnet;
  newnet = (BPNN *)malloc(sizeof(BPNN));
  if (newnet == NULL) {
    printf("BPNN_CREATE: Couldn't allocate neural network\n");
    return (NULL);
  }
  newnet->input_n = n_in;
  newnet->hidden_n = n_hidden;
  newnet->output_n = n_out;
  newnet->input_units = alloc_1d_dbl(n_in + 1);
  newnet->hidden_units = alloc_1d_dbl(n_hidden + 1);
  newnet->output_units = alloc_1d_dbl(n_out + 1);
  newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1);
  newnet->output_delta = alloc_1d_dbl(n_out + 1);
  newnet->target = alloc_1d_dbl(n_out + 1);
  /* Weight matrices are (rows+1) x (cols+1) to include the bias row/col. */
  newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
  newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
  /* *_prev_weights hold the previous update for the momentum term. */
  newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
  newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
  return (newnet);
}
/* Release every array owned by `net` (the inverse of bpnn_internal_create),
 * then the BPNN struct itself.  Row counts are recovered from the stored
 * layer sizes; the 2d matrices have (layer_size + 1) rows for the bias. */
void bpnn_free(net) BPNN *net;
{
  int n1, n2, i;
  n1 = net->input_n;
  n2 = net->hidden_n;
  free((char *)net->input_units);
  free((char *)net->hidden_units);
  free((char *)net->output_units);
  free((char *)net->hidden_delta);
  free((char *)net->output_delta);
  free((char *)net->target);
  /* Free each row of the input-side matrices, then the row-pointer arrays. */
  for (i = 0; i <= n1; i++) {
    free((char *)net->input_weights[i]);
    free((char *)net->input_prev_weights[i]);
  }
  free((char *)net->input_weights);
  free((char *)net->input_prev_weights);
  /* Same for the hidden-side matrices. */
  for (i = 0; i <= n2; i++) {
    free((char *)net->hidden_weights[i]);
    free((char *)net->hidden_prev_weights[i]);
  }
  free((char *)net->hidden_weights);
  free((char *)net->hidden_prev_weights);
  free((char *)net);
}
/*** Creates a new fully-connected network from scratch,
with the given numbers of input, hidden, and output units.
Threshold units are automatically included. All weights are
randomly initialized.
Space is also allocated for temporary storage (momentum weights,
error computations, etc).
***/
/* Public constructor: allocate the network, then initialize its weights
 * (random by default, zeroed input weights when INITZERO is defined),
 * clear the momentum matrices, and seed the target row.
 * NOTE(review): bpnn_internal_create may return NULL, which would crash
 * the initializers below -- unchecked in the original. */
BPNN *bpnn_create(n_in, n_hidden, n_out) int n_in, n_hidden, n_out;
{
  BPNN *newnet;
  newnet = bpnn_internal_create(n_in, n_hidden, n_out);
#ifdef INITZERO
  bpnn_zero_weights(newnet->input_weights, n_in, n_hidden);
#else
  bpnn_randomize_weights(newnet->input_weights, n_in, n_hidden);
#endif
  bpnn_randomize_weights(newnet->hidden_weights, n_hidden, n_out);
  /* Momentum matrices start at zero: the first update has no history. */
  bpnn_zero_weights(newnet->input_prev_weights, n_in, n_hidden);
  bpnn_zero_weights(newnet->hidden_prev_weights, n_hidden, n_out);
  bpnn_randomize_row(newnet->target, n_out);
  return (newnet);
}
/* Count the entries of l2[1..n2] whose GPU counterpart differs by more than
 * ERROR_THRESHOLD percent (per percentDiff), report the count, and return it.
 * The original declared `int` but never returned a value -- undefined
 * behavior whenever a caller used the result. */
int compareResults(float *l2, float *l2_gpu, int n2) {
  int i;
  int fail = 0;
  for (i = 1; i <= n2; i++) {
    if (percentDiff(l2[i], l2_gpu[i]) > ERROR_THRESHOLD) {
      fail++;
    }
  }
  // print results
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         ERROR_THRESHOLD, fail);
  return fail;
}
/* Propagate activations from layer l1 (n1 units) to layer l2 (n2 units)
 * through the weight matrix conn, on the device (OpenMP target) and on the
 * host, timing both and comparing the results.  Index 0 of l1 is the bias
 * ("thresholding") unit, forced to 1.0.
 * Fixes: `k` and `sum` were shared across the device worker threads (a data
 * race); conn_gpu and l2_gpu were leaked on every call. */
void bpnn_layerforward(l1, l2, conn, n1, n2) float *l1, *l2, **conn;
int n1, n2;
{
  double t_start, t_end;
  float sum;
  int j, k;
  /* Flatten conn for the device.  The fill here and the kernel below both
   * index with stride n2, so the layout is self-consistent within this
   * function (the buffer is sized (n1+1)*(n2+1), which covers the maximum
   * index n2*(n1+1)). */
  float *conn_gpu = (float *)malloc(sizeof(float) * ((n1 + 1) * (n2 + 1)));
  for (j = 1; j <= n2; j++) {
    for (k = 0; k <= n1; k++) {
      conn_gpu[k * n2 + j] = conn[k][j];
    }
  }
  float *l2_gpu = (float *)malloc(sizeof(float) * (n2 + 1));
  /*** Set up thresholding unit ***/
  l1[0] = 1.0;
  t_start = rtclock();
#pragma omp target map(to : conn_gpu[ : ( \
    n1 + 1) * (n2 + 1)], \
    l1[ : n1 + 1]) map(tofrom : l2_gpu[ : n2 + 1]) device(DEVICE_ID)
  {
/* private(k, sum): both were previously shared across the team, racing
 * between iterations of j (which, as the loop variable, is already private). */
#pragma omp parallel for private(k, sum)
    for (j = 1; j <= n2; j++) {
      /*** Compute weighted sum of its inputs ***/
      sum = 0.0;
      for (k = 0; k <= n1; k++) {
        sum += conn_gpu[k * n2 + j] * l1[k];
      }
      l2_gpu[j] = (1.0 / (1.0 + exp(-sum)));
    }
  }
  t_end = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
  /* Host reference computation. */
  t_start = rtclock();
  for (j = 1; j <= n2; j++) {
    sum = 0.0;
    for (k = 0; k <= n1; k++) {
      sum += conn[k][j] * l1[k];
    }
    l2[j] = squash(sum);
  }
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
  compareResults(l2, l2_gpu, n2);
  printf("\n");
  free(conn_gpu); /* previously leaked */
  free(l2_gpu);   /* previously leaked */
}
// extern "C"
/*** Output-layer error via the delta rule: delta_j = o_j(1-o_j)(t_j-o_j).
 *** Entries are 1-based; *err receives the sum of |delta_j|. ***/
void bpnn_output_error(delta, target, output, nj, err) float *delta, *target,
    *output, *err;
int nj;
{
  int j;
  float errsum = 0.0;
  for (j = 1; j <= nj; j++) {
    float o = output[j];
    float t = target[j];
    float d = o * (1.0 - o) * (t - o);
    delta[j] = d;
    /* inline expansion of the file's ABS() macro */
    errsum += (d > 0.0) ? d : -d;
  }
  *err = errsum;
}
/*** Backpropagate error into the hidden layer:
 *** delta_h[j] = h_j(1-h_j) * sum_k delta_o[k] * who[j][k].
 *** Entries are 1-based; *err receives the sum of |delta_h[j]|. ***/
void bpnn_hidden_error(delta_h, nh, delta_o, no, who, hidden,
                       err) float *delta_h,
    *delta_o, *hidden, **who, *err;
int nh, no;
{
  int j, k;
  float errsum = 0.0;
  for (j = 1; j <= nh; j++) {
    float h = hidden[j];
    float downstream = 0.0;
    for (k = 1; k <= no; k++) {
      downstream += delta_o[k] * who[j][k];
    }
    float d = h * (1.0 - h) * downstream;
    delta_h[j] = d;
    /* inline expansion of the file's ABS() macro */
    errsum += (d > 0.0) ? d : -d;
  }
  *err = errsum;
}
/* Compare the flattened GPU weight/momentum matrices against the 2d CPU
 * versions; every entry (including the bias row k = 0) that differs by more
 * than ERROR_THRESHOLD percent counts one failure per matrix, and the total
 * is printed.  The flattened index k * ndelta + j matches the layout used
 * by bpnn_adjust_weights. */
void compareResults2(float *w_gpu, float **w_cpu, float *oldw_gpu,
                     float **oldw_cpu, int ndelta, int nly) {
  int i;  /* unused; kept for byte-identity with the original */
  int fail = 0;
  // Compare C with D
  int k, j;
  for (j = 1; j <= ndelta; j++) {
    for (k = 0; k <= nly; k++) {
      if (percentDiff(w_gpu[k * ndelta + j], w_cpu[k][j]) > ERROR_THRESHOLD) {
        fail++;
      }
      if (percentDiff(oldw_gpu[k * ndelta + j], oldw_cpu[k][j]) >
          ERROR_THRESHOLD) {
        fail++;
      }
    }
  }
  // print results
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         ERROR_THRESHOLD, fail);
}
/* Momentum weight update: w[k][j] += ETA*delta[j]*ly[k] + MOMENTUM*oldw[k][j],
 * with oldw recording the applied step for the next call.  Runs once on the
 * device (OpenMP target, on flattened copies) and once on the host, then
 * compares the two.  ly[0] is the bias unit, forced to 1.0.
 * Fixes: `k` and `new_dw` were shared across the device worker threads (a
 * data race); w_gpu/oldw_gpu were leaked; ndelta/nly were implicitly-typed
 * K&R ints, now declared explicitly (same default promotions, so callers
 * are unaffected). */
void bpnn_adjust_weights(float *delta, int ndelta, float *ly, int nly,
                         float **w, float **oldw)
{
  float new_dw;
  int k, j;
  ly[0] = 1.0;
  double t_start, t_end;
  /* Flatten w/oldw for the device; stride ndelta matches the kernel below. */
  float *w_gpu = (float *)malloc(sizeof(float) * ((ndelta + 1) * (nly + 1)));
  float *oldw_gpu = (float *)malloc(sizeof(float) * ((ndelta + 1) * (nly + 1)));
  for (j = 1; j <= ndelta; j++) {
    for (k = 0; k <= nly; k++) {
      w_gpu[k * ndelta + j] = w[k][j];
      oldw_gpu[k * ndelta + j] = oldw[k][j];
    }
  }
  int size = (ndelta + 1) * (nly + 1);
  t_start = rtclock();
#pragma omp target map( \
    to : ly[ : (nly + 1)], delta[ : (ndelta + 1)]) \
    map(tofrom : oldw_gpu[ : size], w_gpu[ : size]) \
    device(DEVICE_ID)
  {
/* private(k, new_dw) fixes a data race (both were shared before); the
 * no-op collapse(1) clause was dropped. */
#pragma omp parallel for private(k, new_dw)
    for (j = 1; j <= ndelta; j++) {
      for (k = 0; k <= nly; k++) {
        new_dw =
            ((ETA * delta[j] * ly[k]) + (MOMENTUM * oldw_gpu[k * ndelta + j]));
        w_gpu[k * ndelta + j] += new_dw;
        oldw_gpu[k * ndelta + j] = new_dw;
      }
    }
  }
  t_end = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
  /* Host reference update. */
  t_start = rtclock();
  for (j = 1; j <= ndelta; j++) {
    for (k = 0; k <= nly; k++) {
      new_dw = ((ETA * delta[j] * ly[k]) + (MOMENTUM * oldw[k][j]));
      w[k][j] += new_dw;
      oldw[k][j] = new_dw;
    }
  }
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
  compareResults2(w_gpu, w, oldw_gpu, oldw, ndelta, nly);
  printf("\n");
  free(w_gpu);    /* previously leaked */
  free(oldw_gpu); /* previously leaked */
}
/* Run one forward pass: input layer -> hidden layer -> output layer,
 * using bpnn_layerforward for each hop. */
void bpnn_feedforward(net) BPNN *net;
{
  int in, hid, out;
  in = net->input_n;
  hid = net->hidden_n;
  out = net->output_n;
  /*** Feed forward input activations. ***/
  bpnn_layerforward(net->input_units, net->hidden_units, net->input_weights, in,
                    hid);
  bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights,
                    hid, out);
}
/* One full training step: forward pass, error computation against
 * net->target, then backprop weight updates (output side first, so the
 * hidden deltas used below were computed from the pre-update weights).
 * *eo / *eh receive the summed output / hidden error magnitudes. */
void bpnn_train(net, eo, eh) BPNN *net;
float *eo, *eh;
{
  int in, hid, out;
  float out_err, hid_err;
  in = net->input_n;
  hid = net->hidden_n;
  out = net->output_n;
  /*** Feed forward input activations. ***/
  bpnn_layerforward(net->input_units, net->hidden_units, net->input_weights, in,
                    hid);
  bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights,
                    hid, out);
  /*** Compute error on output and hidden units. ***/
  bpnn_output_error(net->output_delta, net->target, net->output_units, out,
                    &out_err);
  bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out,
                    net->hidden_weights, net->hidden_units, &hid_err);
  *eo = out_err;
  *eh = hid_err;
  /*** Adjust input and hidden weights. ***/
  bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid,
                      net->hidden_weights, net->hidden_prev_weights);
  bpnn_adjust_weights(net->hidden_delta, hid, net->input_units, in,
                      net->input_weights, net->input_prev_weights);
}
/* Serialize the network to `filename`: a header of three ints (layer sizes)
 * followed by the input->hidden and hidden->output weight matrices,
 * row-major, bias row/column included.
 * Fixes: the header fwrite calls passed sizeof(char) for BOTH the element
 * size and the count, writing only 1 byte of each 4-byte int (and making
 * the file unreadable by bpnn_read, which reads sizeof(int) per field);
 * the payload fwrite calls passed a count already multiplied by
 * sizeof(float) together with a sizeof(float) element size, reading 4x past
 * the end of `mem`; fopen's result was never checked. */
void bpnn_save(BPNN *net, char *filename)
{
  int n1, n2, n3, i, j, memcnt;
  float dvalue, **w;
  char *mem;
  FILE *pFile;
  pFile = fopen(filename, "w+");
  if (pFile == NULL) {
    printf("BPNN_SAVE: Cannot create '%s'\n", filename);
    return;
  }
  n1 = net->input_n;
  n2 = net->hidden_n;
  n3 = net->output_n;
  printf("Saving %dx%dx%d network to '%s'\n", n1, n2, n3, filename);
  /* Header: three whole ints, one element each. */
  fwrite(&n1, sizeof(int), 1, pFile);
  fwrite(&n2, sizeof(int), 1, pFile);
  fwrite(&n3, sizeof(int), 1, pFile);
  /* Input->hidden weights. */
  memcnt = 0;
  w = net->input_weights;
  mem = (char *)malloc((unsigned)((n1 + 1) * (n2 + 1) * sizeof(float)));
  for (i = 0; i <= n1; i++) {
    for (j = 0; j <= n2; j++) {
      dvalue = w[i][j];
      fastcopy(&mem[memcnt], &dvalue, sizeof(float));
      memcnt += sizeof(float);
    }
  }
  /* Count is the number of floats, not the number of bytes. */
  fwrite(mem, sizeof(float), (size_t)(n1 + 1) * (n2 + 1), pFile);
  free(mem);
  /* Hidden->output weights. */
  memcnt = 0;
  w = net->hidden_weights;
  mem = (char *)malloc((unsigned)((n2 + 1) * (n3 + 1) * sizeof(float)));
  for (i = 0; i <= n2; i++) {
    for (j = 0; j <= n3; j++) {
      dvalue = w[i][j];
      fastcopy(&mem[memcnt], &dvalue, sizeof(float));
      memcnt += sizeof(float);
    }
  }
  fwrite(mem, sizeof(float), (size_t)(n2 + 1) * (n3 + 1), pFile);
  free(mem);
  fclose(pFile);
  return;
}
/* Deserialize a network written by bpnn_save: a header of three ints
 * followed by the two weight matrices as raw floats.
 * Returns NULL if the file cannot be opened.
 * NOTE(review): open()/read()/close() are POSIX and need <fcntl.h> /
 * <unistd.h>, which this file does not include (implicit declarations).
 * NOTE(review): the original bpnn_save wrote only 1 byte per header int,
 * so files it produced cannot be read back correctly here -- verify the
 * on-disk format after fixing bpnn_save.  read() results are unchecked. */
BPNN *bpnn_read(filename) char *filename;
{
  char *mem;
  BPNN *new;
  int fd, n1, n2, n3, i, j, memcnt;
  if ((fd = open(filename, 0, 0644)) == -1) {
    return (NULL);
  }
  printf("Reading '%s'\n", filename); // fflush(stdout);
  /* Header: the three layer sizes. */
  read(fd, (char *)&n1, sizeof(int));
  read(fd, (char *)&n2, sizeof(int));
  read(fd, (char *)&n3, sizeof(int));
  new = bpnn_internal_create(n1, n2, n3);
  printf("'%s' contains a %dx%dx%d network\n", filename, n1, n2, n3);
  printf("Reading input weights..."); // fflush(stdout);
  /* Input->hidden matrix: bulk-read, then scatter into the 2d rows. */
  memcnt = 0;
  mem = (char *)malloc((unsigned)((n1 + 1) * (n2 + 1) * sizeof(float)));
  read(fd, mem, (n1 + 1) * (n2 + 1) * sizeof(float));
  for (i = 0; i <= n1; i++) {
    for (j = 0; j <= n2; j++) {
      fastcopy(&(new->input_weights[i][j]), &mem[memcnt], sizeof(float));
      memcnt += sizeof(float);
    }
  }
  free(mem);
  printf("Done\nReading hidden weights..."); // fflush(stdout);
  /* Hidden->output matrix, same scheme. */
  memcnt = 0;
  mem = (char *)malloc((unsigned)((n2 + 1) * (n3 + 1) * sizeof(float)));
  read(fd, mem, (n2 + 1) * (n3 + 1) * sizeof(float));
  for (i = 0; i <= n2; i++) {
    for (j = 0; j <= n3; j++) {
      fastcopy(&(new->hidden_weights[i][j]), &mem[memcnt], sizeof(float));
      memcnt += sizeof(float);
    }
  }
  free(mem);
  close(fd);
  printf("Done\n"); // fflush(stdout);
  /* Momentum history is not stored in the file; start it at zero. */
  bpnn_zero_weights(new->input_prev_weights, n1, n2);
  bpnn_zero_weights(new->hidden_prev_weights, n2, n3);
  return (new);
}
|
k_gen_1048576_1_1_4_65536_4_float4_0_1_c_map_gmem_2d_copy.omp.c | #if __clang__
typedef float float16 __attribute__((ext_vector_type(16)));
typedef float float8 __attribute__((ext_vector_type(8)));
typedef float float4 __attribute__((ext_vector_type(4)));
typedef float float2 __attribute__((ext_vector_type(2)));
#if $H$ > 0
__attribute__((vec_type_hint($TV$)))
#endif
#endif
// #pragma omp simd, #pragma omp for simd, #pragma omp parallel for simd directives, extended set of atomic constructs, proc_bind clause for all parallel-based directives, depend clause for #pragma omp task directive (except for array sections), #pragma omp cancel and #pragma omp cancellation point directives, and #pragma omp taskgroup
#define NUM_THREADS 16
// N_LX_LY_WX_WY_G_TV H_TRANS
/* Auto-generated copy kernel: moves 65536 * 4 float4 elements from `in` to
 * `out` (1 MiB of floats total, matching the 1048576 in the name --
 * presumably G * WY * vector-width per the generator's naming scheme; the
 * template placeholders above suggest this file is generator output).
 * `restrict` asserts the buffers never alias, enabling SIMD codegen. */
void k_gen_1048576_1_1_4_65536_4_float4_0_1_c_map_gmem_2d_copy (
float4 * restrict out,
const float4 * restrict in) {
//a simple for loop for now, for all threads, G * WY * veclength floats
#pragma omp parallel for simd schedule(static) num_threads(NUM_THREADS)
for (int gid = 0; gid < 65536 * 4; gid++) {
out[gid] = in[gid];
}
}
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values (the classic glibc
 * manual recipe).  `y` is normalized in place as scratch.  Returns 1 when
 * the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y so that y->tv_usec <= x->tv_usec. */
  if (x->tv_usec < y->tv_usec)
  {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  /* Push excess microseconds back into seconds if the gap exceeds 1s. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int excess = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * excess;
    y->tv_sec -= excess;
  }
  /* After normalization tv_usec is guaranteed non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Benchmark driver for the tiled order-1 3D 7-point variable-coefficient
 * stencil.  argv[1..3] are the interior grid dimensions (padded by 2 for
 * halos), argv[4] the number of time steps.  The time-tiled loop nest below
 * is CLooG/PLUTO-generated and must not be reordered by hand.
 * NOTE(review): Nx/Ny/Nz (argc <= 3) and Nt (argc <= 4) are used
 * uninitialized when too few arguments are given -- undefined behavior.
 * NOTE(review): A, coef (outer levels), and tile_size are never freed;
 * index 0 of each array is read by the stencil but never initialized. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;  /* +2 adds one halo cell on each side */
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // allocate the arrays
  /* A[2][Nz][Ny][Nx]: double-buffered grid (read t%2, write (t+1)%2). */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  /* coef[7][Nz][Ny][Nx]: one coefficient field per stencil point. */
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;    /* time-tile */
  tile_size[1] = 4;    /* z-tile */
  tile_size[2] = 4;    /* y-tile */
  tile_size[3] = 256;  /* x-tile (vectorized innermost dimension) */
  tile_size[4] = -1;   /* sentinel terminating the list */
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables
  //
  srand(42);  /* fixed seed: runs are reproducible */
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
       This file is part of the GNU C Library.
       The GNU C Library is free software; you can redistribute it and/or
       modify it under the terms of the GNU Lesser General Public
       License as published by the Free Software Foundation; either
       version 2.1 of the License, or (at your option) any later version.
       The GNU C Library is distributed in the hope that it will be useful,
       but WITHOUT ANY WARRANTY; without even the implied warranty of
       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
       Lesser General Public License for more details.
       You should have received a copy of the GNU Lesser General Public
       License along with the GNU C Library; if not, see
       <http://www.gnu.org/licenses/>.  */
    /* This header is separate from features.h so that the compiler can
       include it implicitly at the start of every compilation.  It must
       not itself include <features.h> or any other header that includes
       <features.h> because the implicit include comes before any feature
       test macros that may be defined in a source file before it first
       explicitly includes a system header.  GCC knows the name of this
       header in order to preinclude it.  */
    /* glibc's intent is to support the IEC 559 math functionality, real
       and complex.  If the GCC (4.9 and later) predefined macros
       specifying compiler intent are available, use them to determine
       whether the overall intent is to support these features; otherwise,
       presume an older compiler has intent to support these features and
       define these macros by default.  */
    /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
       Unicode 6.0.  */
    /* We do not support C11 <threads.h>.  */
    /* t1: time tile; t2-t4: z/y/x tiles (t2 is the parallel dimension);
       t5: time point; t6-t8: z/y/x points within a tile. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    /* Auto-generated polyhedral (diamond/time-skewed) tiling -- the bounds
       expressions encode the legality of the parallelization and must be
       kept exactly as generated. */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
        ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
        /* Tiles along t2 are independent at fixed t1, hence the parallel for. */
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-1,2)),ceild(4*t2-Nz,4));t3<=min(min(min(floord(4*t2+Ny,4),floord(Nt+Ny-4,4)),floord(2*t1+Ny+1,4)),floord(4*t1-4*t2+Nz+Ny-1,4));t3++) {
            for (t4=max(max(max(0,ceild(t1-127,128)),ceild(4*t2-Nz-252,256)),ceild(4*t3-Ny-252,256));t4<=min(min(min(min(floord(4*t2+Nx,256),floord(4*t3+Nx,256)),floord(Nt+Nx-4,256)),floord(2*t1+Nx+1,256)),floord(4*t1-4*t2+Nz+Nx-1,256));t4++) {
              for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),4*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),4*t3+2),256*t4+254),4*t1-4*t2+Nz+1);t5++) {
                for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
                    lbv=max(256*t4,t5+1);
                    ubv=min(256*t4+255,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      /* 7-point stencil: center plus the 6 face neighbors,
                         each scaled by its own coefficient field. */
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);  /* report the best of TESTS runs */
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
|
heat.c | #define _GNU_SOURCE
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <omp.h>
#include "colormap.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
// Simulation parameters
static const unsigned int N = 500;
static const float SOURCE_TEMP = 5000.0f;
static const float BOUNDARY_TEMP = 1000.0f;
static const float MIN_DELTA = 0.05f;
static const unsigned int MAX_ITERATIONS = 20000;
// Row-major linear index of cell (x, y) in a grid with `stride` columns.
static unsigned int idx(unsigned int x, unsigned int y, unsigned int stride) {
  unsigned int row_offset = y * stride;
  return row_offset + x;
}
// Prepare the initial temperature field: zero interior, fixed-temperature
// heat source, and all four borders clamped to BOUNDARY_TEMP.
static void init(unsigned int source_x, unsigned int source_y, float * matrix) {
  // Start from an all-zero grid.
  memset(matrix, 0, N * N * sizeof(float));
  // Drop in the heat source (a border write below would overwrite it,
  // matching the original ordering).
  matrix[idx(source_x, source_y, N)] = SOURCE_TEMP;
  // Clamp top/bottom rows and left/right columns in a single sweep.
  for (unsigned int i = 0; i < N; ++i) {
    matrix[idx(i, 0, N)] = BOUNDARY_TEMP;
    matrix[idx(i, N-1, N)] = BOUNDARY_TEMP;
    matrix[idx(0, i, N)] = BOUNDARY_TEMP;
    matrix[idx(N-1, i, N)] = BOUNDARY_TEMP;
  }
}
// One Jacobi relaxation step: every interior cell of `next` becomes the
// mean of its four neighbors in `current`; the source cell is left alone.
static void step(unsigned int source_x, unsigned int source_y, const float * current, float * next) {
  for (unsigned int row = 1; row < N-1; ++row) {
#pragma ivdep
    for (unsigned int col = 1; col < N-1; ++col) {
      // The heat source keeps its fixed temperature.
      if ((row == source_y) && (col == source_x)) {
        continue;
      }
      float up    = current[idx(col, row-1, N)];
      float left  = current[idx(col-1, row, N)];
      float right = current[idx(col+1, row, N)];
      float down  = current[idx(col, row+1, N)];
      next[idx(col, row, N)] = (up + left + right + down) / 4.0f;
    }
  }
}
// Largest absolute per-cell change between two successive grids
// (interior cells only) -- used as the convergence measure.
static float diff(const float * current, const float * next) {
  float worst = 0.0f;
  for (unsigned int y = 1; y < N-1; ++y) {
    for (unsigned int x = 1; x < N-1; ++x) {
      const unsigned int i = idx(x, y, N);
      worst = fmaxf(worst, fabsf(next[i] - current[i]));
    }
  }
  return worst;
}
/* Render the temperature field to heat<iter>.png using the magma colormap,
 * scaled from 0 to the hotter of source/boundary temperature.
 * NOTE(review): uint8_t needs <stdint.h>, which this file does not include
 * directly -- presumably pulled in via stb_image_write.h; confirm. */
void write_png(float * current, int iter) {
  char file[100];  /* "heat%i.png" always fits comfortably */
  uint8_t * image = malloc(3 * N * N * sizeof(uint8_t));  /* RGB, 3 bytes/pixel */
  float maxval = fmaxf(SOURCE_TEMP, BOUNDARY_TEMP);
  for (unsigned int y = 0; y < N; ++y) {
    for (unsigned int x = 0; x < N; ++x) {
      unsigned int i = idx(x, y, N);
      colormap_rgb(COLORMAP_MAGMA, current[i], 0.0f, maxval, &image[3*i], &image[3*i + 1], &image[3*i + 2]);
    }
  }
  sprintf(file,"heat%i.png", iter);
  stbi_write_png(file, N, N, 3, image, 3 * N);
  free(image);
}
/* Heat-diffusion driver: place a random source, relax until the largest
 * per-step change drops below MIN_DELTA (or MAX_ITERATIONS), write a PNG.
 * Fix: the time-stepping loop was annotated `#pragma omp parallel for`, but
 * iteration it+1 reads the grid produced by iteration it -- the threads
 * raced on t_diff and on the current/next pointer swap, giving
 * nondeterministic results.  The loop now runs serially and exits early on
 * convergence; parallelism belongs inside step()/diff() per iteration. */
int main() {
    size_t array_size = N * N * sizeof(float);
    float * current = _mm_malloc(array_size, 32);
    float * next = _mm_malloc(array_size, 32);
    srand(0);
    /* Source is always an interior cell. */
    unsigned int source_x = rand() % (N-2) + 1;
    unsigned int source_y = rand() % (N-2) + 1;
    printf("Heat source at (%u, %u)\n", source_x, source_y);
    init(source_x, source_y, current);
    memcpy(next, current, array_size);
    double start = omp_get_wtime();
    float t_diff = SOURCE_TEMP;
    for (unsigned int it = 0; it < MAX_ITERATIONS; ++it) {
        step(source_x, source_y, current, next);
        t_diff = diff(current, next);
        if(it%(MAX_ITERATIONS/10)==0){
            printf("%u: %f\n", it, t_diff);
        }
        /* Double-buffer swap: the freshly computed grid becomes current. */
        float * swap = current;
        current = next;
        next = swap;
        if (t_diff <= MIN_DELTA) {
            break;  /* converged */
        }
    }
    double stop = omp_get_wtime();
    printf("Computing time %f s.\n", stop-start);
    write_png(current, MAX_ITERATIONS);
    _mm_free(current);
    _mm_free(next);
    return 0;
}
|
miniblas.kernel_runtime.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "local_header.h"
#include "openmp_pscmc_inc.h"
#include "miniblas.kernel_inc.h"
/* No per-kernel initialization is needed for the sum kernel. */
int openmp_blas_sum_init (openmp_pscmc_env * pe ,openmp_blas_sum_struct * kerstr ){
return 0 ;}
/* Report the size of the kernel-argument struct via *len. */
void openmp_blas_sum_get_struct_len (size_t * len ){
((len)[0] = sizeof(openmp_blas_sum_struct ));
}
/* One compute unit per available OpenMP thread. */
int openmp_blas_sum_get_num_compute_units (openmp_blas_sum_struct * kerstr ){
return omp_get_max_threads ( ) ;}
/* Preferred x-dimension extent (IDX_OPT_MAX comes from the project headers). */
int openmp_blas_sum_get_xlen (){
return IDX_OPT_MAX ;}
/* Execute the sum kernel across the (xlen x ylen) index space: rows (yid)
 * are distributed cyclically over the OpenMP threads.  Returns 0.
 * The unused ysingle/ymin/ymax locals (a leftover blocked-distribution
 * scheme) were removed.
 * NOTE(review): the inner xid loop repeats the identical kernel call
 * xlen times -- preserved as-is; confirm against the kernel's contract. */
int openmp_blas_sum_exec (openmp_blas_sum_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
  {
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    /* Thread `tid` handles rows tid, tid + numt, tid + 2*numt, ... */
    for (int yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
      for (int xid = 0; xid < scmc_internal_g_xlen; xid += 1) {
        openmp_blas_sum_scmc_kernel(kerstr->y, kerstr->rdcd,
            kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0],
            kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
            kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0],
            kerstr->num_ele[0], yid, scmc_internal_g_ylen);
      }
    }
  }
  return 0;
}
/* Parameter binders for the sum kernel: each stores the device buffer from
 * `pm` into the corresponding kernel-struct slot.  Each previously fell off
 * the end of a non-void function (undefined behavior if the caller used the
 * result); they now return 0 explicitly, matching the other int APIs here. */
int openmp_blas_sum_scmc_set_parameter_y (openmp_blas_sum_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->y = pm->d_data;
  return 0;
}
int openmp_blas_sum_scmc_set_parameter_rdcd (openmp_blas_sum_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->rdcd = pm->d_data;
  return 0;
}
int openmp_blas_sum_scmc_set_parameter_y_cpu_core (openmp_blas_sum_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->y_cpu_core = pm->d_data;
  return 0;
}
int openmp_blas_sum_scmc_set_parameter_numvec (openmp_blas_sum_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->numvec = pm->d_data;
  return 0;
}
int openmp_blas_sum_scmc_set_parameter_XLEN (openmp_blas_sum_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->XLEN = pm->d_data;
  return 0;
}
int openmp_blas_sum_scmc_set_parameter_YLEN (openmp_blas_sum_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->YLEN = pm->d_data;
  return 0;
}
int openmp_blas_sum_scmc_set_parameter_ZLEN (openmp_blas_sum_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->ZLEN = pm->d_data;
  return 0;
}
int openmp_blas_sum_scmc_set_parameter_ovlp (openmp_blas_sum_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->ovlp = pm->d_data;
  return 0;
}
int openmp_blas_sum_scmc_set_parameter_xblock (openmp_blas_sum_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->xblock = pm->d_data;
  return 0;
}
int openmp_blas_sum_scmc_set_parameter_yblock (openmp_blas_sum_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->yblock = pm->d_data;
  return 0;
}
int openmp_blas_sum_scmc_set_parameter_zblock (openmp_blas_sum_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->zblock = pm->d_data;
  return 0;
}
int openmp_blas_sum_scmc_set_parameter_num_ele (openmp_blas_sum_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->num_ele = pm->d_data;
  return 0;
}
/* No per-kernel initialization is needed for the dot kernel. */
int openmp_blas_dot_init (openmp_pscmc_env * pe ,openmp_blas_dot_struct * kerstr ){
return 0 ;}
/* Report the size of the kernel-argument struct via *len. */
void openmp_blas_dot_get_struct_len (size_t * len ){
((len)[0] = sizeof(openmp_blas_dot_struct ));
}
/* One compute unit per available OpenMP thread. */
int openmp_blas_dot_get_num_compute_units (openmp_blas_dot_struct * kerstr ){
return omp_get_max_threads ( ) ;}
/* Preferred x-dimension extent (IDX_OPT_MAX comes from the project headers). */
int openmp_blas_dot_get_xlen (){
return IDX_OPT_MAX ;}
/* Execute the dot kernel across the (xlen x ylen) index space: rows (yid)
 * are distributed cyclically over the OpenMP threads.  Returns 0.
 * The unused ysingle/ymin/ymax locals were removed.
 * NOTE(review): the inner xid loop repeats the identical kernel call
 * xlen times -- preserved as-is; confirm against the kernel's contract. */
int openmp_blas_dot_exec (openmp_blas_dot_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
  {
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    /* Thread `tid` handles rows tid, tid + numt, tid + 2*numt, ... */
    for (int yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
      for (int xid = 0; xid < scmc_internal_g_xlen; xid += 1) {
        openmp_blas_dot_scmc_kernel(kerstr->y, kerstr->x, kerstr->rdcd,
            kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0],
            kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
            kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0],
            kerstr->num_ele[0], yid, scmc_internal_g_ylen);
      }
    }
  }
  return 0;
}
/* Parameter binders for the dot kernel: each stores the device buffer from
 * `pm` into the corresponding kernel-struct slot.  Each previously fell off
 * the end of a non-void function (undefined behavior if the caller used the
 * result); they now return 0 explicitly. */
int openmp_blas_dot_scmc_set_parameter_y (openmp_blas_dot_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->y = pm->d_data;
  return 0;
}
int openmp_blas_dot_scmc_set_parameter_x (openmp_blas_dot_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->x = pm->d_data;
  return 0;
}
int openmp_blas_dot_scmc_set_parameter_rdcd (openmp_blas_dot_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->rdcd = pm->d_data;
  return 0;
}
int openmp_blas_dot_scmc_set_parameter_y_cpu_core (openmp_blas_dot_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->y_cpu_core = pm->d_data;
  return 0;
}
int openmp_blas_dot_scmc_set_parameter_numvec (openmp_blas_dot_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->numvec = pm->d_data;
  return 0;
}
int openmp_blas_dot_scmc_set_parameter_XLEN (openmp_blas_dot_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->XLEN = pm->d_data;
  return 0;
}
int openmp_blas_dot_scmc_set_parameter_YLEN (openmp_blas_dot_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->YLEN = pm->d_data;
  return 0;
}
int openmp_blas_dot_scmc_set_parameter_ZLEN (openmp_blas_dot_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->ZLEN = pm->d_data;
  return 0;
}
int openmp_blas_dot_scmc_set_parameter_ovlp (openmp_blas_dot_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->ovlp = pm->d_data;
  return 0;
}
int openmp_blas_dot_scmc_set_parameter_xblock (openmp_blas_dot_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->xblock = pm->d_data;
  return 0;
}
int openmp_blas_dot_scmc_set_parameter_yblock (openmp_blas_dot_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->yblock = pm->d_data;
  return 0;
}
int openmp_blas_dot_scmc_set_parameter_zblock (openmp_blas_dot_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->zblock = pm->d_data;
  return 0;
}
int openmp_blas_dot_scmc_set_parameter_num_ele (openmp_blas_dot_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->num_ele = pm->d_data;
  return 0;
}
/* No per-kernel initialization is needed for the findmax kernel. */
int openmp_blas_findmax_init (openmp_pscmc_env * pe ,openmp_blas_findmax_struct * kerstr ){
return 0 ;}
/* Report the size of the kernel-argument struct via *len. */
void openmp_blas_findmax_get_struct_len (size_t * len ){
((len)[0] = sizeof(openmp_blas_findmax_struct ));
}
/* One compute unit per available OpenMP thread. */
int openmp_blas_findmax_get_num_compute_units (openmp_blas_findmax_struct * kerstr ){
return omp_get_max_threads ( ) ;}
/* Preferred x-dimension extent (IDX_OPT_MAX comes from the project headers). */
int openmp_blas_findmax_get_xlen (){
return IDX_OPT_MAX ;}
/* Execute the findmax kernel across the (xlen x ylen) index space: rows
 * (yid) are distributed cyclically over the OpenMP threads.  Returns 0.
 * The unused ysingle/ymin/ymax locals were removed.
 * NOTE(review): the inner xid loop repeats the identical kernel call
 * xlen times -- preserved as-is; confirm against the kernel's contract. */
int openmp_blas_findmax_exec (openmp_blas_findmax_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
  {
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    /* Thread `tid` handles rows tid, tid + numt, tid + 2*numt, ... */
    for (int yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
      for (int xid = 0; xid < scmc_internal_g_xlen; xid += 1) {
        openmp_blas_findmax_scmc_kernel(kerstr->y, kerstr->rdcd,
            kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0],
            kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
            kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0],
            kerstr->num_ele[0], yid, scmc_internal_g_ylen);
      }
    }
  }
  return 0;
}
/* Parameter binders for the findmax kernel: each stores the device buffer
 * from `pm` into the corresponding kernel-struct slot.  Each previously
 * fell off the end of a non-void function (undefined behavior if the caller
 * used the result); they now return 0 explicitly. */
int openmp_blas_findmax_scmc_set_parameter_y (openmp_blas_findmax_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->y = pm->d_data;
  return 0;
}
int openmp_blas_findmax_scmc_set_parameter_rdcd (openmp_blas_findmax_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->rdcd = pm->d_data;
  return 0;
}
int openmp_blas_findmax_scmc_set_parameter_y_cpu_core (openmp_blas_findmax_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->y_cpu_core = pm->d_data;
  return 0;
}
int openmp_blas_findmax_scmc_set_parameter_numvec (openmp_blas_findmax_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->numvec = pm->d_data;
  return 0;
}
int openmp_blas_findmax_scmc_set_parameter_XLEN (openmp_blas_findmax_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->XLEN = pm->d_data;
  return 0;
}
int openmp_blas_findmax_scmc_set_parameter_YLEN (openmp_blas_findmax_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->YLEN = pm->d_data;
  return 0;
}
int openmp_blas_findmax_scmc_set_parameter_ZLEN (openmp_blas_findmax_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->ZLEN = pm->d_data;
  return 0;
}
int openmp_blas_findmax_scmc_set_parameter_ovlp (openmp_blas_findmax_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->ovlp = pm->d_data;
  return 0;
}
int openmp_blas_findmax_scmc_set_parameter_xblock (openmp_blas_findmax_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->xblock = pm->d_data;
  return 0;
}
int openmp_blas_findmax_scmc_set_parameter_yblock (openmp_blas_findmax_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->yblock = pm->d_data;
  return 0;
}
int openmp_blas_findmax_scmc_set_parameter_zblock (openmp_blas_findmax_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->zblock = pm->d_data;
  return 0;
}
int openmp_blas_findmax_scmc_set_parameter_num_ele (openmp_blas_findmax_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->num_ele = pm->d_data;
  return 0;
}
/* One-time set-up for the mulxy kernel; nothing to initialise. */
int openmp_blas_mulxy_init (openmp_pscmc_env * pe ,openmp_blas_mulxy_struct * kerstr ){
    return 0;
}
/* Report the byte size of the mulxy parameter struct through *len. */
void openmp_blas_mulxy_get_struct_len (size_t * len ){
    *len = sizeof(openmp_blas_mulxy_struct);
}
/* One compute unit per available OpenMP host thread. */
int openmp_blas_mulxy_get_num_compute_units (openmp_blas_mulxy_struct * kerstr ){
    return omp_get_max_threads();
}
/* Maximum x extent supported per exec call. */
int openmp_blas_mulxy_get_xlen (){
    return IDX_OPT_MAX;
}
/* Execute the mulxy kernel over an xlen-by-ylen iteration space.
 * Rows (yid) are distributed round-robin over the OpenMP thread team; for
 * each row the scmc kernel is invoked once per x index.
 * NOTE(review): xid itself is not passed to the kernel — presumably the
 * kernel processes a full x extent keyed by yid; confirm against the
 * generator.  Always returns 0.
 * Fix: removed dead locals ysingle/ymin/ymax, which described a blocked
 * distribution the loop below never used. */
int openmp_blas_mulxy_exec (openmp_blas_mulxy_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int xid;
        int yid;
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
        {
            for (xid = 0; xid < scmc_internal_g_xlen; xid++)
            {
                openmp_blas_mulxy_scmc_kernel(kerstr->y, kerstr->x, kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0], kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0], kerstr->num_ele[0], yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/* Parameter setters for the mulxy kernel: each binds the device buffer
 * pm->d_data to one named slot of the kernel struct.
 * Fix: originals were non-void with no return statement (UB if the caller
 * reads the result); all now return 0 on success. */
int openmp_blas_mulxy_scmc_set_parameter_y (openmp_blas_mulxy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y = pm->d_data;
    return 0;
}
int openmp_blas_mulxy_scmc_set_parameter_x (openmp_blas_mulxy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->x = pm->d_data;
    return 0;
}
int openmp_blas_mulxy_scmc_set_parameter_y_cpu_core (openmp_blas_mulxy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data;
    return 0;
}
int openmp_blas_mulxy_scmc_set_parameter_numvec (openmp_blas_mulxy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_blas_mulxy_scmc_set_parameter_XLEN (openmp_blas_mulxy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_blas_mulxy_scmc_set_parameter_YLEN (openmp_blas_mulxy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_blas_mulxy_scmc_set_parameter_ZLEN (openmp_blas_mulxy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_blas_mulxy_scmc_set_parameter_ovlp (openmp_blas_mulxy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_blas_mulxy_scmc_set_parameter_xblock (openmp_blas_mulxy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data;
    return 0;
}
int openmp_blas_mulxy_scmc_set_parameter_yblock (openmp_blas_mulxy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data;
    return 0;
}
int openmp_blas_mulxy_scmc_set_parameter_zblock (openmp_blas_mulxy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data;
    return 0;
}
int openmp_blas_mulxy_scmc_set_parameter_num_ele (openmp_blas_mulxy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
/* One-time set-up for the axpby kernel; nothing to initialise. */
int openmp_blas_axpby_init (openmp_pscmc_env * pe ,openmp_blas_axpby_struct * kerstr ){
    return 0;
}
/* Report the byte size of the axpby parameter struct through *len. */
void openmp_blas_axpby_get_struct_len (size_t * len ){
    *len = sizeof(openmp_blas_axpby_struct);
}
/* One compute unit per available OpenMP host thread. */
int openmp_blas_axpby_get_num_compute_units (openmp_blas_axpby_struct * kerstr ){
    return omp_get_max_threads();
}
/* Maximum x extent supported per exec call. */
int openmp_blas_axpby_get_xlen (){
    return IDX_OPT_MAX;
}
/* Execute the axpby kernel over an xlen-by-ylen iteration space.
 * Rows (yid) are distributed round-robin over the OpenMP thread team; for
 * each row the scmc kernel is invoked once per x index.
 * NOTE(review): xid itself is not passed to the kernel — presumably the
 * kernel processes a full x extent keyed by yid; confirm against the
 * generator.  Always returns 0.
 * Fix: removed dead locals ysingle/ymin/ymax, which described a blocked
 * distribution the loop below never used. */
int openmp_blas_axpby_exec (openmp_blas_axpby_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int xid;
        int yid;
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
        {
            for (xid = 0; xid < scmc_internal_g_xlen; xid++)
            {
                openmp_blas_axpby_scmc_kernel(kerstr->y, kerstr->x, kerstr->a[0], kerstr->b[0], kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0], kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0], kerstr->num_ele[0], yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/* Parameter setters for the axpby kernel: each binds the device buffer
 * pm->d_data to one named slot of the kernel struct.
 * Fix: originals were non-void with no return statement (UB if the caller
 * reads the result); all now return 0 on success. */
int openmp_blas_axpby_scmc_set_parameter_y (openmp_blas_axpby_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y = pm->d_data;
    return 0;
}
int openmp_blas_axpby_scmc_set_parameter_x (openmp_blas_axpby_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->x = pm->d_data;
    return 0;
}
int openmp_blas_axpby_scmc_set_parameter_a (openmp_blas_axpby_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->a = pm->d_data;
    return 0;
}
int openmp_blas_axpby_scmc_set_parameter_b (openmp_blas_axpby_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->b = pm->d_data;
    return 0;
}
int openmp_blas_axpby_scmc_set_parameter_y_cpu_core (openmp_blas_axpby_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data;
    return 0;
}
int openmp_blas_axpby_scmc_set_parameter_numvec (openmp_blas_axpby_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_blas_axpby_scmc_set_parameter_XLEN (openmp_blas_axpby_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_blas_axpby_scmc_set_parameter_YLEN (openmp_blas_axpby_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_blas_axpby_scmc_set_parameter_ZLEN (openmp_blas_axpby_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_blas_axpby_scmc_set_parameter_ovlp (openmp_blas_axpby_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_blas_axpby_scmc_set_parameter_xblock (openmp_blas_axpby_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data;
    return 0;
}
int openmp_blas_axpby_scmc_set_parameter_yblock (openmp_blas_axpby_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data;
    return 0;
}
int openmp_blas_axpby_scmc_set_parameter_zblock (openmp_blas_axpby_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data;
    return 0;
}
int openmp_blas_axpby_scmc_set_parameter_num_ele (openmp_blas_axpby_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
/* One-time set-up for the axpy kernel; nothing to initialise. */
int openmp_blas_axpy_init (openmp_pscmc_env * pe ,openmp_blas_axpy_struct * kerstr ){
    return 0;
}
/* Report the byte size of the axpy parameter struct through *len. */
void openmp_blas_axpy_get_struct_len (size_t * len ){
    *len = sizeof(openmp_blas_axpy_struct);
}
/* One compute unit per available OpenMP host thread. */
int openmp_blas_axpy_get_num_compute_units (openmp_blas_axpy_struct * kerstr ){
    return omp_get_max_threads();
}
/* Maximum x extent supported per exec call. */
int openmp_blas_axpy_get_xlen (){
    return IDX_OPT_MAX;
}
/* Execute the axpy kernel over an xlen-by-ylen iteration space.
 * Rows (yid) are distributed round-robin over the OpenMP thread team; for
 * each row the scmc kernel is invoked once per x index.
 * NOTE(review): xid itself is not passed to the kernel — presumably the
 * kernel processes a full x extent keyed by yid; confirm against the
 * generator.  Always returns 0.
 * Fix: removed dead locals ysingle/ymin/ymax, which described a blocked
 * distribution the loop below never used. */
int openmp_blas_axpy_exec (openmp_blas_axpy_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int xid;
        int yid;
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
        {
            for (xid = 0; xid < scmc_internal_g_xlen; xid++)
            {
                openmp_blas_axpy_scmc_kernel(kerstr->y, kerstr->x, kerstr->a[0], kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0], kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0], kerstr->num_ele[0], yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/* Parameter setters for the axpy kernel: each binds the device buffer
 * pm->d_data to one named slot of the kernel struct.
 * Fix: originals were non-void with no return statement (UB if the caller
 * reads the result); all now return 0 on success. */
int openmp_blas_axpy_scmc_set_parameter_y (openmp_blas_axpy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y = pm->d_data;
    return 0;
}
int openmp_blas_axpy_scmc_set_parameter_x (openmp_blas_axpy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->x = pm->d_data;
    return 0;
}
int openmp_blas_axpy_scmc_set_parameter_a (openmp_blas_axpy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->a = pm->d_data;
    return 0;
}
int openmp_blas_axpy_scmc_set_parameter_y_cpu_core (openmp_blas_axpy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data;
    return 0;
}
int openmp_blas_axpy_scmc_set_parameter_numvec (openmp_blas_axpy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_blas_axpy_scmc_set_parameter_XLEN (openmp_blas_axpy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_blas_axpy_scmc_set_parameter_YLEN (openmp_blas_axpy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_blas_axpy_scmc_set_parameter_ZLEN (openmp_blas_axpy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_blas_axpy_scmc_set_parameter_ovlp (openmp_blas_axpy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_blas_axpy_scmc_set_parameter_xblock (openmp_blas_axpy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data;
    return 0;
}
int openmp_blas_axpy_scmc_set_parameter_yblock (openmp_blas_axpy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data;
    return 0;
}
int openmp_blas_axpy_scmc_set_parameter_zblock (openmp_blas_axpy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data;
    return 0;
}
int openmp_blas_axpy_scmc_set_parameter_num_ele (openmp_blas_axpy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
/* One-time set-up for the yisax kernel; nothing to initialise. */
int openmp_blas_yisax_init (openmp_pscmc_env * pe ,openmp_blas_yisax_struct * kerstr ){
    return 0;
}
/* Report the byte size of the yisax parameter struct through *len. */
void openmp_blas_yisax_get_struct_len (size_t * len ){
    *len = sizeof(openmp_blas_yisax_struct);
}
/* One compute unit per available OpenMP host thread. */
int openmp_blas_yisax_get_num_compute_units (openmp_blas_yisax_struct * kerstr ){
    return omp_get_max_threads();
}
/* Maximum x extent supported per exec call. */
int openmp_blas_yisax_get_xlen (){
    return IDX_OPT_MAX;
}
/* Execute the yisax kernel over an xlen-by-ylen iteration space.
 * Rows (yid) are distributed round-robin over the OpenMP thread team; for
 * each row the scmc kernel is invoked once per x index.
 * NOTE(review): xid itself is not passed to the kernel — presumably the
 * kernel processes a full x extent keyed by yid; confirm against the
 * generator.  Always returns 0.
 * Fix: removed dead locals ysingle/ymin/ymax, which described a blocked
 * distribution the loop below never used. */
int openmp_blas_yisax_exec (openmp_blas_yisax_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int xid;
        int yid;
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
        {
            for (xid = 0; xid < scmc_internal_g_xlen; xid++)
            {
                openmp_blas_yisax_scmc_kernel(kerstr->y, kerstr->x, kerstr->a[0], kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0], kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0], kerstr->num_ele[0], yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/* Parameter setters for the yisax kernel: each binds the device buffer
 * pm->d_data to one named slot of the kernel struct.
 * Fix: originals were non-void with no return statement (UB if the caller
 * reads the result); all now return 0 on success. */
int openmp_blas_yisax_scmc_set_parameter_y (openmp_blas_yisax_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y = pm->d_data;
    return 0;
}
int openmp_blas_yisax_scmc_set_parameter_x (openmp_blas_yisax_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->x = pm->d_data;
    return 0;
}
int openmp_blas_yisax_scmc_set_parameter_a (openmp_blas_yisax_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->a = pm->d_data;
    return 0;
}
int openmp_blas_yisax_scmc_set_parameter_y_cpu_core (openmp_blas_yisax_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data;
    return 0;
}
int openmp_blas_yisax_scmc_set_parameter_numvec (openmp_blas_yisax_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_blas_yisax_scmc_set_parameter_XLEN (openmp_blas_yisax_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_blas_yisax_scmc_set_parameter_YLEN (openmp_blas_yisax_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_blas_yisax_scmc_set_parameter_ZLEN (openmp_blas_yisax_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_blas_yisax_scmc_set_parameter_ovlp (openmp_blas_yisax_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_blas_yisax_scmc_set_parameter_xblock (openmp_blas_yisax_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data;
    return 0;
}
int openmp_blas_yisax_scmc_set_parameter_yblock (openmp_blas_yisax_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data;
    return 0;
}
int openmp_blas_yisax_scmc_set_parameter_zblock (openmp_blas_yisax_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data;
    return 0;
}
int openmp_blas_yisax_scmc_set_parameter_num_ele (openmp_blas_yisax_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
/* One-time set-up for the invy kernel; nothing to initialise. */
int openmp_blas_invy_init (openmp_pscmc_env * pe ,openmp_blas_invy_struct * kerstr ){
    return 0;
}
/* Report the byte size of the invy parameter struct through *len. */
void openmp_blas_invy_get_struct_len (size_t * len ){
    *len = sizeof(openmp_blas_invy_struct);
}
/* One compute unit per available OpenMP host thread. */
int openmp_blas_invy_get_num_compute_units (openmp_blas_invy_struct * kerstr ){
    return omp_get_max_threads();
}
/* Maximum x extent supported per exec call. */
int openmp_blas_invy_get_xlen (){
    return IDX_OPT_MAX;
}
/* Execute the invy kernel over an xlen-by-ylen iteration space.
 * Rows (yid) are distributed round-robin over the OpenMP thread team; for
 * each row the scmc kernel is invoked once per x index.
 * NOTE(review): xid itself is not passed to the kernel — presumably the
 * kernel processes a full x extent keyed by yid; confirm against the
 * generator.  Always returns 0.
 * Fix: removed dead locals ysingle/ymin/ymax, which described a blocked
 * distribution the loop below never used. */
int openmp_blas_invy_exec (openmp_blas_invy_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int xid;
        int yid;
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
        {
            for (xid = 0; xid < scmc_internal_g_xlen; xid++)
            {
                openmp_blas_invy_scmc_kernel(kerstr->y, kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0], kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0], kerstr->num_ele[0], yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/* Parameter setters for the invy kernel: each binds the device buffer
 * pm->d_data to one named slot of the kernel struct.
 * Fix: originals were non-void with no return statement (UB if the caller
 * reads the result); all now return 0 on success. */
int openmp_blas_invy_scmc_set_parameter_y (openmp_blas_invy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y = pm->d_data;
    return 0;
}
int openmp_blas_invy_scmc_set_parameter_y_cpu_core (openmp_blas_invy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data;
    return 0;
}
int openmp_blas_invy_scmc_set_parameter_numvec (openmp_blas_invy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_blas_invy_scmc_set_parameter_XLEN (openmp_blas_invy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_blas_invy_scmc_set_parameter_YLEN (openmp_blas_invy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_blas_invy_scmc_set_parameter_ZLEN (openmp_blas_invy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_blas_invy_scmc_set_parameter_ovlp (openmp_blas_invy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_blas_invy_scmc_set_parameter_xblock (openmp_blas_invy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data;
    return 0;
}
int openmp_blas_invy_scmc_set_parameter_yblock (openmp_blas_invy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data;
    return 0;
}
int openmp_blas_invy_scmc_set_parameter_zblock (openmp_blas_invy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data;
    return 0;
}
int openmp_blas_invy_scmc_set_parameter_num_ele (openmp_blas_invy_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
/* One-time set-up for the get_ITG_Potential kernel; nothing to initialise. */
int openmp_blas_get_ITG_Potential_init (openmp_pscmc_env * pe ,openmp_blas_get_ITG_Potential_struct * kerstr ){
    return 0;
}
/* Report the byte size of the get_ITG_Potential parameter struct through *len. */
void openmp_blas_get_ITG_Potential_get_struct_len (size_t * len ){
    *len = sizeof(openmp_blas_get_ITG_Potential_struct);
}
/* One compute unit per available OpenMP host thread. */
int openmp_blas_get_ITG_Potential_get_num_compute_units (openmp_blas_get_ITG_Potential_struct * kerstr ){
    return omp_get_max_threads();
}
/* Maximum x extent supported per exec call. */
int openmp_blas_get_ITG_Potential_get_xlen (){
    return IDX_OPT_MAX;
}
/* Execute the get_ITG_Potential kernel over an xlen-by-ylen iteration space.
 * Rows (yid) are distributed round-robin over the OpenMP thread team; for
 * each row the scmc kernel is invoked once per x index.
 * NOTE(review): xid itself is not passed to the kernel — presumably the
 * kernel processes a full x extent keyed by yid; confirm against the
 * generator.  Always returns 0.
 * Fix: removed dead locals ysingle/ymin/ymax, which described a blocked
 * distribution the loop below never used. */
int openmp_blas_get_ITG_Potential_exec (openmp_blas_get_ITG_Potential_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int xid;
        int yid;
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
        {
            for (xid = 0; xid < scmc_internal_g_xlen; xid++)
            {
                openmp_blas_get_ITG_Potential_scmc_kernel(kerstr->y, kerstr->x, kerstr->u, kerstr->minus_over_q_e[0], kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0], kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0], kerstr->num_ele[0], yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/* Parameter setters for the get_ITG_Potential kernel: each binds the device
 * buffer pm->d_data to one named slot of the kernel struct.
 * Fix: originals were non-void with no return statement (UB if the caller
 * reads the result); all now return 0 on success. */
int openmp_blas_get_ITG_Potential_scmc_set_parameter_y (openmp_blas_get_ITG_Potential_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y = pm->d_data;
    return 0;
}
int openmp_blas_get_ITG_Potential_scmc_set_parameter_x (openmp_blas_get_ITG_Potential_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->x = pm->d_data;
    return 0;
}
int openmp_blas_get_ITG_Potential_scmc_set_parameter_u (openmp_blas_get_ITG_Potential_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->u = pm->d_data;
    return 0;
}
int openmp_blas_get_ITG_Potential_scmc_set_parameter_minus_over_q_e (openmp_blas_get_ITG_Potential_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->minus_over_q_e = pm->d_data;
    return 0;
}
int openmp_blas_get_ITG_Potential_scmc_set_parameter_y_cpu_core (openmp_blas_get_ITG_Potential_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data;
    return 0;
}
int openmp_blas_get_ITG_Potential_scmc_set_parameter_numvec (openmp_blas_get_ITG_Potential_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_blas_get_ITG_Potential_scmc_set_parameter_XLEN (openmp_blas_get_ITG_Potential_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_blas_get_ITG_Potential_scmc_set_parameter_YLEN (openmp_blas_get_ITG_Potential_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_blas_get_ITG_Potential_scmc_set_parameter_ZLEN (openmp_blas_get_ITG_Potential_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_blas_get_ITG_Potential_scmc_set_parameter_ovlp (openmp_blas_get_ITG_Potential_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_blas_get_ITG_Potential_scmc_set_parameter_xblock (openmp_blas_get_ITG_Potential_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data;
    return 0;
}
int openmp_blas_get_ITG_Potential_scmc_set_parameter_yblock (openmp_blas_get_ITG_Potential_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data;
    return 0;
}
int openmp_blas_get_ITG_Potential_scmc_set_parameter_zblock (openmp_blas_get_ITG_Potential_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data;
    return 0;
}
int openmp_blas_get_ITG_Potential_scmc_set_parameter_num_ele (openmp_blas_get_ITG_Potential_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
/* One-time set-up for the yisconst kernel; nothing to initialise. */
int openmp_blas_yisconst_init (openmp_pscmc_env * pe ,openmp_blas_yisconst_struct * kerstr ){
    return 0;
}
/* Report the byte size of the yisconst parameter struct through *len. */
void openmp_blas_yisconst_get_struct_len (size_t * len ){
    *len = sizeof(openmp_blas_yisconst_struct);
}
/* One compute unit per available OpenMP host thread. */
int openmp_blas_yisconst_get_num_compute_units (openmp_blas_yisconst_struct * kerstr ){
    return omp_get_max_threads();
}
/* Maximum x extent supported per exec call. */
int openmp_blas_yisconst_get_xlen (){
    return IDX_OPT_MAX;
}
/* Execute the yisconst kernel over an xlen-by-ylen iteration space.
 * Rows (yid) are distributed round-robin over the OpenMP thread team; for
 * each row the scmc kernel is invoked once per x index.
 * NOTE(review): xid itself is not passed to the kernel — presumably the
 * kernel processes a full x extent keyed by yid; confirm against the
 * generator.  Always returns 0.
 * Fix: removed dead locals ysingle/ymin/ymax, which described a blocked
 * distribution the loop below never used. */
int openmp_blas_yisconst_exec (openmp_blas_yisconst_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int xid;
        int yid;
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
        {
            for (xid = 0; xid < scmc_internal_g_xlen; xid++)
            {
                openmp_blas_yisconst_scmc_kernel(kerstr->y, kerstr->a[0], kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0], kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0], kerstr->num_ele[0], yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/* Parameter setters for the yisconst kernel: each binds the device buffer
 * pm->d_data to one named slot of the kernel struct.
 * Fix: originals were non-void with no return statement (UB if the caller
 * reads the result); all now return 0 on success. */
int openmp_blas_yisconst_scmc_set_parameter_y (openmp_blas_yisconst_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y = pm->d_data;
    return 0;
}
int openmp_blas_yisconst_scmc_set_parameter_a (openmp_blas_yisconst_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->a = pm->d_data;
    return 0;
}
int openmp_blas_yisconst_scmc_set_parameter_y_cpu_core (openmp_blas_yisconst_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data;
    return 0;
}
int openmp_blas_yisconst_scmc_set_parameter_numvec (openmp_blas_yisconst_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_blas_yisconst_scmc_set_parameter_XLEN (openmp_blas_yisconst_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_blas_yisconst_scmc_set_parameter_YLEN (openmp_blas_yisconst_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_blas_yisconst_scmc_set_parameter_ZLEN (openmp_blas_yisconst_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_blas_yisconst_scmc_set_parameter_ovlp (openmp_blas_yisconst_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_blas_yisconst_scmc_set_parameter_xblock (openmp_blas_yisconst_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data;
    return 0;
}
int openmp_blas_yisconst_scmc_set_parameter_yblock (openmp_blas_yisconst_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data;
    return 0;
}
int openmp_blas_yisconst_scmc_set_parameter_zblock (openmp_blas_yisconst_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data;
    return 0;
}
int openmp_blas_yisconst_scmc_set_parameter_num_ele (openmp_blas_yisconst_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
/* One-time set-up for the yiszero kernel; nothing to initialise. */
int openmp_blas_yiszero_init (openmp_pscmc_env * pe ,openmp_blas_yiszero_struct * kerstr ){
    return 0;
}
/* Report the byte size of the yiszero parameter struct through *len. */
void openmp_blas_yiszero_get_struct_len (size_t * len ){
    *len = sizeof(openmp_blas_yiszero_struct);
}
/* One compute unit per available OpenMP host thread. */
int openmp_blas_yiszero_get_num_compute_units (openmp_blas_yiszero_struct * kerstr ){
    return omp_get_max_threads();
}
/* Maximum x extent supported per exec call. */
int openmp_blas_yiszero_get_xlen (){
    return IDX_OPT_MAX;
}
/* Execute the yiszero kernel over an xlen-by-ylen iteration space.
 * Rows (yid) are distributed round-robin over the OpenMP thread team; for
 * each row the scmc kernel is invoked once per x index.
 * NOTE(review): xid itself is not passed to the kernel — presumably the
 * kernel processes a full x extent keyed by yid; confirm against the
 * generator.  Always returns 0.
 * Fix: removed dead locals ysingle/ymin/ymax, which described a blocked
 * distribution the loop below never used. */
int openmp_blas_yiszero_exec (openmp_blas_yiszero_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int xid;
        int yid;
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
        {
            for (xid = 0; xid < scmc_internal_g_xlen; xid++)
            {
                openmp_blas_yiszero_scmc_kernel(kerstr->y, kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0], kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0], kerstr->num_ele[0], yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/* Parameter setters for the yiszero kernel: each binds the device buffer
 * pm->d_data to one named slot of the kernel struct.
 * Fix: originals were non-void with no return statement (UB if the caller
 * reads the result); all now return 0 on success. */
int openmp_blas_yiszero_scmc_set_parameter_y (openmp_blas_yiszero_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y = pm->d_data;
    return 0;
}
int openmp_blas_yiszero_scmc_set_parameter_y_cpu_core (openmp_blas_yiszero_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data;
    return 0;
}
int openmp_blas_yiszero_scmc_set_parameter_numvec (openmp_blas_yiszero_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_blas_yiszero_scmc_set_parameter_XLEN (openmp_blas_yiszero_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_blas_yiszero_scmc_set_parameter_YLEN (openmp_blas_yiszero_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_blas_yiszero_scmc_set_parameter_ZLEN (openmp_blas_yiszero_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_blas_yiszero_scmc_set_parameter_ovlp (openmp_blas_yiszero_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_blas_yiszero_scmc_set_parameter_xblock (openmp_blas_yiszero_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data;
    return 0;
}
int openmp_blas_yiszero_scmc_set_parameter_yblock (openmp_blas_yiszero_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data;
    return 0;
}
int openmp_blas_yiszero_scmc_set_parameter_zblock (openmp_blas_yiszero_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data;
    return 0;
}
int openmp_blas_yiszero_scmc_set_parameter_num_ele (openmp_blas_yiszero_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
/* One-time set-up for the yiszero_synced kernel; nothing to initialise. */
int openmp_blas_yiszero_synced_init (openmp_pscmc_env * pe ,openmp_blas_yiszero_synced_struct * kerstr ){
    return 0;
}
/* Report the byte size of the yiszero_synced parameter struct through *len. */
void openmp_blas_yiszero_synced_get_struct_len (size_t * len ){
    *len = sizeof(openmp_blas_yiszero_synced_struct);
}
/* One compute unit per available OpenMP host thread. */
int openmp_blas_yiszero_synced_get_num_compute_units (openmp_blas_yiszero_synced_struct * kerstr ){
    return omp_get_max_threads();
}
/* Maximum x extent supported per exec call. */
int openmp_blas_yiszero_synced_get_xlen (){
    return IDX_OPT_MAX;
}
/* Execute the yiszero_synced kernel over an xlen-by-ylen iteration space.
 * Rows (yid) are distributed round-robin over the OpenMP thread team; for
 * each row the scmc kernel is invoked once per x index.
 * NOTE(review): xid itself is not passed to the kernel — presumably the
 * kernel processes a full x extent keyed by yid; confirm against the
 * generator.  Always returns 0.
 * Fix: removed dead locals ysingle/ymin/ymax, which described a blocked
 * distribution the loop below never used. */
int openmp_blas_yiszero_synced_exec (openmp_blas_yiszero_synced_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int xid;
        int yid;
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
        {
            for (xid = 0; xid < scmc_internal_g_xlen; xid++)
            {
                openmp_blas_yiszero_synced_scmc_kernel(kerstr->y, kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0], kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0], kerstr->num_ele[0], yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/* Parameter setters for the yiszero_synced kernel: each binds the device
 * buffer pm->d_data to one named slot of the kernel struct.
 * Fix: originals were non-void with no return statement (UB if the caller
 * reads the result); all now return 0 on success. */
int openmp_blas_yiszero_synced_scmc_set_parameter_y (openmp_blas_yiszero_synced_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y = pm->d_data;
    return 0;
}
int openmp_blas_yiszero_synced_scmc_set_parameter_y_cpu_core (openmp_blas_yiszero_synced_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data;
    return 0;
}
int openmp_blas_yiszero_synced_scmc_set_parameter_numvec (openmp_blas_yiszero_synced_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_blas_yiszero_synced_scmc_set_parameter_XLEN (openmp_blas_yiszero_synced_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_blas_yiszero_synced_scmc_set_parameter_YLEN (openmp_blas_yiszero_synced_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_blas_yiszero_synced_scmc_set_parameter_ZLEN (openmp_blas_yiszero_synced_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_blas_yiszero_synced_scmc_set_parameter_ovlp (openmp_blas_yiszero_synced_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_blas_yiszero_synced_scmc_set_parameter_xblock (openmp_blas_yiszero_synced_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data;
    return 0;
}
int openmp_blas_yiszero_synced_scmc_set_parameter_yblock (openmp_blas_yiszero_synced_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data;
    return 0;
}
int openmp_blas_yiszero_synced_scmc_set_parameter_zblock (openmp_blas_yiszero_synced_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data;
    return 0;
}
int openmp_blas_yiszero_synced_scmc_set_parameter_num_ele (openmp_blas_yiszero_synced_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
|
convolution_3x3_packn.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transform 3x3 stride-1 convolution kernels into the Winograd F(6x6,3x3)
// domain (8x8 transformed tiles) and interleave the result for packn-wide
// RISC-V vector loads.
//   kernel          : raw weights, laid out outch * inch * 9 floats
//   kernel_tm_packn : output, packed transformed kernels (created here)
//   inch / outch    : input / output channel counts — assumed multiples of
//                     packn for the interleave loop below (TODO confirm:
//                     channels not divisible by packn are silently skipped)
static void conv3x3s1_winograd64_transform_kernel_packn_rvv(const Mat& kernel, Mat& kernel_tm_packn, int inch, int outch, const Option& opt)
{
    // packn = fp32 lanes per vector register (vlenb bytes / 4 bytes per float).
    const int packn = csrr_vlenb() / 4;
    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);
    // G matrix for F(6,3): maps one 3-tap kernel row/column to 8 transformed points.
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            // 3x3 kernel for output channel p, input channel q.
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);
            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;
            // h
            // First pass: apply G to each kernel row -> 8x3 intermediate.
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }
            // v
            // Second pass: apply G along the other axis -> 8x8 = G * g * G^T.
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];
                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }
    // interleave
    // src = 64-inch-outch
    // dst = pb-pa-inch/pa-64-outch/pb
    kernel_tm_packn.create(inch / packn, 64, outch / packn, (size_t)4u * packn * packn, packn * packn);
    for (int q = 0; q + (packn - 1) < outch; q += packn)
    {
        Mat g0 = kernel_tm_packn.channel(q / packn);
        for (int k = 0; k < 64; k++)
        {
            float* g00 = g0.row<float>(k);
            // Gather element k of each (outch, inch) pair in the packn x packn
            // tile so a single vector load later fetches packn lanes.
            for (int p = 0; p + (packn - 1) < inch; p += packn)
            {
                for (int i = 0; i < packn; i++)
                {
                    for (int j = 0; j < packn; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);
                        g00[0] = (float)k00[k];
                        g00++;
                    }
                }
            }
        }
    }
}
// Winograd F(6x6, 3x3) stride-1 3x3 convolution for packn-packed fp32 blobs (RVV).
// Pipeline:
//   1. pad the input so the output is a multiple of 6 (plus a 2-pixel border),
//   2. transform every 8x8 input tile into the Winograd domain (64 coefficients),
//   3. permute the transformed tiles into batches of 8/4/2/1 for contiguous reads,
//   4. per output channel, dot the tiles with the pre-transformed kernel,
//   5. transform each 8x8 output tile back to a 6x6 spatial tile (+bias),
//   6. crop the padding off the result.
// NOTE(review): assumes all blobs are packed with elempack == packn (= vlenb/4)
// and that inch/outch are multiples of packn — confirm at the caller.
static void conv3x3s1_winograd64_packn_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
// number of fp32 lanes in one vector register; also the channel pack factor
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
// 8x8 tiles overlap by 2 pixels; each covers a 6x6 output patch
const int tiles = w_tm / 8 * h_tm / 8;
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
bottom_blob_tm.create(tiles, 64, inch, 4u * elempack, elempack, opt.workspace_allocator);
// const float itm[8][8] = {
//     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
//
//     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
//     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
//
//     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
//     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
//
//     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
//     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
//
//     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
// NOTE c99 variable length array
float tmp[8][8][packn];
// tile
for (int i = 0; i < h_tm / 8; i++)
{
for (int j = 0; j < w_tm / 8; j++)
{
const float* r0 = img0.row<const float>(i * 6) + (j * 6) * packn;
// horizontal pass: apply the 1-D transform to each of the 8 tile rows -> tmp
for (int m = 0; m < 8; m++)
{
vfloat32m1_t _r00 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _r01 = vle32_v_f32m1(r0 + packn, vl);
vfloat32m1_t _r02 = vle32_v_f32m1(r0 + packn * 2, vl);
vfloat32m1_t _r03 = vle32_v_f32m1(r0 + packn * 3, vl);
vfloat32m1_t _r04 = vle32_v_f32m1(r0 + packn * 4, vl);
vfloat32m1_t _r05 = vle32_v_f32m1(r0 + packn * 5, vl);
vfloat32m1_t _r06 = vle32_v_f32m1(r0 + packn * 6, vl);
vfloat32m1_t _r07 = vle32_v_f32m1(r0 + packn * 7, vl);
vfloat32m1_t _tmp0m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r00, _r06, vl), 5.25f, vfsub_vv_f32m1(_r04, _r02, vl), vl);
vfloat32m1_t _tmp7m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r07, _r01, vl), 5.25f, vfsub_vv_f32m1(_r03, _r05, vl), vl);
vse32_v_f32m1(tmp[0][m], _tmp0m, vl);
vse32_v_f32m1(tmp[7][m], _tmp7m, vl);
vfloat32m1_t _tmp12a = vfmacc_vf_f32m1(vfadd_vv_f32m1(_r02, _r06, vl), -4.25f, _r04, vl);
vfloat32m1_t _tmp12b = vfmacc_vf_f32m1(vfadd_vv_f32m1(_r01, _r05, vl), -4.25f, _r03, vl);
vfloat32m1_t _tmp1m = vfadd_vv_f32m1(_tmp12a, _tmp12b, vl);
vfloat32m1_t _tmp2m = vfsub_vv_f32m1(_tmp12a, _tmp12b, vl);
vse32_v_f32m1(tmp[1][m], _tmp1m, vl);
vse32_v_f32m1(tmp[2][m], _tmp2m, vl);
vfloat32m1_t _tmp34a = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_r06, 0.25f, _r02, vl), -1.25f, _r04, vl);
vfloat32m1_t _tmp34b = vfmacc_vf_f32m1(vfmacc_vf_f32m1(vfmul_vf_f32m1(_r01, 0.5f, vl), -2.5f, _r03, vl), 2.f, _r05, vl);
vfloat32m1_t _tmp3m = vfadd_vv_f32m1(_tmp34a, _tmp34b, vl);
vfloat32m1_t _tmp4m = vfsub_vv_f32m1(_tmp34a, _tmp34b, vl);
vse32_v_f32m1(tmp[3][m], _tmp3m, vl);
vse32_v_f32m1(tmp[4][m], _tmp4m, vl);
vfloat32m1_t _tmp56a = vfmacc_vf_f32m1(_r06, 4.f, vfmacc_vf_f32m1(_r02, -1.25f, _r04, vl), vl);
vfloat32m1_t _tmp56b = vfmacc_vf_f32m1(vfmacc_vf_f32m1(vfmul_vf_f32m1(_r01, 2.f, vl), -2.5f, _r03, vl), 0.5f, _r05, vl);
vfloat32m1_t _tmp5m = vfadd_vv_f32m1(_tmp56a, _tmp56b, vl);
vfloat32m1_t _tmp6m = vfsub_vv_f32m1(_tmp56a, _tmp56b, vl);
vse32_v_f32m1(tmp[5][m], _tmp5m, vl);
vse32_v_f32m1(tmp[6][m], _tmp6m, vl);
r0 += w * packn;
}
// the 64 coefficients are scattered into 8 row-planes, each tiles*packn*8 apart
float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 8 + j) * packn;
float* r0_tm_1 = r0_tm_0 + tiles * packn;
float* r0_tm_2 = r0_tm_0 + tiles * packn * 2;
float* r0_tm_3 = r0_tm_0 + tiles * packn * 3;
float* r0_tm_4 = r0_tm_0 + tiles * packn * 4;
float* r0_tm_5 = r0_tm_0 + tiles * packn * 5;
float* r0_tm_6 = r0_tm_0 + tiles * packn * 6;
float* r0_tm_7 = r0_tm_0 + tiles * packn * 7;
// vertical pass: apply the same 1-D transform down the tmp columns
for (int m = 0; m < 8; m++)
{
vfloat32m1_t _tmp00 = vle32_v_f32m1(tmp[m][0], vl);
vfloat32m1_t _tmp01 = vle32_v_f32m1(tmp[m][1], vl);
vfloat32m1_t _tmp02 = vle32_v_f32m1(tmp[m][2], vl);
vfloat32m1_t _tmp03 = vle32_v_f32m1(tmp[m][3], vl);
vfloat32m1_t _tmp04 = vle32_v_f32m1(tmp[m][4], vl);
vfloat32m1_t _tmp05 = vle32_v_f32m1(tmp[m][5], vl);
vfloat32m1_t _tmp06 = vle32_v_f32m1(tmp[m][6], vl);
vfloat32m1_t _tmp07 = vle32_v_f32m1(tmp[m][7], vl);
vfloat32m1_t _r0tm0 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp00, _tmp06, vl), 5.25f, vfsub_vv_f32m1(_tmp04, _tmp02, vl), vl);
vfloat32m1_t _r0tm7 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp07, _tmp01, vl), 5.25f, vfsub_vv_f32m1(_tmp03, _tmp05, vl), vl);
vfloat32m1_t _tmp12a = vfmacc_vf_f32m1(vfadd_vv_f32m1(_tmp02, _tmp06, vl), -4.25f, _tmp04, vl);
vfloat32m1_t _tmp12b = vfmacc_vf_f32m1(vfadd_vv_f32m1(_tmp01, _tmp05, vl), -4.25f, _tmp03, vl);
vfloat32m1_t _r0tm1 = vfadd_vv_f32m1(_tmp12a, _tmp12b, vl);
vfloat32m1_t _r0tm2 = vfsub_vv_f32m1(_tmp12a, _tmp12b, vl);
vfloat32m1_t _tmp34a = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp06, 0.25f, _tmp02, vl), -1.25f, _tmp04, vl);
vfloat32m1_t _tmp34b = vfmacc_vf_f32m1(vfmacc_vf_f32m1(vfmul_vf_f32m1(_tmp01, 0.5f, vl), -2.5f, _tmp03, vl), 2.f, _tmp05, vl);
vfloat32m1_t _r0tm3 = vfadd_vv_f32m1(_tmp34a, _tmp34b, vl);
vfloat32m1_t _r0tm4 = vfsub_vv_f32m1(_tmp34a, _tmp34b, vl);
vfloat32m1_t _tmp56a = vfmacc_vf_f32m1(_tmp06, 4.f, vfmacc_vf_f32m1(_tmp02, -1.25f, _tmp04, vl), vl);
vfloat32m1_t _tmp56b = vfmacc_vf_f32m1(vfmacc_vf_f32m1(vfmul_vf_f32m1(_tmp01, 2.f, vl), -2.5f, _tmp03, vl), 0.5f, _tmp05, vl);
vfloat32m1_t _r0tm5 = vfadd_vv_f32m1(_tmp56a, _tmp56b, vl);
vfloat32m1_t _r0tm6 = vfsub_vv_f32m1(_tmp56a, _tmp56b, vl);
vse32_v_f32m1(r0_tm_0, _r0tm0, vl);
vse32_v_f32m1(r0_tm_1, _r0tm1, vl);
vse32_v_f32m1(r0_tm_2, _r0tm2, vl);
vse32_v_f32m1(r0_tm_3, _r0tm3, vl);
vse32_v_f32m1(r0_tm_4, _r0tm4, vl);
vse32_v_f32m1(r0_tm_5, _r0tm5, vl);
vse32_v_f32m1(r0_tm_6, _r0tm6, vl);
vse32_v_f32m1(r0_tm_7, _r0tm7, vl);
r0_tm_0 += tiles * packn * 8;
r0_tm_1 += tiles * packn * 8;
r0_tm_2 += tiles * packn * 8;
r0_tm_3 += tiles * packn * 8;
r0_tm_4 += tiles * packn * 8;
r0_tm_5 += tiles * packn * 8;
r0_tm_6 += tiles * packn * 8;
r0_tm_7 += tiles * packn * 8;
}
}
}
}
}
// release the padded input early to cap peak memory
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
// permute
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
// repack tiles in batches of 8/4/2/1 so the dot loop reads them contiguously
Mat bottom_blob_tm2;
if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator)
;
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row<float>(i / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * packn;
for (int q = 0; q < inch; q++)
{
// C906 lacks efficient segment stores, so transpose by scalar copies there
#if C906
for (int l = 0; l < packn; l++)
{
tmpptr[0] = r0[l];
tmpptr[1] = r0[l + packn];
tmpptr[2] = r0[l + packn * 2];
tmpptr[3] = r0[l + packn * 3];
tmpptr[4] = r0[l + packn * 4];
tmpptr[5] = r0[l + packn * 5];
tmpptr[6] = r0[l + packn * 6];
tmpptr[7] = r0[l + packn * 7];
tmpptr += 8;
}
r0 += bottom_blob_tm.cstep * packn;
#else
vfloat32m1_t _val0 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _val1 = vle32_v_f32m1(r0 + packn, vl);
vfloat32m1_t _val2 = vle32_v_f32m1(r0 + packn * 2, vl);
vfloat32m1_t _val3 = vle32_v_f32m1(r0 + packn * 3, vl);
vfloat32m1_t _val4 = vle32_v_f32m1(r0 + packn * 4, vl);
vfloat32m1_t _val5 = vle32_v_f32m1(r0 + packn * 5, vl);
vfloat32m1_t _val6 = vle32_v_f32m1(r0 + packn * 6, vl);
vfloat32m1_t _val7 = vle32_v_f32m1(r0 + packn * 7, vl);
vsseg8e32_v_f32m1x8(tmpptr, vcreate_f32m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl);
r0 += bottom_blob_tm.cstep * packn;
tmpptr += packn * 8;
#endif
}
}
for (; i + 3 < tiles; i += 4)
{
float* tmpptr = tm2.row<float>(i / 8 + (i % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * packn;
for (int q = 0; q < inch; q++)
{
#if C906
for (int l = 0; l < packn; l++)
{
tmpptr[0] = r0[l];
tmpptr[1] = r0[l + packn];
tmpptr[2] = r0[l + packn * 2];
tmpptr[3] = r0[l + packn * 3];
tmpptr += 4;
}
r0 += bottom_blob_tm.cstep * packn;
#else
vfloat32m1_t _val0 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _val1 = vle32_v_f32m1(r0 + packn, vl);
vfloat32m1_t _val2 = vle32_v_f32m1(r0 + packn * 2, vl);
vfloat32m1_t _val3 = vle32_v_f32m1(r0 + packn * 3, vl);
vsseg4e32_v_f32m1x4(tmpptr, vcreate_f32m1x4(_val0, _val1, _val2, _val3), vl);
r0 += bottom_blob_tm.cstep * packn;
tmpptr += packn * 4;
#endif
}
}
for (; i + 1 < tiles; i += 2)
{
float* tmpptr = tm2.row<float>(i / 8 + (i % 8) / 4 + (i % 4) / 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * packn;
for (int q = 0; q < inch; q++)
{
#if C906
for (int l = 0; l < packn; l++)
{
tmpptr[0] = r0[l];
tmpptr[1] = r0[l + packn];
tmpptr += 2;
}
r0 += bottom_blob_tm.cstep * packn;
#else
vfloat32m1_t _val0 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _val1 = vle32_v_f32m1(r0 + packn, vl);
vsseg2e32_v_f32m1x2(tmpptr, vcreate_f32m1x2(_val0, _val1), vl);
r0 += bottom_blob_tm.cstep * packn;
tmpptr += packn * 2;
#endif
}
}
for (; i < tiles; i++)
{
float* tmpptr = tm2.row<float>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * packn;
for (int q = 0; q < inch; q++)
{
vfloat32m1_t _val = vle32_v_f32m1(r0, vl);
vse32_v_f32m1(tmpptr, _val, vl);
r0 += bottom_blob_tm.cstep * packn;
tmpptr += packn;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, 4u * elempack, elempack, opt.workspace_allocator);
// dot: for each output channel and each of the 64 coefficients, accumulate
// inch*packn scalar*vector products into packn-wide sums, 8/4/2/1 tiles at a time
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row<const float>(i / 8);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * packn; // inch always > 0
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum2 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum3 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum4 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum5 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum6 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum7 = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
float val0 = *r0++;
float val1 = *r0++;
float val2 = *r0++;
float val3 = *r0++;
float val4 = *r0++;
float val5 = *r0++;
float val6 = *r0++;
float val7 = *r0++;
vfloat32m1_t _w0 = vle32_v_f32m1(k0, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, val2, _w0, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, val3, _w0, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, val4, _w0, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, val5, _w0, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, val6, _w0, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, val7, _w0, vl);
k0 += packn;
}
vse32_v_f32m1(output0_tm, _sum0, vl);
vse32_v_f32m1(output0_tm + packn, _sum1, vl);
vse32_v_f32m1(output0_tm + packn * 2, _sum2, vl);
vse32_v_f32m1(output0_tm + packn * 3, _sum3, vl);
vse32_v_f32m1(output0_tm + packn * 4, _sum4, vl);
vse32_v_f32m1(output0_tm + packn * 5, _sum5, vl);
vse32_v_f32m1(output0_tm + packn * 6, _sum6, vl);
vse32_v_f32m1(output0_tm + packn * 7, _sum7, vl);
output0_tm += packn * 8;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row<const float>(i / 8 + (i % 8) / 4);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * packn; // inch always > 0
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum2 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum3 = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
float val0 = *r0++;
float val1 = *r0++;
float val2 = *r0++;
float val3 = *r0++;
vfloat32m1_t _w0 = vle32_v_f32m1(k0, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, val2, _w0, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, val3, _w0, vl);
k0 += packn;
}
vse32_v_f32m1(output0_tm, _sum0, vl);
vse32_v_f32m1(output0_tm + packn, _sum1, vl);
vse32_v_f32m1(output0_tm + packn * 2, _sum2, vl);
vse32_v_f32m1(output0_tm + packn * 3, _sum3, vl);
output0_tm += packn * 4;
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row<const float>(i / 8 + (i % 8) / 4 + (i % 4) / 2);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * packn; // inch always > 0
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
float val0 = *r0++;
float val1 = *r0++;
vfloat32m1_t _w0 = vle32_v_f32m1(k0, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
k0 += packn;
}
vse32_v_f32m1(output0_tm, _sum0, vl);
vse32_v_f32m1(output0_tm + packn, _sum1, vl);
output0_tm += packn * 2;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row<const float>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * packn; // inch always > 0
vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
float val = *r0++;
vfloat32m1_t _w0 = vle32_v_f32m1(k0, vl);
_sum = vfmacc_vf_f32m1(_sum, val, _w0, vl);
k0 += packn;
}
vse32_v_f32m1(output0_tm, _sum, vl);
output0_tm += packn;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
// write straight into top_blob when no output padding was added, else use a bordered scratch blob
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
// const float otm[6][8] = {
//     {1.0f, 1.0f,  1.0f,  1.0f,  1.0f, 32.0f, 32.0f, 0.0f},
//     {0.0f, 1.0f, -1.0f,  2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
//     {0.0f, 1.0f,  1.0f,  4.0f,  4.0f,  8.0f,  8.0f, 0.0f},
//     {0.0f, 1.0f, -1.0f,  8.0f, -8.0f,  4.0f, -4.0f, 0.0f},
//     {0.0f, 1.0f,  1.0f, 16.0f, 16.0f,  2.0f,  2.0f, 0.0f},
//     {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4)     + (r5 + r6) * 32
// 1 =      (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 =      (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 =      (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 =      (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
// const float bias0 = bias ? bias[p] : 0.f;
vfloat32m1_t _bias0 = bias ? vle32_v_f32m1((const float*)bias + p * packn, vl) : vfmv_v_f_f32m1(0.f, vl);
// NOTE c99 variable length array
float tmp[6][8][packn];
// tile
for (int i = 0; i < outh / 6; i++)
{
for (int j = 0; j < outw / 6; j++)
{
// top_blob_tm.create(tiles, 64, outch, elemsize, elempack);
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * packn;
const float* output0_tm_1 = output0_tm_0 + tiles * packn;
const float* output0_tm_2 = output0_tm_0 + tiles * packn * 2;
const float* output0_tm_3 = output0_tm_0 + tiles * packn * 3;
const float* output0_tm_4 = output0_tm_0 + tiles * packn * 4;
const float* output0_tm_5 = output0_tm_0 + tiles * packn * 5;
const float* output0_tm_6 = output0_tm_0 + tiles * packn * 6;
const float* output0_tm_7 = output0_tm_0 + tiles * packn * 7;
float* output0 = out0.row<float>(i * 6) + (j * 6) * packn;
// TODO rvv optimize
// horizontal pass of the inverse transform: 8 rows -> 6 tmp rows
for (int m = 0; m < 8; m++)
{
vfloat32m1_t _out0tm0 = vle32_v_f32m1(output0_tm_0, vl);
vfloat32m1_t _out0tm1 = vle32_v_f32m1(output0_tm_1, vl);
vfloat32m1_t _out0tm2 = vle32_v_f32m1(output0_tm_2, vl);
vfloat32m1_t _out0tm3 = vle32_v_f32m1(output0_tm_3, vl);
vfloat32m1_t _out0tm4 = vle32_v_f32m1(output0_tm_4, vl);
vfloat32m1_t _out0tm5 = vle32_v_f32m1(output0_tm_5, vl);
vfloat32m1_t _out0tm6 = vle32_v_f32m1(output0_tm_6, vl);
vfloat32m1_t _out0tm7 = vle32_v_f32m1(output0_tm_7, vl);
vfloat32m1_t _tmp024a = vfadd_vv_f32m1(_out0tm1, _out0tm2, vl);
vfloat32m1_t _tmp135a = vfsub_vv_f32m1(_out0tm1, _out0tm2, vl);
vfloat32m1_t _tmp024b = vfadd_vv_f32m1(_out0tm3, _out0tm4, vl);
vfloat32m1_t _tmp135b = vfsub_vv_f32m1(_out0tm3, _out0tm4, vl);
vfloat32m1_t _tmp024c = vfadd_vv_f32m1(_out0tm5, _out0tm6, vl);
vfloat32m1_t _tmp135c = vfsub_vv_f32m1(_out0tm5, _out0tm6, vl);
vfloat32m1_t _tmp0m = vfadd_vv_f32m1(vfadd_vv_f32m1(_out0tm0, _tmp024a, vl), vfmacc_vf_f32m1(_tmp024b, 32.f, _tmp024c, vl), vl);
vfloat32m1_t _tmp2m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp024a, 4.f, _tmp024b, vl), 8.f, _tmp024c, vl);
vfloat32m1_t _tmp4m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp024a, 16.f, _tmp024b, vl), 2.f, _tmp024c, vl);
vse32_v_f32m1(tmp[0][m], _tmp0m, vl);
vse32_v_f32m1(tmp[2][m], _tmp2m, vl);
vse32_v_f32m1(tmp[4][m], _tmp4m, vl);
vfloat32m1_t _tmp1m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp135a, 2.f, _tmp135b, vl), 16.f, _tmp135c, vl);
vfloat32m1_t _tmp3m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp135a, 8.f, _tmp135b, vl), 4.f, _tmp135c, vl);
vfloat32m1_t _tmp5m = vfadd_vv_f32m1(vfadd_vv_f32m1(_out0tm7, _tmp135a, vl), vfmacc_vf_f32m1(_tmp135c, 32.f, _tmp135b, vl), vl);
vse32_v_f32m1(tmp[1][m], _tmp1m, vl);
vse32_v_f32m1(tmp[3][m], _tmp3m, vl);
vse32_v_f32m1(tmp[5][m], _tmp5m, vl);
output0_tm_0 += tiles * packn * 8;
output0_tm_1 += tiles * packn * 8;
output0_tm_2 += tiles * packn * 8;
output0_tm_3 += tiles * packn * 8;
output0_tm_4 += tiles * packn * 8;
output0_tm_5 += tiles * packn * 8;
output0_tm_6 += tiles * packn * 8;
output0_tm_7 += tiles * packn * 8;
}
// vertical pass: 6 tmp rows -> 6x6 spatial outputs, bias added here
for (int m = 0; m < 6; m++)
{
vfloat32m1_t _tmp00 = vle32_v_f32m1(tmp[m][0], vl);
vfloat32m1_t _tmp01 = vle32_v_f32m1(tmp[m][1], vl);
vfloat32m1_t _tmp02 = vle32_v_f32m1(tmp[m][2], vl);
vfloat32m1_t _tmp03 = vle32_v_f32m1(tmp[m][3], vl);
vfloat32m1_t _tmp04 = vle32_v_f32m1(tmp[m][4], vl);
vfloat32m1_t _tmp05 = vle32_v_f32m1(tmp[m][5], vl);
vfloat32m1_t _tmp06 = vle32_v_f32m1(tmp[m][6], vl);
vfloat32m1_t _tmp07 = vle32_v_f32m1(tmp[m][7], vl);
vfloat32m1_t _tmp024a = vfadd_vv_f32m1(_tmp01, _tmp02, vl);
vfloat32m1_t _tmp135a = vfsub_vv_f32m1(_tmp01, _tmp02, vl);
vfloat32m1_t _tmp024b = vfadd_vv_f32m1(_tmp03, _tmp04, vl);
vfloat32m1_t _tmp135b = vfsub_vv_f32m1(_tmp03, _tmp04, vl);
vfloat32m1_t _tmp024c = vfadd_vv_f32m1(_tmp05, _tmp06, vl);
vfloat32m1_t _tmp135c = vfsub_vv_f32m1(_tmp05, _tmp06, vl);
vfloat32m1_t _out00 = vfadd_vv_f32m1(_bias0, vfadd_vv_f32m1(vfadd_vv_f32m1(_tmp00, _tmp024a, vl), vfmacc_vf_f32m1(_tmp024b, 32.f, _tmp024c, vl), vl), vl);
vfloat32m1_t _out02 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp024a, 4.f, _tmp024b, vl), 8.f, _tmp024c, vl), vl);
vfloat32m1_t _out04 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp024a, 16.f, _tmp024b, vl), 2.f, _tmp024c, vl), vl);
vse32_v_f32m1(output0, _out00, vl);
vse32_v_f32m1(output0 + packn * 2, _out02, vl);
vse32_v_f32m1(output0 + packn * 4, _out04, vl);
vfloat32m1_t _out01 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp135a, 2.f, _tmp135b, vl), 16.f, _tmp135c, vl), vl);
vfloat32m1_t _out03 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp135a, 8.f, _tmp135b, vl), 4.f, _tmp135c, vl), vl);
vfloat32m1_t _out05 = vfadd_vv_f32m1(_bias0, vfadd_vv_f32m1(vfadd_vv_f32m1(_tmp07, _tmp135a, vl), vfmacc_vf_f32m1(_tmp135c, 32.f, _tmp135b, vl), vl), vl);
vse32_v_f32m1(output0 + packn, _out01, vl);
vse32_v_f32m1(output0 + packn * 3, _out03, vl);
vse32_v_f32m1(output0 + packn * 5, _out05, vl);
output0 += outw * packn;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd42_transform_kernel_packn_rvv(const Mat& kernel, Mat& kernel_tm_packn, int inch, int outch, const Option& opt)
{
const int packn = csrr_vlenb() / 4;
// winograd42 transform kernel
Mat kernel_tm(6 * 6, inch, outch);
const float ktm[6][3] = {
{1.0f / 4, 0.0f, 0.0f},
{-1.0f / 6, -1.0f / 6, -1.0f / 6},
{-1.0f / 6, 1.0f / 6, -1.0f / 6},
{1.0f / 24, 1.0f / 12, 1.0f / 6},
{1.0f / 24, -1.0f / 12, 1.0f / 6},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 6; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 36-inch-outch
// dst = pb-pa-inch/pa-36-outch/pb
kernel_tm_packn.create(inch / packn, 36, outch / packn, (size_t)4u * packn * packn, packn * packn);
for (int q = 0; q + (packn - 1) < outch; q += packn)
{
Mat g0 = kernel_tm_packn.channel(q / packn);
for (int k = 0; k < 36; k++)
{
float* g00 = g0.row<float>(k);
for (int p = 0; p + (packn - 1) < inch; p += packn)
{
for (int i = 0; i < packn; i++)
{
for (int j = 0; j < packn; j++)
{
const float* k00 = kernel_tm.channel(q + j).row(p + i);
g00[0] = (float)k00[k];
g00++;
}
}
}
}
}
}
static void conv3x3s1_winograd42_packn_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
bottom_blob_tm.create(tiles, 36, inch, 4u * elempack, elempack, opt.workspace_allocator);
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r04 + r03
// 2 = 4 * (r01 - r02) + r04 - r03
// 3 = -2 * (r01 - r03) + r04 - r02
// 4 = 2 * (r01 - r03) + r04 - r02
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
// NOTE c99 variable length array
float tmp[6][6][packn];
// tile
for (int i = 0; i < h_tm / 6; i++)
{
for (int j = 0; j < w_tm / 6; j++)
{
const float* r0 = img0.row<const float>(i * 4) + (j * 4) * packn;
for (int m = 0; m < 6; m++)
{
vfloat32m1_t _r00 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _r01 = vle32_v_f32m1(r0 + packn, vl);
vfloat32m1_t _r02 = vle32_v_f32m1(r0 + packn * 2, vl);
vfloat32m1_t _r03 = vle32_v_f32m1(r0 + packn * 3, vl);
vfloat32m1_t _r04 = vle32_v_f32m1(r0 + packn * 4, vl);
vfloat32m1_t _r05 = vle32_v_f32m1(r0 + packn * 5, vl);
vfloat32m1_t _tmp0m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_r04, 4.f, _r00, vl), -5.f, _r02, vl);
vfloat32m1_t _tmp1m = vfmacc_vf_f32m1(vfadd_vv_f32m1(_r04, _r03, vl), -4.f, vfadd_vv_f32m1(_r01, _r02, vl), vl);
vfloat32m1_t _tmp2m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r04, _r03, vl), 4.f, vfsub_vv_f32m1(_r01, _r02, vl), vl);
vfloat32m1_t _tmp3m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r04, _r02, vl), -2.f, vfsub_vv_f32m1(_r01, _r03, vl), vl);
vfloat32m1_t _tmp4m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r04, _r02, vl), 2.f, vfsub_vv_f32m1(_r01, _r03, vl), vl);
vfloat32m1_t _tmp5m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_r05, 4.f, _r01, vl), -5.f, _r03, vl);
vse32_v_f32m1(tmp[0][m], _tmp0m, vl);
vse32_v_f32m1(tmp[1][m], _tmp1m, vl);
vse32_v_f32m1(tmp[2][m], _tmp2m, vl);
vse32_v_f32m1(tmp[3][m], _tmp3m, vl);
vse32_v_f32m1(tmp[4][m], _tmp4m, vl);
vse32_v_f32m1(tmp[5][m], _tmp5m, vl);
r0 += w * packn;
}
float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 6 + j) * packn;
float* r0_tm_1 = r0_tm_0 + tiles * packn;
float* r0_tm_2 = r0_tm_0 + tiles * packn * 2;
float* r0_tm_3 = r0_tm_0 + tiles * packn * 3;
float* r0_tm_4 = r0_tm_0 + tiles * packn * 4;
float* r0_tm_5 = r0_tm_0 + tiles * packn * 5;
for (int m = 0; m < 6; m++)
{
vfloat32m1_t _tmp00 = vle32_v_f32m1(tmp[m][0], vl);
vfloat32m1_t _tmp01 = vle32_v_f32m1(tmp[m][1], vl);
vfloat32m1_t _tmp02 = vle32_v_f32m1(tmp[m][2], vl);
vfloat32m1_t _tmp03 = vle32_v_f32m1(tmp[m][3], vl);
vfloat32m1_t _tmp04 = vle32_v_f32m1(tmp[m][4], vl);
vfloat32m1_t _tmp05 = vle32_v_f32m1(tmp[m][5], vl);
vfloat32m1_t _r0tm0 = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp04, 4.f, _tmp00, vl), -5.f, _tmp02, vl);
vfloat32m1_t _r0tm1 = vfmacc_vf_f32m1(vfadd_vv_f32m1(_tmp04, _tmp03, vl), -4.f, vfadd_vv_f32m1(_tmp01, _tmp02, vl), vl);
vfloat32m1_t _r0tm2 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp04, _tmp03, vl), 4.f, vfsub_vv_f32m1(_tmp01, _tmp02, vl), vl);
vfloat32m1_t _r0tm3 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp04, _tmp02, vl), -2.f, vfsub_vv_f32m1(_tmp01, _tmp03, vl), vl);
vfloat32m1_t _r0tm4 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp04, _tmp02, vl), 2.f, vfsub_vv_f32m1(_tmp01, _tmp03, vl), vl);
vfloat32m1_t _r0tm5 = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp05, 4.f, _tmp01, vl), -5.f, _tmp03, vl);
vse32_v_f32m1(r0_tm_0, _r0tm0, vl);
vse32_v_f32m1(r0_tm_1, _r0tm1, vl);
vse32_v_f32m1(r0_tm_2, _r0tm2, vl);
vse32_v_f32m1(r0_tm_3, _r0tm3, vl);
vse32_v_f32m1(r0_tm_4, _r0tm4, vl);
vse32_v_f32m1(r0_tm_5, _r0tm5, vl);
r0_tm_0 += tiles * packn * 6;
r0_tm_1 += tiles * packn * 6;
r0_tm_2 += tiles * packn * 6;
r0_tm_3 += tiles * packn * 6;
r0_tm_4 += tiles * packn * 6;
r0_tm_5 += tiles * packn * 6;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = h_tm / 6 * w_tm / 6;
// permute
// bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 4u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 36; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row<float>(i / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * packn;
for (int q = 0; q < inch; q++)
{
#if C906
for (int l = 0; l < packn; l++)
{
tmpptr[0] = r0[l];
tmpptr[1] = r0[l + packn];
tmpptr[2] = r0[l + packn * 2];
tmpptr[3] = r0[l + packn * 3];
tmpptr[4] = r0[l + packn * 4];
tmpptr[5] = r0[l + packn * 5];
tmpptr[6] = r0[l + packn * 6];
tmpptr[7] = r0[l + packn * 7];
tmpptr += 8;
}
r0 += bottom_blob_tm.cstep * packn;
#else
vfloat32m1_t _val0 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _val1 = vle32_v_f32m1(r0 + packn, vl);
vfloat32m1_t _val2 = vle32_v_f32m1(r0 + packn * 2, vl);
vfloat32m1_t _val3 = vle32_v_f32m1(r0 + packn * 3, vl);
vfloat32m1_t _val4 = vle32_v_f32m1(r0 + packn * 4, vl);
vfloat32m1_t _val5 = vle32_v_f32m1(r0 + packn * 5, vl);
vfloat32m1_t _val6 = vle32_v_f32m1(r0 + packn * 6, vl);
vfloat32m1_t _val7 = vle32_v_f32m1(r0 + packn * 7, vl);
vsseg8e32_v_f32m1x8(tmpptr, vcreate_f32m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl);
r0 += bottom_blob_tm.cstep * packn;
tmpptr += packn * 8;
#endif
}
}
for (; i + 3 < tiles; i += 4)
{
float* tmpptr = tm2.row<float>(i / 8 + (i % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * packn;
for (int q = 0; q < inch; q++)
{
#if C906
for (int l = 0; l < packn; l++)
{
tmpptr[0] = r0[l];
tmpptr[1] = r0[l + packn];
tmpptr[2] = r0[l + packn * 2];
tmpptr[3] = r0[l + packn * 3];
tmpptr += 4;
}
r0 += bottom_blob_tm.cstep * packn;
#else
vfloat32m1_t _val0 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _val1 = vle32_v_f32m1(r0 + packn, vl);
vfloat32m1_t _val2 = vle32_v_f32m1(r0 + packn * 2, vl);
vfloat32m1_t _val3 = vle32_v_f32m1(r0 + packn * 3, vl);
vsseg4e32_v_f32m1x4(tmpptr, vcreate_f32m1x4(_val0, _val1, _val2, _val3), vl);
r0 += bottom_blob_tm.cstep * packn;
tmpptr += packn * 4;
#endif
}
}
for (; i + 1 < tiles; i += 2)
{
float* tmpptr = tm2.row<float>(i / 8 + (i % 8) / 4 + (i % 4) / 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * packn;
for (int q = 0; q < inch; q++)
{
#if C906
for (int l = 0; l < packn; l++)
{
tmpptr[0] = r0[l];
tmpptr[1] = r0[l + packn];
tmpptr += 2;
}
r0 += bottom_blob_tm.cstep * packn;
#else
vfloat32m1_t _val0 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _val1 = vle32_v_f32m1(r0 + packn, vl);
vsseg2e32_v_f32m1x2(tmpptr, vcreate_f32m1x2(_val0, _val1), vl);
r0 += bottom_blob_tm.cstep * packn;
tmpptr += packn * 2;
#endif
}
}
for (; i < tiles; i++)
{
float* tmpptr = tm2.row<float>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * packn;
for (int q = 0; q < inch; q++)
{
vfloat32m1_t _val = vle32_v_f32m1(r0, vl);
vse32_v_f32m1(tmpptr, _val, vl);
r0 += bottom_blob_tm.cstep * packn;
tmpptr += packn;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 36, outch, 4u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row<const float>(i / 8);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * packn; // inch always > 0
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum2 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum3 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum4 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum5 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum6 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum7 = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
float val0 = *r0++;
float val1 = *r0++;
float val2 = *r0++;
float val3 = *r0++;
float val4 = *r0++;
float val5 = *r0++;
float val6 = *r0++;
float val7 = *r0++;
vfloat32m1_t _w0 = vle32_v_f32m1(k0, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, val2, _w0, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, val3, _w0, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, val4, _w0, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, val5, _w0, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, val6, _w0, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, val7, _w0, vl);
k0 += packn;
}
vse32_v_f32m1(output0_tm, _sum0, vl);
vse32_v_f32m1(output0_tm + packn, _sum1, vl);
vse32_v_f32m1(output0_tm + packn * 2, _sum2, vl);
vse32_v_f32m1(output0_tm + packn * 3, _sum3, vl);
vse32_v_f32m1(output0_tm + packn * 4, _sum4, vl);
vse32_v_f32m1(output0_tm + packn * 5, _sum5, vl);
vse32_v_f32m1(output0_tm + packn * 6, _sum6, vl);
vse32_v_f32m1(output0_tm + packn * 7, _sum7, vl);
output0_tm += packn * 8;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row<const float>(i / 8 + (i % 8) / 4);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * packn; // inch always > 0
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum2 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum3 = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
float val0 = *r0++;
float val1 = *r0++;
float val2 = *r0++;
float val3 = *r0++;
vfloat32m1_t _w0 = vle32_v_f32m1(k0, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, val2, _w0, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, val3, _w0, vl);
k0 += packn;
}
vse32_v_f32m1(output0_tm, _sum0, vl);
vse32_v_f32m1(output0_tm + packn, _sum1, vl);
vse32_v_f32m1(output0_tm + packn * 2, _sum2, vl);
vse32_v_f32m1(output0_tm + packn * 3, _sum3, vl);
output0_tm += packn * 4;
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row<const float>(i / 8 + (i % 8) / 4 + (i % 4) / 2);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * packn; // inch always > 0
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
float val0 = *r0++;
float val1 = *r0++;
vfloat32m1_t _w0 = vle32_v_f32m1(k0, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
k0 += packn;
}
vse32_v_f32m1(output0_tm, _sum0, vl);
vse32_v_f32m1(output0_tm + packn, _sum1, vl);
output0_tm += packn * 2;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row<const float>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * packn; // inch always > 0
vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
float val = *r0++;
vfloat32m1_t _w0 = vle32_v_f32m1(k0, vl);
_sum = vfmacc_vf_f32m1(_sum, val, _w0, vl);
k0 += packn;
}
vse32_v_f32m1(output0_tm, _sum, vl);
output0_tm += packn;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
// const float otm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + (r01 + r02) + (r03 + r04)
// 1 = (r01 - r02) + (r03 - r04) * 2
// 2 = (r01 + r02) + (r03 + r04) * 4
// 3 = r05 + (r01 - r02) + (r03 - r04) * 8
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
// const float bias0 = bias ? bias[p] : 0.f;
vfloat32m1_t _bias0 = bias ? vle32_v_f32m1((const float*)bias + p * packn, vl) : vfmv_v_f_f32m1(0.f, vl);
// NOTE variable length array
float tmp[4][6][packn];
// tile
for (int i = 0; i < outh / 4; i++)
{
for (int j = 0; j < outw / 4; j++)
{
// top_blob_tm.create(tiles, 36, outch, elemsize, elempack);
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 6 + j) * packn;
const float* output0_tm_1 = output0_tm_0 + tiles * packn;
const float* output0_tm_2 = output0_tm_0 + tiles * packn * 2;
const float* output0_tm_3 = output0_tm_0 + tiles * packn * 3;
const float* output0_tm_4 = output0_tm_0 + tiles * packn * 4;
const float* output0_tm_5 = output0_tm_0 + tiles * packn * 5;
float* output0 = out0.row<float>(i * 4) + (j * 4) * packn;
// TODO rvv optimize
for (int m = 0; m < 6; m++)
{
vfloat32m1_t _out0tm0 = vle32_v_f32m1(output0_tm_0, vl);
vfloat32m1_t _out0tm1 = vle32_v_f32m1(output0_tm_1, vl);
vfloat32m1_t _out0tm2 = vle32_v_f32m1(output0_tm_2, vl);
vfloat32m1_t _out0tm3 = vle32_v_f32m1(output0_tm_3, vl);
vfloat32m1_t _out0tm4 = vle32_v_f32m1(output0_tm_4, vl);
vfloat32m1_t _out0tm5 = vle32_v_f32m1(output0_tm_5, vl);
vfloat32m1_t _tmp02a = vfadd_vv_f32m1(_out0tm1, _out0tm2, vl);
vfloat32m1_t _tmp13a = vfsub_vv_f32m1(_out0tm1, _out0tm2, vl);
vfloat32m1_t _tmp02b = vfadd_vv_f32m1(_out0tm3, _out0tm4, vl);
vfloat32m1_t _tmp13b = vfsub_vv_f32m1(_out0tm3, _out0tm4, vl);
vfloat32m1_t _tmp0m = vfadd_vv_f32m1(vfadd_vv_f32m1(_out0tm0, _tmp02a, vl), _tmp02b, vl);
vfloat32m1_t _tmp1m = vfmacc_vf_f32m1(_tmp13a, 2.f, _tmp13b, vl);
vfloat32m1_t _tmp2m = vfmacc_vf_f32m1(_tmp02a, 4.f, _tmp02b, vl);
vfloat32m1_t _tmp3m = vfmacc_vf_f32m1(vfadd_vv_f32m1(_out0tm5, _tmp13a, vl), 8.f, _tmp13b, vl);
vse32_v_f32m1(tmp[0][m], _tmp0m, vl);
vse32_v_f32m1(tmp[1][m], _tmp1m, vl);
vse32_v_f32m1(tmp[2][m], _tmp2m, vl);
vse32_v_f32m1(tmp[3][m], _tmp3m, vl);
output0_tm_0 += tiles * packn * 6;
output0_tm_1 += tiles * packn * 6;
output0_tm_2 += tiles * packn * 6;
output0_tm_3 += tiles * packn * 6;
output0_tm_4 += tiles * packn * 6;
output0_tm_5 += tiles * packn * 6;
}
for (int m = 0; m < 4; m++)
{
vfloat32m1_t _tmp00 = vle32_v_f32m1(tmp[m][0], vl);
vfloat32m1_t _tmp01 = vle32_v_f32m1(tmp[m][1], vl);
vfloat32m1_t _tmp02 = vle32_v_f32m1(tmp[m][2], vl);
vfloat32m1_t _tmp03 = vle32_v_f32m1(tmp[m][3], vl);
vfloat32m1_t _tmp04 = vle32_v_f32m1(tmp[m][4], vl);
vfloat32m1_t _tmp05 = vle32_v_f32m1(tmp[m][5], vl);
vfloat32m1_t _tmp02a = vfadd_vv_f32m1(_tmp01, _tmp02, vl);
vfloat32m1_t _tmp13a = vfsub_vv_f32m1(_tmp01, _tmp02, vl);
vfloat32m1_t _tmp02b = vfadd_vv_f32m1(_tmp03, _tmp04, vl);
vfloat32m1_t _tmp13b = vfsub_vv_f32m1(_tmp03, _tmp04, vl);
vfloat32m1_t _out00 = vfadd_vv_f32m1(_bias0, vfadd_vv_f32m1(vfadd_vv_f32m1(_tmp00, _tmp02a, vl), _tmp02b, vl), vl);
vfloat32m1_t _out01 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(_tmp13a, 2.f, _tmp13b, vl), vl);
vfloat32m1_t _out02 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(_tmp02a, 4.f, _tmp02b, vl), vl);
vfloat32m1_t _out03 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(vfadd_vv_f32m1(_tmp05, _tmp13a, vl), 8.f, _tmp13b, vl), vl);
vse32_v_f32m1(output0, _out00, vl);
vse32_v_f32m1(output0 + packn, _out01, vl);
vse32_v_f32m1(output0 + packn * 2, _out02, vl);
vse32_v_f32m1(output0 + packn * 3, _out03, vl);
output0 += outw * packn;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declaractions.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declaractions.
*/
/*
  Per-channel compression methods defined by the PSD file format
  specification ("Compression" field of the image/channel data sections).
*/
typedef enum
{
Raw = 0,                  /* uncompressed scanlines */
RLE = 1,                  /* PackBits-style run-length encoding */
ZipWithoutPrediction = 2, /* zlib deflate */
ZipWithPrediction = 3     /* zlib deflate with delta prediction */
} PSDCompressionType;
/*
  PSD color modes ("Mode" field of the file header); see ModeToString()
  for the human-readable names.
*/
typedef enum
{
BitmapMode = 0,
GrayscaleMode = 1,
IndexedMode = 2,
RGBMode = 3,
CMYKMode = 4,
MultichannelMode = 7,
DuotoneMode = 8,
LabMode = 9
} PSDImageType;
/*
Typedef declaractions.
*/
/*
  One entry of a layer's channel table: the channel type (negative values
  select alpha/mask handling in SetPSDPixel) and the stored data size in
  bytes.
*/
typedef struct _ChannelInfo
{
short
type;
size_t
size;
} ChannelInfo;
/*
  A layer's optional opacity mask: the decoded mask image, its placement
  rectangle, the default background value, and the raw mask flags byte.
*/
typedef struct _MaskInfo
{
Image
*image;
RectangleInfo
page;
unsigned char
background,
flags;
} MaskInfo;
/*
  Parsed header of one PSD layer record together with the decoded layer
  image and any attached opacity mask.
*/
typedef struct _LayerInfo
{
ChannelInfo
channel_info[MaxPSDChannels]; /* per-channel type and data size */
char
blendkey[4]; /* 4-character blend-mode key (see PSDBlendModeToCompositeOperator) */
Image
*image; /* decoded layer pixels */
MaskInfo
mask; /* optional layer/opacity mask */
Quantum
opacity; /* layer opacity scaled to the Quantum range */
RectangleInfo
page; /* layer bounding box within the canvas */
size_t
offset_x,
offset_y;
unsigned char
clipping,
flags,
name[257], /* NOTE(review): presumably a Pascal-style name, <=256 bytes + NUL — confirm */
visible;
unsigned short
channels; /* number of entries used in channel_info */
StringInfo
*info; /* raw additional-information blocks for this layer */
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD()() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
/*
  Return MagickTrue when the leading bytes of 'magick' spell the PSD
  signature "8BPS" (case-insensitive compare, matching the reader);
  MagickFalse otherwise, including when fewer than 4 bytes are available.
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  MagickBooleanType
    is_psd;

  is_psd=MagickFalse;
  if ((length >= 4) &&
      (LocaleNCompare((const char *) magick,"8BPS",4) == 0))
    is_psd=MagickTrue;
  return(is_psd);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CompositeOperatorToPSDBlendMode() maps an ImageMagick composite operator
  to the 4-character PSD blend-mode key.  On little-endian images the key
  is emitted byte-reversed (e.g. "vidi" for "idiv") to compensate for the
  byte reversal applied to PSD strings on little-endian blobs — see
  ReadPSDString() for the matching read-side swap.  Unhandled operators
  fall back to "norm" (normal blending).
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
switch (image->compose)
{
case ColorBurnCompositeOp:
return(image->endian == LSBEndian ? "vidi" : "idiv");
case ColorDodgeCompositeOp:
return(image->endian == LSBEndian ? " vid" : "div ");
case ColorizeCompositeOp:
return(image->endian == LSBEndian ? "rloc" : "colr");
case DarkenCompositeOp:
return(image->endian == LSBEndian ? "krad" : "dark");
case DifferenceCompositeOp:
return(image->endian == LSBEndian ? "ffid" : "diff");
case DissolveCompositeOp:
return(image->endian == LSBEndian ? "ssid" : "diss");
case ExclusionCompositeOp:
return(image->endian == LSBEndian ? "dums" : "smud");
case HardLightCompositeOp:
return(image->endian == LSBEndian ? "tiLh" : "hLit");
case HardMixCompositeOp:
return(image->endian == LSBEndian ? "xiMh" : "hMix");
case HueCompositeOp:
return(image->endian == LSBEndian ? " euh" : "hue ");
case LightenCompositeOp:
return(image->endian == LSBEndian ? "etil" : "lite");
case LinearBurnCompositeOp:
return(image->endian == LSBEndian ? "nrbl" : "lbrn");
case LinearDodgeCompositeOp:
return(image->endian == LSBEndian ? "gddl" : "lddg");
case LinearLightCompositeOp:
return(image->endian == LSBEndian ? "tiLl" : "lLit");
case LuminizeCompositeOp:
return(image->endian == LSBEndian ? " mul" : "lum ");
case MultiplyCompositeOp:
return(image->endian == LSBEndian ? " lum" : "mul ");
case OverlayCompositeOp:
return(image->endian == LSBEndian ? "revo" : "over");
case PinLightCompositeOp:
return(image->endian == LSBEndian ? "tiLp" : "pLit");
case SaturateCompositeOp:
return(image->endian == LSBEndian ? " tas" : "sat ");
case ScreenCompositeOp:
return(image->endian == LSBEndian ? "nrcs" : "scrn");
case SoftLightCompositeOp:
return(image->endian == LSBEndian ? "tiLs" : "sLit");
case VividLightCompositeOp:
return(image->endian == LSBEndian ? "tiLv" : "vLit");
case OverCompositeOp:
default:
return(image->endian == LSBEndian ? "mron" : "norm");
}
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
Image *image,ExceptionInfo* exception)
{
const char
*option;
MagickBooleanType
status;
ssize_t
y;
/* Only sRGB images with a blended alpha channel need un-blending. */
if ((image->alpha_trait != BlendPixelTrait) ||
(image->colorspace != sRGBColorspace))
return(MagickTrue);
/* Honor the 'psd:alpha-unblend=off' opt-out. */
option=GetImageOption(image_info,"psd:alpha-unblend");
if (IsStringFalse(option) != MagickFalse)
return(MagickTrue);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
/* A row failure in any thread aborts the remaining rows. */
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
ssize_t
i;
gamma=QuantumScale*GetPixelAlpha(image, q);
/* Fully transparent or fully opaque pixels were not blended. */
if (gamma != 0.0 && gamma != 1.0)
{
/*
Invert stored = gamma*color + (1-gamma)*white for every
color channel, leaving the alpha channel untouched.
*/
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
if (channel != AlphaPixelChannel)
q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
}
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
/*
  Translate a PSD compression tag into the corresponding ImageMagick
  CompressionType; unrecognized tags map to NoCompression.
*/
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
/*
  ApplyPSDLayerOpacity() folds a layer's global opacity into its alpha
  channel: each pixel's alpha is scaled by opacity/QuantumRange, or with
  'revert' set, divided back out (used when writing the layer again).
  A fully opaque layer is left untouched.
*/
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
MagickBooleanType revert,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying layer opacity %.20g", (double) opacity);
if (opacity == OpaqueAlpha)
return(MagickTrue);
/* Make sure there is an alpha channel to scale. */
if (image->alpha_trait != BlendPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
/* Forward: alpha *= opacity; revert: alpha /= opacity (opacity > 0). */
if (revert == MagickFalse)
SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
opacity),q);
else if (opacity > 0)
SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
(MagickRealType) opacity)),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
/*
  ApplyPSDOpacityMask() multiplies the image's alpha channel by the
  intensity of the layer's opacity mask (or divides it back out when
  'revert' is set).  The mask is first composited over a clone of the
  image filled with the mask 'background' value so that areas outside the
  mask rectangle get the background intensity.
*/
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
Image
*complete_mask;
MagickBooleanType
status;
PixelInfo
color;
ssize_t
y;
/* Nothing to do when the image carries no alpha channel. */
if (image->alpha_trait == UndefinedPixelTrait)
return(MagickTrue);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying opacity mask");
/* Build a full-canvas mask: background fill + mask composited at its page offset. */
complete_mask=CloneImage(image,0,0,MagickTrue,exception);
if (complete_mask == (Image *) NULL)
return(MagickFalse);
complete_mask->alpha_trait=BlendPixelTrait;
GetPixelInfo(complete_mask,&color);
color.red=(MagickRealType) background;
(void) SetImageColor(complete_mask,&color,exception);
status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
if (status == MagickFalse)
{
complete_mask=DestroyImage(complete_mask);
return(status);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
Quantum
*p;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
alpha,
intensity;
alpha=(MagickRealType) GetPixelAlpha(image,q);
intensity=GetPixelIntensity(complete_mask,p);
/* Forward: alpha *= intensity/QuantumRange; revert divides it back. */
if (revert == MagickFalse)
SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
else if (intensity > 0)
SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
q+=GetPixelChannels(image);
p+=GetPixelChannels(complete_mask);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
complete_mask=DestroyImage(complete_mask);
return(status);
}
/*
  PreservePSDOpacityMask() stashes a layer's opacity-mask image in the
  global image registry under a random key and records that key in the
  layer image's "psd:opacity-mask" artifact so the PSD writer can recover
  the mask later.

  The key is 8 random bytes, then the mask background value, then a NUL;
  the writer distinguishes a zero from a non-zero background by the
  resulting string length.
*/
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  /*
    Request 8+1 bytes so the key[8]/key[9] stores below stay within the
    requested length; the previous 2+1 request left key[3..7]
    uninitialized and wrote past the requested key size.
  */
  key_info=GetRandomKey(random_info,8+1);
  key=(char *) GetStringInfoDatum(key_info);
  key[8]=(char) layer_info->mask.background;
  key[9]='\0';
  /* Translate the mask page into canvas coordinates before stashing it. */
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
/*
  Unpack one data byte into 'out' according to 'depth' (1, 2, 4, or 8+
  bits per sample).  Returns the number of samples written, or -1 when
  fewer than the required number of samples remain in the output buffer.
*/
static ssize_t ExpandPSDPixelByte(const int value,const ssize_t depth,
  const size_t pixels_left,unsigned char *out)
{
  switch (depth)
  {
    case 1:
    {
      ssize_t
        bit;

      if (pixels_left < 8)
        return(-1);
      /* Bilevel samples are stored inverted: a set bit means black (0). */
      for (bit=7; bit >= 0; bit--)
        *out++=(value >> bit) & 0x01 ? 0U : 255U;
      return(8);
    }
    case 2:
    {
      if (pixels_left < 4)
        return(-1);
      out[0]=(unsigned char) ((value >> 6) & 0x03);
      out[1]=(unsigned char) ((value >> 4) & 0x03);
      out[2]=(unsigned char) ((value >> 2) & 0x03);
      out[3]=(unsigned char) (value & 0x03);
      return(4);
    }
    case 4:
    {
      if (pixels_left < 2)
        return(-1);
      out[0]=(unsigned char) ((value >> 4) & 0x0f);
      out[1]=(unsigned char) (value & 0x0f);
      return(2);
    }
    default:
    {
      if (pixels_left < 1)
        return(-1);
      out[0]=(unsigned char) value;
      return(1);
    }
  }
}

/*
  DecodePSDPixels() expands a PackBits-style RLE stream into raw samples.
  Each control byte is either 128 (a no-op filler), a value n > 128 that
  repeats the following data byte 256-n+1 times, or a value n < 128 that
  copies the next n+1 data bytes verbatim.  Every data byte is then
  unpacked per 'depth'.  Returns the number of samples written to
  'pixels'; decoding stops early rather than overrun either buffer.
*/
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
  int
    value;

  size_t
    run;

  ssize_t
    decoded,
    k,
    remaining,
    written;

  decoded=0;
  remaining=(ssize_t) number_compact_pixels;
  while ((remaining > 1) && (decoded < (ssize_t) number_pixels))
  {
    remaining--;
    run=(size_t) (*compact_pixels++);
    if (run == 128)
      continue;  /* filler byte, no output */
    if (run > 128)
      {
        /* Repeat the next data byte 256-run+1 times. */
        run=256-run+1;
        if (remaining == 0)
          return(decoded);
        remaining--;
        value=(int) (*compact_pixels++);
        for (k=0; k < (ssize_t) run; k++)
        {
          written=ExpandPSDPixelByte(value,depth,number_pixels-(size_t)
            decoded,pixels);
          if (written < 0)
            return(decoded);
          pixels+=written;
          decoded+=written;
        }
        continue;
      }
    /* Copy the next run+1 data bytes verbatim. */
    run++;
    for (k=0; k < (ssize_t) run; k++)
    {
      if (remaining == 0)
        return(decoded);
      remaining--;
      value=(int) (*compact_pixels++);
      written=ExpandPSDPixelByte(value,depth,number_pixels-(size_t) decoded,
        pixels);
      if (written < 0)
        return(decoded);
      pixels+=written;
      decoded+=written;
    }
  }
  return(decoded);
}
/*
  Release every layer image, mask image, and additional-info blob owned
  by the layer table, then free the table itself.  Always returns NULL.
*/
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    j;

  for (j=0; j < number_layers; j++)
  {
    LayerInfo
      *layer = layer_info+j;

    if (layer->image != (Image *) NULL)
      layer->image=DestroyImage(layer->image);
    if (layer->mask.image != (Image *) NULL)
      layer->mask.image=DestroyImage(layer->mask.image);
    if (layer->info != (StringInfo *) NULL)
      layer->info=DestroyStringInfo(layer->info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
/*
  Bytes used to store one sample: 2 for pseudo-color images with more
  than 256 colormap entries, otherwise 1, 2, or 4 by image depth.
*/
static inline size_t GetPSDPacketSize(const Image *image)
{
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  if (image->depth > 16)
    return(4);
  return(image->depth > 8 ? 2 : 1);
}
/*
  Read a section length from the blob: 32 bits for PSD (version 1),
  64 bits for PSB (version 2).
*/
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  return(psd_info->version == 1 ?
    (MagickSizeType) ReadBlobLong(image) :
    (MagickSizeType) ReadBlobLongLong(image));
}
/*
  Bytes needed to hold one decoded scanline: bilevel rows pack eight
  pixels per byte, otherwise one packet per column.
*/
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    packets;

  packets=(image->depth == 1) ? (image->columns+7)/8 : image->columns;
  return(packets*GetPSDPacketSize(image));
}
/*
  Human-readable name of a PSD color mode, used for debug logging.
*/
static const char *ModeToString(PSDImageType type)
{
  switch (type)
  {
    case BitmapMode:
      return "Bitmap";
    case GrayscaleMode:
      return "Grayscale";
    case IndexedMode:
      return "Indexed";
    case RGBMode:
      return "RGB";
    case CMYKMode:
      return "CMYK";
    case MultichannelMode:
      return "Multichannel";
    case DuotoneMode:
      return "Duotone";
    case LabMode:
      return "L*A*B";
    default:
      return "unknown";
  }
}
/*
  NegateCMYK() inverts every channel except alpha, temporarily widening
  the channel mask so NegateImage() reaches all color channels and then
  restoring the caller's mask.  NOTE(review): presumably used because PSD
  stores CMYK samples complemented — confirm against the PSD spec.
*/
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
ChannelType
channel_mask;
MagickBooleanType
status;
/* Select all channels but alpha so transparency is preserved. */
channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
AlphaChannel));
status=NegateImage(image,MagickFalse,exception);
(void) SetImageChannelMask(image,channel_mask);
return(status);
}
/*
  ParseImageResourceBlocks() walks the "8BIM" image resource blocks,
  extracting the resolution resource (0x03ed) into the image and the
  merged-image flag (0x0421) into psd_info, and returns the whole section
  wrapped in a StringInfo profile named "8bim" (NULL when the section is
  too short).  The caller owns the returned profile.
*/
static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image,
const unsigned char *blocks,size_t length)
{
const unsigned char
*p;
ssize_t
offset;
StringInfo
*profile;
unsigned char
name_length;
unsigned int
count;
unsigned short
id,
short_sans;
if (length < 16)
return((StringInfo *) NULL);
/* Preserve the raw section as an "8bim" profile for round-tripping. */
profile=BlobToStringInfo((const unsigned char *) NULL,length);
SetStringInfoDatum(profile,blocks);
SetStringInfoName(profile,"8bim");
for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
{
/* Each resource starts with the "8BIM" signature. */
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
break;
p+=4;
p=PushShortPixel(MSBEndian,p,&id);
/* Pascal-style resource name, padded to an even byte count. */
p=PushCharPixel(p,&name_length);
if ((name_length % 2) == 0)
name_length++;
p+=name_length;
if (p > (blocks+length-4))
break;
/* Resource data length; validate it stays inside the section. */
p=PushLongPixel(MSBEndian,p,&count);
offset=(ssize_t) count;
if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
break;
switch (id)
{
case 0x03ed:
{
unsigned short
resolution;
/*
Resolution info.
*/
if (offset < 16)
break;
p=PushShortPixel(MSBEndian,p,&resolution);
image->resolution.x=(double) resolution;
(void) FormatImageProperty(image,"tiff:XResolution","%*g",
GetMagickPrecision(),image->resolution.x);
/* Skip the fixed-point fraction and unit fields. */
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&resolution);
image->resolution.y=(double) resolution;
(void) FormatImageProperty(image,"tiff:YResolution","%*g",
GetMagickPrecision(),image->resolution.y);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
image->units=PixelsPerInchResolution;
break;
}
case 0x0421:
{
/* "Version Info" resource: byte 4 records merged-image presence. */
if ((offset > 4) && (*(p+4) == 0))
psd_info->has_merged_image=MagickFalse;
p+=offset;
break;
}
default:
{
p+=offset;
break;
}
}
/* Resource data is padded to an even length. */
if ((offset & 0x01) != 0)
p++;
}
return(profile);
}
/*
  PSDBlendModeToCompositeOperator() maps a 4-character PSD blend-mode key
  (as read by ReadPSDString, i.e. already in MSB-first spelling) to the
  corresponding ImageMagick composite operator.  Unknown or NULL keys
  fall back to OverCompositeOp (normal blending).  This is the inverse of
  CompositeOperatorToPSDBlendMode().
*/
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
/*
  ReadPSDString() reads 'length' bytes from the blob into 'p'.  PSD
  strings are stored MSB-first; when the blob was opened little-endian
  the buffer is reversed in place so callers always see the MSB-first
  spelling.  Returns the number of bytes actually read.
*/
static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length)
{
  ssize_t
    count;

  count=ReadBlob(image,length,(unsigned char *) p);
  if ((count == (ssize_t) length) && (image->endian != MSBEndian))
    {
      char
        *q;

      /*
        Reverse the buffer in place.  A plain temporary swap replaces the
        original comma-separated XOR-swap trick, which obscured intent
        without any benefit.
      */
      q=p+length-1;
      while (p < q)
      {
        char swap = *p;
        *p++ = *q;
        *q-- = swap;
      }
    }
  return(count);
}
/*
  SetPSDPixel() stores one decoded sample into pixel 'q' according to the
  channel 'type'.  For pseudo-color images type 0 is the colormap index
  and any other type updates the colormap entry's alpha.  For direct
  color, non-negative types 0..4 select red/green/blue/black-or-alpha/
  alpha, and the negative types (mask channels) map onto the matching
  component: -1 alpha, -2/-3/-4 red/green/blue.
*/
static inline void SetPSDPixel(Image *image,const size_t channels,
const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
ExceptionInfo *exception)
{
if (image->storage_class == PseudoClass)
{
PixelInfo
*color;
Quantum
index;
index=pixel;
/* Single-byte samples are colormap indexes, not scaled quanta. */
if (packet_size == 1)
index=(Quantum) ScaleQuantumToChar(index);
index=(Quantum) ConstrainColormapIndex(image,(ssize_t) index,
exception);
if (type == 0)
SetPixelIndex(image,index,q);
if ((type == 0) && (channels > 1))
return;
color=image->colormap+(ssize_t) GetPixelIndex(image,q);
/* Non-index channels carry the colormap entry's transparency. */
if (type != 0)
color->alpha=(MagickRealType) pixel;
SetPixelViaPixelInfo(image,color,q);
return;
}
switch (type)
{
case -1:
{
SetPixelAlpha(image,pixel,q);
break;
}
case -2:
case 0:
{
SetPixelRed(image,pixel,q);
break;
}
case -3:
case 1:
{
SetPixelGreen(image,pixel,q);
break;
}
case -4:
case 2:
{
SetPixelBlue(image,pixel,q);
break;
}
case 3:
{
/* Channel 3 is black for CMYK, otherwise the alpha channel. */
if (image->colorspace == CMYKColorspace)
SetPixelBlack(image,pixel,q);
else
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,pixel,q);
break;
}
case 4:
{
/* For sRGB-compatible images with >3 channels, channel 4 is ignored. */
if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
(channels > 3))
break;
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,pixel,q);
break;
}
}
}
/*
  ReadPSDChannelPixels() converts one decoded scanline ('pixels') into
  image row 'row' for the channel 'type'.  Samples are 1, 2, or 4 bytes
  wide per GetPSDPacketSize(); bilevel images additionally unpack eight
  pixels from every byte.  Returns the result of SyncAuthenticPixels().
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,
const size_t channels,const ssize_t row,const ssize_t type,
const unsigned char *pixels,ExceptionInfo *exception)
{
Quantum
pixel;
const unsigned char
*p;
Quantum
*q;
ssize_t
x;
size_t
packet_size;
p=pixels;
q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
if (q == (Quantum *) NULL)
return MagickFalse;
packet_size=GetPSDPacketSize(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
/* Widen the stored sample (8/16-bit int or 32-bit float) to a Quantum. */
if (packet_size == 1)
pixel=ScaleCharToQuantum(*p++);
else
if (packet_size == 2)
{
unsigned short
nibble;
p=PushShortPixel(MSBEndian,p,&nibble);
pixel=ScaleShortToQuantum(nibble);
}
else
{
MagickFloatType
nibble;
p=PushFloatPixel(MSBEndian,p,&nibble);
pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble));
}
if (image->depth > 1)
{
SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
q+=GetPixelChannels(image);
}
else
{
/* Bilevel: each byte packs up to 8 pixels, MSB first; set bits are black. */
ssize_t
bit,
number_bits;
number_bits=(ssize_t) image->columns-x;
if (number_bits > 8)
number_bits=8;
for (bit = 0; bit < (ssize_t) number_bits; bit++)
{
SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
& (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
q+=GetPixelChannels(image);
x++;
}
/* Compensate for the outer loop's increment unless the row is done. */
if (x != (ssize_t) image->columns)
x--;
continue;
}
}
return(SyncAuthenticPixels(image,exception));
}
/*
  ReadPSDChannelRaw() reads an uncompressed channel: one row-sized blob
  read per image row, each handed to ReadPSDChannelPixels().  Returns
  MagickFalse on a short read or row-conversion failure.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
const ssize_t type,ExceptionInfo *exception)
{
MagickBooleanType
status;
size_t
row_size;
ssize_t
count,
y;
unsigned char
*pixels;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is RAW");
row_size=GetPSDRowSize(image);
pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
(void) memset(pixels,0,row_size*sizeof(*pixels));
status=MagickTrue;
for (y=0; y < (ssize_t) image->rows; y++)
{
/* Assume failure until both the read and the row conversion succeed. */
status=MagickFalse;
count=ReadBlob(image,row_size,pixels);
if (count != (ssize_t) row_size)
break;
status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
if (status == MagickFalse)
break;
}
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
}
/*
  Read the per-row compressed byte counts that precede RLE channel data:
  16-bit counts for PSD (version 1), 32-bit for PSB.  Returns NULL on
  allocation failure; the caller owns the returned array.
*/
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    row;

  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return(sizes);
  for (row=0; row < (ssize_t) size; row++)
  {
    if (psd_info->version == 1)
      sizes[row]=(MagickOffsetType) ReadBlobShort(image);
    else
      sizes[row]=(MagickOffsetType) ReadBlobLong(image);
  }
  return sizes;
}
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  /*
    Read one RLE (PackBits) compressed PSD channel.  sizes[] supplies the
    compressed byte count for each of image->rows rows (see
    ReadPSDRLESizes); each row is read into a scratch buffer, decoded to
    row_size bytes and handed to ReadPSDChannelPixels.  Returns
    MagickFalse on a short read or a decode/decoding failure.
  */
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Size the scratch buffer to the largest compressed row so it can be
    reused for every row.
  */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+2048)) /* arbitrary number */
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  /* Fix: sizeof the object actually being allocated (same type as
     *pixels, so behavior is unchanged, but the expression was
     misleading). */
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /* Depth 1 is signalled to DecodePSDPixels with a sentinel value. */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
static void Unpredict8Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  /*
    Reverse the horizontal delta predictor on 8-bit samples: within each
    row every byte is stored as the difference from its left neighbour,
    so a running prefix sum (mod 256) restores the original values.
    count is the total byte count; row_size bytes are consumed per row.
  */
  size_t
    remaining;

  unsigned char
    *row;

  row=pixels;
  for (remaining=count; remaining > 0; remaining-=row_size)
  {
    size_t
      x;

    for (x=1; x < image->columns; x++)
      row[x]+=row[x-1];
    row+=image->columns;
  }
}
/*
  Reverse the horizontal delta predictor on 16-bit big-endian samples.
  Each sample is two bytes (MSB, LSB); the stored value is the difference
  from the sample to its left, so a running sum restores the originals.
  count is the total byte count; row_size bytes are consumed per row.
*/
static void Unpredict16Bit(const Image *image,unsigned char *pixels,
const size_t count,const size_t row_size)
{
unsigned char
*p;
size_t
length,
remaining;
p=pixels;
remaining=count;
while (remaining > 0)
{
/* columns-1 additions per row: p[0..1] is the already-decoded previous
   sample, p[2..3] the next delta. */
length=image->columns;
while (--length)
{
/* Add high bytes plus the carry out of the low-byte sum, then the low
   bytes; order matters because p[3] is read before it is updated. */
p[2]+=p[0]+((p[1]+p[3]) >> 8);
p[3]+=p[1];
p+=2;
}
p+=2;
remaining-=row_size;
}
}
/*
  Reverse the prediction on 32-bit samples.  Photoshop stores each row of
  32-bit data as four byte planes (all byte 0's, then all byte 1's, ...),
  each plane delta-predicted bytewise.  For every row this first undoes
  the byte-level delta across the whole row_size-byte row, then gathers
  one byte from each of the four planes to rebuild interleaved big-endian
  samples in output_pixels.  pixels is left in planar form; callers must
  use output_pixels afterwards.
*/
static void Unpredict32Bit(const Image *image,unsigned char *pixels,
unsigned char *output_pixels,const size_t row_size)
{
unsigned char
*p,
*q;
ssize_t
y;
size_t
offset1,
offset2,
offset3,
remaining;
unsigned char
*start;
/* Plane offsets within one row: plane k starts at k*columns. */
offset1=image->columns;
offset2=2*offset1;
offset3=3*offset1;
p=pixels;
q=output_pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
start=p;
/* Undo the bytewise delta across the entire row (row_size-1 sums). */
remaining=row_size;
while (--remaining)
{
*(p+1)+=*p;
p++;
}
/* Re-interleave: emit the 4 plane bytes of each column in order. */
p=start;
remaining=image->columns;
while (remaining--)
{
*(q++)=*p;
*(q++)=*(p+offset1);
*(q++)=*(p+offset2);
*(q++)=*(p+offset3);
p++;
}
p=start+row_size;
}
}
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  /*
    Read one ZIP (zlib) compressed PSD channel.  compact_size bytes are
    read from the blob, inflated into a rows*row_size buffer, optionally
    run through the matching Unpredict* routine (ZipWithPrediction), and
    decoded row by row with ReadPSDChannelPixels.
  */
  MagickBooleanType
    status;

  unsigned char
    *p;

  size_t
    count,
    packet_size,
    row_size;

  ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  /* The compressed payload cannot be larger than the whole blob. */
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  /* NOTE(review): rows*row_size could wrap for extreme dimensions —
     presumably bounded by earlier header validation; confirm. */
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      if (packet_size == 1)
        Unpredict8Bit(image,pixels,count,row_size);
      else if (packet_size == 2)
        Unpredict16Bit(image,pixels,count,row_size);
      else if (packet_size == 4)
        {
          unsigned char
            *output_pixels;

          output_pixels=(unsigned char *) AcquireQuantumMemory(count,
            sizeof(*output_pixels));
          /*
            Bug fix: the NULL check previously tested `pixels` (already
            known non-NULL) instead of the freshly allocated
            `output_pixels`, so a failed allocation would be dereferenced
            inside Unpredict32Bit.
          */
          if (output_pixels == (unsigned char *) NULL)
            {
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
              pixels=(unsigned char *) RelinquishMagickMemory(pixels);
              ThrowBinaryException(ResourceLimitError,
                "MemoryAllocationFailed",image->filename);
            }
          Unpredict32Bit(image,pixels,output_pixels,row_size);
          pixels=(unsigned char *) RelinquishMagickMemory(pixels);
          pixels=output_pixels;
        }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
/*
  Read a single channel of a layer, dispatching on the compression type.
  Layer-mask channels (type < -1) are decoded into a separate grayscale
  clone stored in layer_info->mask.image; unsupported masks are skipped
  in the blob instead.  On return the blob is positioned exactly past the
  channel's data regardless of how much the decoder consumed.
*/
static MagickBooleanType ReadPSDChannel(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
const size_t channel,const PSDCompressionType compression,
ExceptionInfo *exception)
{
Image
*channel_image,
*mask;
MagickOffsetType
offset;
MagickBooleanType
status;
channel_image=image;
mask=(Image *) NULL;
if ((layer_info->channel_info[channel].type < -1) &&
(layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
{
const char
*option;
/*
Ignore mask that is not a user supplied layer mask, if the mask is
disabled or if the flags have unsupported values.
*/
option=GetImageOption(image_info,"psd:preserve-opacity-mask");
if ((layer_info->channel_info[channel].type != -2) ||
(layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
(IsStringTrue(option) == MagickFalse)))
{
/* Skip the channel data (size includes the 2-byte compression word
   already consumed by the caller). */
(void) SeekBlob(image,(MagickOffsetType)
layer_info->channel_info[channel].size-2,SEEK_CUR);
return(MagickTrue);
}
/* Decode the mask into its own grayscale image. */
mask=CloneImage(image,layer_info->mask.page.width,
layer_info->mask.page.height,MagickFalse,exception);
if (mask != (Image *) NULL)
{
(void) ResetImagePixels(mask,exception);
(void) SetImageType(mask,GrayscaleType,exception);
channel_image=mask;
}
}
/* Remember the channel start so the blob can be repositioned after
   decoding, whatever the decoder consumed. */
offset=TellBlob(image);
status=MagickFalse;
switch(compression)
{
case Raw:
status=ReadPSDChannelRaw(channel_image,psd_info->channels,
(ssize_t) layer_info->channel_info[channel].type,exception);
break;
case RLE:
{
MagickOffsetType
*sizes;
sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
if (sizes == (MagickOffsetType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ReadPSDChannelRLE(channel_image,psd_info,
(ssize_t) layer_info->channel_info[channel].type,sizes,exception);
sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
}
break;
case ZipWithPrediction:
case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
status=ReadPSDChannelZip(channel_image,layer_info->channels,
(ssize_t) layer_info->channel_info[channel].type,compression,
layer_info->channel_info[channel].size-2,exception);
#else
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
"'%s' (ZLIB)",image->filename);
#endif
break;
default:
(void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
"CompressionNotSupported","'%.20g'",(double) compression);
break;
}
/* Position the blob exactly past this channel's data. */
(void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
SEEK_SET);
if (status == MagickFalse)
{
if (mask != (Image *) NULL)
(void) DestroyImage(mask);
ThrowBinaryException(CoderError,"UnableToDecompressImage",
image->filename);
}
/* Attach the decoded mask, replacing any earlier one. */
if (mask != (Image *) NULL)
{
if (layer_info->mask.image != (Image *) NULL)
layer_info->mask.image=DestroyImage(layer_info->mask.image);
layer_info->mask.image=mask;
}
return(status);
}
/*
  Read the pixel data for one layer: initialize the layer image, record
  the layer's position/opacity as artifacts, read every channel via
  ReadPSDChannel, then apply opacity, CMYK negation and the opacity mask
  as needed.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
char
message[MagickPathExtent];
MagickBooleanType
status;
PSDCompressionType
compression;
ssize_t
j;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" setting up new layer image");
if (psd_info->mode != IndexedMode)
(void) SetImageBackgroundColor(layer_info->image,exception);
layer_info->image->compose=PSDBlendModeToCompositeOperator(
layer_info->blendkey);
/* Hidden layers keep their pixels but do not compose. */
if (layer_info->visible == MagickFalse)
layer_info->image->compose=NoCompositeOp;
/*
Set up some hidden attributes for folks that need them.
*/
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",
(double) layer_info->page.x);
(void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",
(double) layer_info->page.y);
(void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
layer_info->opacity);
(void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
(void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
exception);
/* Each channel is preceded by its own 2-byte compression word. */
status=MagickTrue;
for (j=0; j < (ssize_t) layer_info->channels; j++)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading data for channel %.20g",(double) j);
compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
layer_info->image->compression=ConvertPSDCompression(compression);
if (layer_info->channel_info[j].type == -1)
layer_info->image->alpha_trait=BlendPixelTrait;
status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
(size_t) j,compression,exception);
if (status == MagickFalse)
break;
}
if (status != MagickFalse)
status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
MagickFalse,exception);
if ((status != MagickFalse) &&
(layer_info->image->colorspace == CMYKColorspace))
status=NegateCMYK(layer_info->image,exception);
if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
{
const char
*option;
layer_info->mask.image->page.x=layer_info->mask.page.x;
layer_info->mask.image->page.y=layer_info->mask.page.y;
/* Do not composite the mask when it is disabled */
if ((layer_info->mask.flags & 0x02) == 0x02)
layer_info->mask.image->compose=NoCompositeOp;
else
status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
exception);
option=GetImageOption(image_info,"psd:preserve-opacity-mask");
if (IsStringTrue(option) != MagickFalse)
PreservePSDOpacityMask(image,layer_info,exception);
layer_info->mask.image=DestroyImage(layer_info->mask.image);
}
return(status);
}
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  /*
    Verify that a layer supplies every colour channel the image mode
    requires.  A bitmask of required channels is built from min_channels
    and cleared as matching channel types are seen; alpha (-1) is noted
    separately so "all colours + alpha" can also be accepted when the
    layer carries an extra channel.
  */
  int
    required;

  ssize_t
    i;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  required=RedChannel;
  if (psd_info->min_channels >= 3)
    required|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    required|=BlackChannel;
  for (i=0; i < (ssize_t) layer_info->channels; i++)
  {
    short
      type;

    type=layer_info->channel_info[i].type;
    /* Indexed images must have their index as the first channel. */
    if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0))
      return(MagickFalse);
    switch (type)
    {
      case -1:
        required|=AlphaChannel;
        break;
      case 0:
        required&=(~RedChannel);
        break;
      case 1:
        required&=(~GreenChannel);
        break;
      case 2:
        required&=(~BlueChannel);
        break;
      case 3:
        required&=(~BlackChannel);
        break;
      default:
        /* Mask channels (< -1) and unknown types are ignored. */
        break;
    }
  }
  if (required == 0)
    return(MagickTrue);
  if ((required == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  Splice the decoded layer images into the image list after `image`.
  Layers whose image is NULL are first compacted out of the array; the
  remaining images are linked together via previous/next and given their
  page geometry.  The layer_info array itself is always freed here; the
  images survive as part of the list.
*/
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
ssize_t number_layers)
{
ssize_t
i;
ssize_t
j;
/* Compact: shift later entries over any NULL-image layer; i-- revisits
   the slot now holding the shifted entry. */
for (i=0; i < number_layers; i++)
{
if (layer_info[i].image == (Image *) NULL)
{
for (j=i; j < number_layers - 1; j++)
layer_info[j] = layer_info[j+1];
number_layers--;
i--;
}
}
if (number_layers == 0)
{
layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
return;
}
/* Link the surviving layer images into a doubly linked chain. */
for (i=0; i < number_layers; i++)
{
if (i > 0)
layer_info[i].image->previous=layer_info[i-1].image;
if (i < (number_layers-1))
layer_info[i].image->next=layer_info[i+1].image;
layer_info[i].image->page=layer_info[i].page;
}
/* Attach the chain directly after the base image. */
image->next=layer_info[0].image;
layer_info[0].image->previous=image;
layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info,
  const ImageInfo *image_info,const size_t index)
{
  /*
    Report whether scene `index` lies outside the caller's requested
    scene range.  Layers are only skippable when a merged image exists
    and the caller actually restricted the scene range.
  */
  if ((psd_info->has_merged_image == MagickFalse) ||
      (image_info->number_scenes == 0))
    return(MagickFalse);
  if ((index >= image_info->scene) &&
      (index <= image_info->scene+image_info->number_scenes-1))
    return(MagickFalse);
  return(MagickTrue);
}
static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image)
{
  /*
    The number of layers cannot be used to determine if the merged image
    contains an alpha channel, so enable it heuristically: any channel
    beyond what the colour mode itself needs is treated as alpha.
  */
  size_t
    base_channels;

  switch (psd_info->mode)
  {
    case GrayscaleMode:
      base_channels=1;
      break;
    case RGBMode:
      base_channels=3;
      break;
    case CMYKMode:
      base_channels=4;
      break;
    default:
      return;
  }
  if (psd_info->channels > base_channels)
    image->alpha_trait=BlendPixelTrait;
}
/*
  Walk the layer's "additional information" blocks (signature + 4-byte
  key + 4-byte big-endian size + payload) looking for the "luni" block,
  which carries the layer name as UTF-16; only ASCII code points (high
  byte zero) are copied into layer_info->name.
*/
static void ParseAdditionalInfo(LayerInfo *layer_info)
{
char
key[5];
size_t
remaining_length;
unsigned char
*p;
unsigned int
size;
p=GetStringInfoDatum(layer_info->info);
remaining_length=GetStringInfoLength(layer_info->info);
while (remaining_length >= 12)
{
/* skip over signature */
p+=4;
key[0]=(char) (*p++);
key[1]=(char) (*p++);
key[2]=(char) (*p++);
key[3]=(char) (*p++);
key[4]='\0';
/* Big-endian 32-bit payload size. */
size=(unsigned int) (*p++) << 24;
size|=(unsigned int) (*p++) << 16;
size|=(unsigned int) (*p++) << 8;
size|=(unsigned int) (*p++);
/* No-op on a 32-bit unsigned int; kept for clarity/portability. */
size=size & 0xffffffff;
remaining_length-=12;
if ((size_t) size > remaining_length)
break;
if (LocaleNCompare(key,"luni",sizeof(key)) == 0)
{
unsigned char
*name;
unsigned int
length;
/* Character count of the UTF-16 name. */
length=(unsigned int) (*p++) << 24;
length|=(unsigned int) (*p++) << 16;
length|=(unsigned int) (*p++) << 8;
length|=(unsigned int) (*p++);
/* NOTE(review): if size < 4 the `size-4` subtraction wraps; the
   size > remaining_length check above does not rule that out —
   confirm upstream validation. */
if (length * 2 > size - 4)
break;
if (sizeof(layer_info->name) <= length)
break;
name=layer_info->name;
while (length > 0)
{
/* Only ASCII strings are supported */
if (*p++ != '\0')
break;
*name++=*p++;
length--;
}
/* Terminate only if the whole name was ASCII and fully copied. */
if (length == 0)
*name='\0';
break;
}
else
p+=size;
remaining_length-=(size_t) size;
}
}
/*
  Determine the byte size of the layer-info section.  When the standard
  size field is zero, probe the additional-information blocks: an
  Mt16/Mt32/Mtrn block implies merged transparency (alpha is enabled),
  and an Lr16/Lr32 block carries the real layer-info size for 16/32-bit
  files.  Returns 0 when no layer info can be located.
*/
static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image)
{
char
type[4];
MagickSizeType
size;
ssize_t
count;
size=GetPSDSize(psd_info,image);
if (size != 0)
return(size);
/* Zero size: look for 8BIM-tagged additional info blocks instead. */
(void) ReadBlobLong(image);
count=ReadPSDString(image,type,4);
if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
return(0);
count=ReadPSDString(image,type,4);
if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) ||
(LocaleNCompare(type,"Mt32",4) == 0) ||
(LocaleNCompare(type,"Mtrn",4) == 0)))
{
/* Merged transparency block: must itself be empty here. */
size=GetPSDSize(psd_info,image);
if (size != 0)
return(0);
image->alpha_trait=BlendPixelTrait;
count=ReadPSDString(image,type,4);
if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
return(0);
count=ReadPSDString(image,type,4);
}
if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
(LocaleNCompare(type,"Lr32",4) == 0)))
size=GetPSDSize(psd_info,image);
return(size);
}
/*
  Parse the layer-info section: layer count, then per-layer records
  (geometry, channels, blend data, mask info, blending ranges, name,
  additional info), then each layer's pixel data.  On success the layer
  images are attached to `image`'s list via AttachPSDLayers; on failure
  everything allocated here is destroyed.  With skip_layers set, only the
  layer count is read (to detect alpha) and the function returns early.
*/
static MagickBooleanType ReadPSDLayersInternal(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,
const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
char
type[4];
LayerInfo
*layer_info;
MagickSizeType
size;
MagickBooleanType
status;
ssize_t
i;
ssize_t
count,
index,
j,
number_layers;
size=GetLayerInfoSize(psd_info,image);
if (size == 0)
{
CheckMergedImageAlpha(psd_info,image);
return(MagickTrue);
}
layer_info=(LayerInfo *) NULL;
number_layers=(ssize_t) ReadBlobSignedShort(image);
if (number_layers < 0)
{
/*
The first alpha channel in the merged result contains the
transparency data for the merged result.
*/
number_layers=MagickAbsoluteValue(number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" negative layer count corrected for");
image->alpha_trait=BlendPixelTrait;
}
/*
We only need to know if the image has an alpha channel
*/
if (skip_layers != MagickFalse)
return(MagickTrue);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image contains %.20g layers",(double) number_layers);
if (number_layers == 0)
ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
image->filename);
layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
sizeof(*layer_info));
if (layer_info == (LayerInfo *) NULL)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of LayerInfo failed");
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
/*
  Pass 1: read every layer record (no pixel data yet).
*/
for (i=0; i < number_layers; i++)
{
ssize_t
top,
left,
bottom,
right;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading layer #%.20g",(double) i+1);
/* Layer bounds as top/left/bottom/right. */
top=(ssize_t) ReadBlobSignedLong(image);
left=(ssize_t) ReadBlobSignedLong(image);
bottom=(ssize_t) ReadBlobSignedLong(image);
right=(ssize_t) ReadBlobSignedLong(image);
if ((right < left) || (bottom < top))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
layer_info[i].page.y=top;
layer_info[i].page.x=left;
layer_info[i].page.width=(size_t) (right-left);
layer_info[i].page.height=(size_t) (bottom-top);
layer_info[i].channels=ReadBlobShort(image);
if (layer_info[i].channels > MaxPSDChannels)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
image->filename);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
(double) layer_info[i].page.x,(double) layer_info[i].page.y,
(double) layer_info[i].page.height,(double)
layer_info[i].page.width,(double) layer_info[i].channels);
/* Per-channel type (-1 alpha, < -1 mask, 0..3 colour) and size. */
for (j=0; j < (ssize_t) layer_info[i].channels; j++)
{
layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
if ((layer_info[i].channel_info[j].type < -4) ||
(layer_info[i].channel_info[j].type > 4))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
image->filename);
}
layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" channel[%.20g]: type=%.20g, size=%.20g",(double) j,
(double) layer_info[i].channel_info[j].type,
(double) layer_info[i].channel_info[j].size);
}
if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
/* Blend-mode record: "8BIM" signature + 4-byte blend key. */
count=ReadPSDString(image,type,4);
if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer type was %.4s instead of 8BIM", type);
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
count=ReadPSDString(image,layer_info[i].blendkey,4);
if (count != 4)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
layer_info[i].flags=(unsigned char) ReadBlobByte(image);
/* Flag bit 0x02 means the layer is hidden. */
layer_info[i].visible=!(layer_info[i].flags & 0x02);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
layer_info[i].blendkey,(double) layer_info[i].opacity,
layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
layer_info[i].visible ? "true" : "false");
(void) ReadBlobByte(image); /* filler */
/* Extra-data section: mask info, blending ranges, name, additional
   info.  combined_length tracks consumed bytes against `size`. */
size=ReadBlobLong(image);
if (size != 0)
{
MagickSizeType
combined_length,
length;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer contains additional info");
length=ReadBlobLong(image);
combined_length=length+4;
if (length != 0)
{
/*
Layer mask info.
*/
layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
layer_info[i].mask.page.height=(size_t)
(ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
layer_info[i].mask.page.width=(size_t) (
ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
layer_info[i].mask.background=(unsigned char) ReadBlobByte(
image);
layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
/* Unless the mask position is relative (flag 0x01), convert it
   to layer-relative coordinates. */
if (!(layer_info[i].mask.flags & 0x01))
{
layer_info[i].mask.page.y=layer_info[i].mask.page.y-
layer_info[i].page.y;
layer_info[i].mask.page.x=layer_info[i].mask.page.x-
layer_info[i].page.x;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
(double) layer_info[i].mask.page.x,(double)
layer_info[i].mask.page.y,(double)
layer_info[i].mask.page.width,(double)
layer_info[i].mask.page.height,(double) ((MagickOffsetType)
length)-18);
/*
Skip over the rest of the layer mask information.
*/
if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
length=ReadBlobLong(image);
combined_length+=length+4;
if (length != 0)
{
/*
Layer blending ranges info.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer blending ranges: length=%.20g",(double)
((MagickOffsetType) length));
if (DiscardBlobBytes(image,length) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
/*
Layer name.
*/
length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
combined_length+=length+1;
if (length > 0)
(void) ReadBlob(image,(size_t) length++,layer_info[i].name);
layer_info[i].name[length]='\0';
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer name: %s",layer_info[i].name);
/* The Pascal-style name is padded to a multiple of 4 bytes. */
if ((length % 4) != 0)
{
length=4-(length % 4);
combined_length+=length;
/* Skip over the padding of the layer name */
if (DiscardBlobBytes(image,length) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
/* Whatever remains is additional info (blend keys, unicode name). */
length=(MagickSizeType) size-combined_length;
if (length > 0)
{
unsigned char
*info;
if (length > GetBlobSize(image))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"InsufficientImageDataInFile",image->filename);
}
layer_info[i].info=AcquireStringInfo((const size_t) length);
info=GetStringInfoDatum(layer_info[i].info);
(void) ReadBlob(image,(const size_t) length,info);
ParseAdditionalInfo(&layer_info[i]);
}
}
}
/*
  Pass 2: allocate an image per non-empty layer.
*/
for (i=0; i < number_layers; i++)
{
if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is empty");
if (layer_info[i].info != (StringInfo *) NULL)
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
continue;
}
/*
Allocate layered image.
*/
layer_info[i].image=CloneImage(image,layer_info[i].page.width,
layer_info[i].page.height,MagickFalse,exception);
if (layer_info[i].image == (Image *) NULL)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of image for layer %.20g failed",(double) i);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
if (layer_info[i].info != (StringInfo *) NULL)
{
(void) SetImageProfile(layer_info[i].image,"psd:additional-info",
layer_info[i].info,exception);
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
}
}
if (image_info->ping != MagickFalse)
{
AttachPSDLayers(image,layer_info,number_layers);
return(MagickTrue);
}
/*
  Pass 3: read pixel data; skipped/empty layers have their channel data
  discarded from the blob to keep the stream in sync.
*/
status=MagickTrue;
index=0;
for (i=0; i < number_layers; i++)
{
if ((layer_info[i].image == (Image *) NULL) ||
(PSDSkipImage(psd_info, image_info,++index) != MagickFalse))
{
for (j=0; j < (ssize_t) layer_info[i].channels; j++)
{
if (DiscardBlobBytes(image,(MagickSizeType)
layer_info[i].channel_info[j].size) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
continue;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading data for layer %.20g",(double) i);
status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
exception);
if (status == MagickFalse)
break;
status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
(MagickSizeType) number_layers);
if (status == MagickFalse)
break;
}
if (status != MagickFalse)
AttachPSDLayers(image,layer_info,number_layers);
else
layer_info=DestroyLayerInfo(layer_info,number_layers);
return(status);
}
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Public entry point for reading PSD layers.  When the security policy
    forbids reading PSD, layers are silently skipped (MagickTrue);
    otherwise the work is delegated to ReadPSDLayersInternal with layer
    skipping disabled.
  */
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}
/*
  Read the flattened (merged) composite image that follows the layer
  section.  Only Raw and RLE compression are supported here; for RLE the
  whole per-row size table (rows * channels entries) is read up front.
  Channels are read in order; in a 2-channel file the second channel is
  treated as alpha (type -1).
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
MagickOffsetType
*sizes;
MagickBooleanType
status;
PSDCompressionType
compression;
ssize_t
i;
/* When the caller asked only for later scenes, skip the merged image. */
if ((image_info->number_scenes != 0) && (image_info->scene != 0))
return(MagickTrue);
compression=(PSDCompressionType) ReadBlobMSBShort(image);
image->compression=ConvertPSDCompression(compression);
if (compression != Raw && compression != RLE)
{
(void) ThrowMagickException(exception,GetMagickModule(),
TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
return(MagickFalse);
}
sizes=(MagickOffsetType *) NULL;
if (compression == RLE)
{
sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
if (sizes == (MagickOffsetType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
status=MagickTrue;
for (i=0; i < (ssize_t) psd_info->channels; i++)
{
ssize_t
type;
type=i;
/* Grayscale+alpha: the second channel carries transparency. */
if ((type == 1) && (psd_info->channels == 2))
type=-1;
if (compression == RLE)
status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
exception);
else
status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
if (status != MagickFalse)
status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
psd_info->channels);
if (status == MagickFalse)
break;
}
if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
status=NegateCMYK(image,exception);
if (status != MagickFalse)
status=CorrectPSDAlphaBlend(image_info,image,exception);
/* RelinquishMagickMemory(NULL) is a no-op, so this is safe for Raw. */
sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
return(status);
}
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
skip_layers;
MagickOffsetType
offset;
MagickSizeType
length;
MagickBooleanType
status;
PSDInfo
psd_info;
ssize_t
i;
size_t
image_list_length;
ssize_t
count;
StringInfo
*profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read image header.
*/
image->endian=MSBEndian;
count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
psd_info.version=ReadBlobMSBShort(image);
if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
((psd_info.version != 1) && (psd_info.version != 2)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
(void) ReadBlob(image,6,psd_info.reserved);
psd_info.channels=ReadBlobMSBShort(image);
if (psd_info.channels < 1)
ThrowReaderException(CorruptImageError,"MissingImageChannel");
if (psd_info.channels > MaxPSDChannels)
ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
psd_info.rows=ReadBlobMSBLong(image);
psd_info.columns=ReadBlobMSBLong(image);
if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
(psd_info.columns > 30000)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.depth=ReadBlobMSBShort(image);
if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
(psd_info.depth != 16) && (psd_info.depth != 32))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.mode=ReadBlobMSBShort(image);
if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
(double) psd_info.columns,(double) psd_info.rows,(double)
psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
psd_info.mode));
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Initialize image.
*/
image->depth=psd_info.depth;
image->columns=psd_info.columns;
image->rows=psd_info.rows;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
psd_info.min_channels=3;
switch (psd_info.mode)
{
case LabMode:
{
(void) SetImageColorspace(image,LabColorspace,exception);
break;
}
case CMYKMode:
{
psd_info.min_channels=4;
(void) SetImageColorspace(image,CMYKColorspace,exception);
break;
}
case BitmapMode:
case GrayscaleMode:
case DuotoneMode:
{
if (psd_info.depth != 32)
{
status=AcquireImageColormap(image,MagickMin((size_t)
(psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image colormap allocated");
}
psd_info.min_channels=1;
(void) SetImageColorspace(image,GRAYColorspace,exception);
break;
}
case IndexedMode:
{
psd_info.min_channels=1;
break;
}
case MultichannelMode:
{
if ((psd_info.channels > 0) && (psd_info.channels < 3))
{
psd_info.min_channels=psd_info.channels;
(void) SetImageColorspace(image,GRAYColorspace,exception);
}
break;
}
}
if (psd_info.channels < psd_info.min_channels)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Read PSD raster colormap only present for indexed and duotone images.
*/
length=ReadBlobMSBLong(image);
if ((psd_info.mode == IndexedMode) && (length < 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (length != 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading colormap");
if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
{
/*
Duotone image data; the format of this data is undocumented.
32 bits per pixel; the colormap is ignored.
*/
(void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
}
else
{
size_t
number_colors;
/*
Read PSD raster colormap.
*/
number_colors=(size_t) length/3;
if (number_colors > 65536)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
image->alpha_trait=UndefinedPixelTrait;
}
}
if ((image->depth == 1) && (image->storage_class != PseudoClass))
ThrowReaderException(CorruptImageError, "ImproperImageHeader");
psd_info.has_merged_image=MagickTrue;
profile=(StringInfo *) NULL;
length=ReadBlobMSBLong(image);
if (length != 0)
{
unsigned char
*blocks;
/*
Image resources block.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading image resource blocks - %.20g bytes",(double)
((MagickOffsetType) length));
if (length > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
sizeof(*blocks));
if (blocks == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,(size_t) length,blocks);
if ((count != (ssize_t) length) || (length < 4) ||
(LocaleNCompare((char *) blocks,"8BIM",4) != 0))
{
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length);
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
}
/*
Layer and mask block.
*/
length=GetPSDSize(&psd_info,image);
if (length == 8)
{
length=ReadBlobMSBLong(image);
length=ReadBlobMSBLong(image);
}
offset=TellBlob(image);
skip_layers=MagickFalse;
if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
(psd_info.has_merged_image != MagickFalse))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" read composite only");
skip_layers=MagickTrue;
}
if (length == 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image has no layers");
}
else
{
if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
exception) != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Skip the rest of the layer and mask information.
*/
(void) SeekBlob(image,offset+length,SEEK_SET);
}
/*
If we are only "pinging" the image, then we're done - so return.
*/
if (EOFBlob(image) != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
}
if (image_info->ping != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
Read the precombined layer, present for PSD < 4 compatibility.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading the precombined layer");
image_list_length=GetImageListLength(image);
if ((psd_info.has_merged_image != MagickFalse) || (image_list_length == 1))
psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage(
image_info,image,&psd_info,exception);
if ((psd_info.has_merged_image == MagickFalse) && (image_list_length == 1) &&
(length != 0))
{
(void) SeekBlob(image,offset,SEEK_SET);
status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
exception);
if (status != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
image_list_length=GetImageListLength(image);
}
if (psd_info.has_merged_image == MagickFalse)
{
Image
*merged;
if (image_list_length == 1)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
}
image->background_color.alpha=(MagickRealType) TransparentAlpha;
image->background_color.alpha_trait=BlendPixelTrait;
(void) SetImageBackgroundColor(image,exception);
merged=MergeImageLayers(image,FlattenLayer,exception);
if (merged == (Image *) NULL)
{
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
ReplaceImageInList(&image,merged);
}
if (profile != (StringInfo *) NULL)
{
Image
*next;
i=0;
next=image;
while (next != (Image *) NULL)
{
if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse)
(void) SetImageProfile(next,GetStringInfoName(profile),profile,
exception);
next=next->next;
}
profile=DestroyStringInfo(profile);
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  /*
    Register both coder entries handled by this module: PSB (large
    document) and PSD (classic Photoshop).  Both share the same decoder,
    encoder, and magick-detection handlers and require seekable streams.
  */
  static const struct
  {
    const char
      *name,
      *description;
  } formats[2] =
  {
    { "PSB", "Adobe Large Document Format" },
    { "PSD", "Adobe Photoshop bitmap" }
  };

  ssize_t
    i;

  for (i=0; i < 2; i++)
  {
    MagickInfo
      *entry;

    entry=AcquireMagickInfo("PSD",formats[i].name,formats[i].description);
    entry->decoder=(DecodeImageHandler *) ReadPSDImage;
    entry->encoder=(EncodeImageHandler *) WritePSDImage;
    entry->magick=(IsImageFormatHandler *) IsPSD;
    entry->flags|=CoderDecoderSeekableStreamFlag;
    entry->flags|=CoderEncoderSeekableStreamFlag;
    (void) RegisterMagickInfo(entry);
  }
  return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
(void) UnregisterMagickInfo("PSB");
(void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  /*
    Write an RLE row byte-count slot: 16-bit big-endian for PSD
    (version 1), 32-bit big-endian for PSB (version 2).
  */
  if (psd_info->version != 1)
    return(WriteBlobMSBLong(image,(unsigned int) offset));
  return(WriteBlobMSBShort(image,(unsigned short) offset));
}
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  /*
    Patch a previously reserved row byte-count slot at 'offset' with
    'size', then restore the blob position so sequential writing can
    continue.  Slot width depends on the PSD version (see SetPSDOffset).
  */
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) size) :
    WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  /*
    Write a section-length field: 32-bit for PSD (version 1), 64-bit
    for PSB (version 2).
  */
  if (psd_info->version != 1)
    return(WriteBlobLongLong(image,size));
  return(WriteBlobLong(image,(unsigned int) size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  /*
    Patch a previously reserved section-length field at 'offset' with
    'size' and restore the current blob position afterwards.
  */
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=SetPSDSize(psd_info,image,size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
/*
  PSDPackbitsEncodeImage() compresses 'length' bytes of 'pixels' with the
  PackBits run-length scheme into 'compact_pixels' and returns the number
  of bytes emitted (including the trailing EOD marker byte).
  NOTE(review): the caller is responsible for sizing 'compact_pixels' for
  worst-case expansion -- presumably via AcquireCompactPixels(); confirm
  at each call site.
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;
  ssize_t
    i,
    j;
  unsigned char
    *q;
  unsigned char
    *packbits;
  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /* Scratch buffer for one literal run: count byte + at most 127 bytes. */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  /* 'i' counts the input bytes still to be encoded. */
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* One trailing byte: emit a literal run of length 1. */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* Two trailing bytes: emit a literal run of length 2. */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        /* Three trailing bytes: packed run if all equal, else literal. */
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* (256-3)+1 == 254: two's-complement count byte for a run of 3. */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            /* Extend the run, capped at 127 (PackBits maximum). */
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        /* Collect bytes until a run of 3 equal bytes starts, the input
           is nearly exhausted, or the 127-byte literal cap is reached. */
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  size_t
    count;

  ssize_t
    channel,
    row;

  /*
    Emit the 2-byte compression tag for the upcoming channel data.  For
    RLE, also reserve one byte-count slot per row per channel; these
    placeholders are patched later with WritePSDOffset().
  */
  if (compression == RLECompression)
    {
      count=(size_t) WriteBlobShort(image,RLE);
      for (channel=0; channel < channels; channel++)
        for (row=0; row < (ssize_t) next_image->rows; row++)
          count+=SetPSDOffset(psd_info,image,0);
      return(count);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    return((size_t) WriteBlobShort(image,ZipWithoutPrediction));
#endif
  return((size_t) WriteBlobShort(image,Raw));
}
/*
  WritePSDChannel() writes the pixel data of a single channel
  ('quantum_type') of 'next_image' to 'image''s blob, using the requested
  compression.  Returns the number of bytes written, or 0 on failure.
  When 'separate' is set, the channel is written as its own section: a
  compression tag (plus RLE row slots) is emitted first and 'size_offset'
  is redirected at the freshly written header.
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  const CompressionType compression,ExceptionInfo *exception)
{
  MagickBooleanType
    monochrome;
  QuantumInfo
    *quantum_info;
  const Quantum
    *p;
  ssize_t
    i;
  size_t
    count,
    length;
  ssize_t
    y;
  unsigned char
    *pixels;
#ifdef MAGICKCORE_ZLIB_DELEGATE
  int
    flush,
    level;
  unsigned char
    *compressed_pixels;
  z_stream
    stream;
  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* +2 skips the compression tag just about to be written, so
         size_offset points at the first RLE row-count slot. */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
    }
  /* PSD supports 8- or 16-bit channels only; clamp deeper images. */
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(
        MagickMinBufferExtent,sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* Quality 1-9 maps directly onto the zlib compression level. */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    /* 1-bit PSD stores black as 1, so invert monochrome samples. */
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        /* Patch this row's byte count into its reserved slot. */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        /* Finish the zlib stream on the final row. */
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;
        do {
            stream.avail_out=(uInt) MagickMinBufferExtent;
            stream.next_out=(Bytef *) compressed_pixels;
            if (deflate(&stream,flush) == Z_STREAM_ERROR)
              break;
            length=(size_t) MagickMinBufferExtent-stream.avail_out;
            if (length > 0)
              count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    packet_size;

  unsigned char
    *compact_pixels;

  /*
    Allocate a buffer large enough for the worst-case PackBits
    expansion of one row (9*columns+1 packets of 1 or 2 bytes each).
    Returns NULL and records a ResourceLimitError on failure.
  */
  packet_size=(image->depth > 8UL) ? 2UL : 1UL;
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    packet_size*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  return(compact_pixels);
}
/*
  WritePSDChannels() writes every channel of 'next_image' (index or
  gray, or R/G/B[/K], plus alpha and an optional opacity mask) to the
  blob of 'image'.  Returns the total byte count written, or 0 on
  failure.  When 'separate' is set (layer data), each channel carries
  its own compression header and its length is patched back at
  'size_offset'; otherwise (merged image) one shared header is written
  and per-row RLE slots are advanced via 'rows_offset'.
*/
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  CompressionType
    compression;
  Image
    *mask;
  MagickOffsetType
    rows_offset;
  size_t
    channels,
    count,
    length,
    offset_length;
  unsigned char
    *compact_pixels;
  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  /* Image-info compression (if set) overrides the per-image choice. */
  compression=next_image->compression;
  if (image_info->compression != UndefinedCompression)
    compression=image_info->compression;
  if (compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /* Merged image: compute the channel count and write one shared
         compression header covering all channels. */
      if ((next_image->storage_class != PseudoClass) ||
          (IsImageGray(next_image) != MagickFalse))
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,
        (ssize_t) channels);
      /* Bytes occupied by one channel's worth of RLE row-count slots. */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  size_offset+=2;
  if ((next_image->storage_class == PseudoClass) &&
      (IsImageGray(next_image) == MagickFalse))
    {
      /* Palette image: a single index channel. */
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* PSD stores CMYK inverted; negate before writing and undo
             it again below. */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,compression,
                exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  /* Restore the CMYK channels negated above. */
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);
  if (separate != MagickFalse)
    {
      const char
        *property;
      /* Layer data may carry an opacity mask stashed in the registry
         under the key recorded in the "psd:opacity-mask" artifact. */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
                exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
/*
  WritePascalString() writes 'value' as a Pascal-style string: a length
  byte followed by at most 255 characters, then zero bytes so that the
  total emitted size (length byte + characters + pad) is a multiple of
  'padding'.  Returns the number of bytes written.
*/
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    count,
    length;

  ssize_t
    i;

  /*
    Max length is 255.
  */
  count=0;
  length=strlen(value);  /* hoisted: computed once instead of twice */
  if (length > 255UL)
    length=255UL;
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  length++;  /* account for the length byte itself */
  if ((length % padding) == 0)
    return(count);
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}
/*
  WriteResolutionResourceBlock() emits the 0x03ED "resolution info"
  image-resource block: x/y resolution as 16.16 fixed-point values plus
  unit codes (1 = pixels/inch, 2 = pixels/cm per the values written
  below).  Centimeter resolutions are converted to inches (x2.54) first.
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;
  unsigned short
    units;
  if (image->units == PixelsPerCentimeterResolution)
    {
      /* Scale to 16.16 fixed point; +0.5 rounds to nearest. */
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  /* NOTE(review): +0.5 is applied a second time here (it was already
     added above), biasing the result by one ULP of the fixed-point
     fraction -- confirm whether this double rounding is intentional. */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  /*
    Emit one channel-information entry: a 2-byte channel id followed by
    a zeroed length placeholder that is patched later (WritePSDSize).
  */
  size_t
    written;

  written=(size_t) WriteBlobShort(image,(unsigned short) channel);
  written+=SetPSDSize(psd_info,image,0);
  return(written);
}
/*
  RemoveICCProfileFromResourceBlock() walks the 8BIM resource entries in
  'bim_profile' and excises the entry with id 0x040F (the embedded ICC
  profile), shrinking the StringInfo in place.  Entries after the removed
  one are shifted down with memmove.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  const unsigned char
    *p;
  size_t
    length;
  unsigned char
    *datum;
  unsigned int
    count,
    long_sans;
  unsigned short
    id,
    short_sans;
  length=GetStringInfoLength(bim_profile);
  /* Too short to contain even one complete resource header. */
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;
    q=(unsigned char *) p;  /* start of this resource entry */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    /* Header: 4-byte signature, 2-byte id, 2-byte (empty) name,
       4-byte data length. 'sans' fields are read and discarded. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;
        /* Full entry size: padded data length + 12-byte header. */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    /* Resource data is padded to an even byte count. */
    if ((count & 0x01) != 0)
      p++;
  }
}
/*
  RemoveResolutionFromResourceBlock() walks the 8BIM resource entries in
  'bim_profile' and excises the entry with id 0x03ED (resolution info),
  shrinking the StringInfo in place, so a freshly generated resolution
  block can be written instead.
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  const unsigned char
    *p;
  size_t
    length;
  unsigned char
    *datum;
  unsigned int
    count,
    long_sans;
  unsigned short
    id,
    short_sans;
  length=GetStringInfoLength(bim_profile);
  /* Too short to contain even one complete resource header. */
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;
    ssize_t
      cnt;
    q=(unsigned char *) p;  /* start of this resource entry */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    /* Header: 4-byte signature, 2-byte id, 2-byte (empty) name,
       4-byte data length. 'sans' fields are read and discarded. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    cnt=PSDQuantum(count);  /* data length padded to an even count */
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        /* Shift the remaining entries down over this one. */
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}
/*
  GetAdditionalInformation() returns the "psd:additional-info" profile
  that should be written back to the layer record, filtered according to
  the "psd:additional-info" image option:
    - "all":        return the profile untouched;
    - "selective":  keep only the whitelisted keys below, compacting the
                    profile in place;
    - anything else (including unset): drop the profile, return NULL.
  Returns NULL when there is no profile or nothing survives filtering.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36
  char
    key[PSDKeySize];
  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;
  const StringInfo
    *info;
  MagickBooleanType
    found;
  size_t
    i;
  size_t
    remaining_length,
    length;
  StringInfo
    *profile;
  unsigned char
    *p;
  unsigned int
    size;
  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      /* Option absent or unrecognized: discard the profile entirely. */
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;  /* re-used: now accumulates the bytes kept */
  /* Each entry: 4-byte signature, 4-byte key, 4-byte size, then data. */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    /* Big-endian 32-bit data size. */
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    /* Truncated entry: bail out rather than read past the buffer. */
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* Not whitelisted: compact the buffer over this entry.
           memmove returns its dest (p-12), leaving p at the next entry. */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  /* NOTE(review): 'info' aliases the removed 'profile'; the shortened
     buffer is re-registered under the same profile name. */
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  SetStringInfoLength(profile,(const size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}
/*
  WritePSDLayersInternal() writes the PSD "layer info" section for every
  image in the list after 'image' (or 'image' itself when it is alone):
  first a layer-record header per layer (bounds, channel table, blend
  mode, opacity, mask, name, additional info), then the channel pixel
  data per layer.  The section's total size is patched back at the start
  on completion.  If 'layers_size' is non-NULL the unrounded section size
  is stored there.  Returns MagickTrue on success.
*/
static MagickBooleanType WritePSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
  ExceptionInfo *exception)
{
  char
    layer_name[MagickPathExtent];
  const char
    *property;
  const StringInfo
    *info;
  Image
    *base_image,
    *next_image;
  MagickBooleanType
    status;
  MagickOffsetType
    *layer_size_offsets,
    size_offset;
  ssize_t
    i;
  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    rounded_size,
    size;
  status=MagickTrue;
  base_image=GetNextImageInList(image);
  if (base_image == (Image *) NULL)
    base_image=image;
  size=0;
  /* Reserve the section-length field; patched at the end. */
  size_offset=TellBlob(image);
  (void) SetPSDSize(psd_info,image,0);
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* A negative layer count signals that the first alpha channel holds
     the merged-image transparency. */
  if (image->alpha_trait != UndefinedPixelTrait)
    size+=WriteBlobShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobShort(image,(unsigned short) layer_count);
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  /* Pass 1: write one layer record per image. */
  layer_index=0;
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;
    unsigned char
      default_color;
    unsigned short
      channels,
      total_channels;
    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        /* Opacity mask stored in the registry under this artifact key. */
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
        default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
      }
    /* Layer bounds: top, left, bottom, right. */
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
      next_image->columns));
    channels=1;
    if ((next_image->storage_class != PseudoClass) &&
        (IsImageGray(next_image) == MagickFalse))
      channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 :
        3);
    total_channels=channels;
    if (next_image->alpha_trait != UndefinedPixelTrait)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobShort(image,total_channels);
    /* Remember where this layer's channel-length slots begin so pass 2
       can patch them with the real compressed sizes. */
    layer_size_offsets[layer_index++]=TellBlob(image);
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(psd_info,image,(signed short) i);
    /* Channel id -1 is alpha, -2 is the user-supplied layer mask. */
    if (next_image->alpha_trait != UndefinedPixelTrait)
      size+=WriteChannelSize(psd_info,image,-1);
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(psd_info,image,-2);
    size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
    size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;
        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
      }
    else
      size+=WriteBlobByte(image,255);
    size+=WriteBlobByte(image,0);  /* clipping: base */
    size+=WriteBlobByte(image,(const unsigned char)
      (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
    size+=WriteBlobByte(image,0);  /* filler */
    info=GetAdditionalInformation(image_info,next_image,exception);
    property=(const char *) GetImageProperty(next_image,"label",exception);
    if (property == (const char *) NULL)
      {
        /* Unnamed layer: synthesize "L<index>". */
        (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    /* Extra-data length: padded Pascal name + mask block + additional
       info + the two fixed 4-byte sub-section lengths. */
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    size+=WriteBlobLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobLong(image,0);  /* no layer-mask data */
    else
      {
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,exception);
        /* Mask bounds are stored in canvas coordinates. */
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        size+=WriteBlobLong(image,20);  /* layer-mask data size */
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.y);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.x);
        size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+
          mask->page.y));
        size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+
          mask->page.x));
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,(const unsigned char)
          (mask->compose == NoCompositeOp ? 2 : 0));  /* mask flags */
        size+=WriteBlobMSBShort(image,0);  /* padding */
      }
    size+=WriteBlobLong(image,0);  /* layer blending ranges length */
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),
        GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Now the image data!
  */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    length=WritePSDChannels(psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue,exception);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write the total size
  */
  if (layers_size != (size_t*) NULL)
    *layers_size=size;
  /* Section length must be even; round up if odd. */
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      (void) DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  return(status);
}
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Public entry point for writing the layer section.  Consults the
    security policy first; when writing PSD is not authorized the call
    is silently skipped (reported as success).
  */
  if (IsRightsAuthorized(CoderPolicyDomain,WritePolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(WritePSDLayersInternal(image,image_info,psd_info,(size_t *) NULL,
    exception));
}
/*
  WritePSDImage() writes IMAGE to an open blob in Adobe Photoshop PSD/PSB
  format: file header, optional colormap, image resource block (resolution,
  optional 8BIM and ICC profiles), the layer section, and finally the
  composite (merged) image data.  Returns MagickTrue on success.
*/
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
const StringInfo
*icc_profile;
MagickBooleanType
status;
PSDInfo
psd_info;
ssize_t
i;
size_t
length,
num_channels,
packet_size;
StringInfo
*bim_profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
/* Bytes per pixel: 3 (or 6 for >8-bit depth), +1/+2 when alpha present. */
packet_size=(size_t) (image->depth > 8 ? 6 : 3);
if (image->alpha_trait != UndefinedPixelTrait)
packet_size+=image->depth > 8 ? 2 : 1;
/* Version 2 (PSB) when explicitly requested or dimensions exceed the
   30000-pixel PSD limit. */
psd_info.version=1;
if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
(image->columns > 30000) || (image->rows > 30000))
psd_info.version=2;
/* File header: signature, version, 6 reserved bytes. */
(void) WriteBlob(image,4,(const unsigned char *) "8BPS");
(void) WriteBlobMSBShort(image,psd_info.version); /* version */
for (i=1; i <= 6; i++)
(void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
/* When the image has a color profile it won't be converted to gray scale */
if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
(SetImageGray(image,exception) != MagickFalse))
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
if ((image_info->type != TrueColorType) && (image_info->type !=
TrueColorAlphaType) && (image->storage_class == PseudoClass))
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
{
if (image->storage_class == PseudoClass)
(void) SetImageStorageClass(image,DirectClass,exception);
if (image->colorspace != CMYKColorspace)
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
else
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
}
(void) WriteBlobMSBShort(image,(unsigned short) num_channels);
(void) WriteBlobMSBLong(image,(unsigned int) image->rows);
(void) WriteBlobMSBLong(image,(unsigned int) image->columns);
if (IsImageGray(image) != MagickFalse)
{
MagickBooleanType
monochrome;
/*
Write depth & mode.
*/
monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse;
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
}
else
{
/* Non-gray: indexed images are written at 8 bits, others at 8 or 16. */
(void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
if (((image_info->colorspace != UndefinedColorspace) ||
(image->colorspace != CMYKColorspace)) &&
(image_info->colorspace != CMYKColorspace))
{
(void) TransformImageColorspace(image,sRGBColorspace,exception);
(void) WriteBlobMSBShort(image,(unsigned short)
(image->storage_class == PseudoClass ? IndexedMode : RGBMode));
}
else
{
if (image->colorspace != CMYKColorspace)
(void) TransformImageColorspace(image,CMYKColorspace,exception);
(void) WriteBlobMSBShort(image,CMYKMode);
}
}
if ((IsImageGray(image) != MagickFalse) ||
(image->storage_class == DirectClass) || (image->colors > 256))
(void) WriteBlobMSBLong(image,0);
else
{
/*
Write PSD raster colormap.
*/
/* Fixed 768-byte table: 256 red, 256 green, 256 blue bytes, each
   channel zero-padded past image->colors. */
(void) WriteBlobMSBLong(image,768);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].red)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].green)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].blue)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
}
/*
Image resource block.
*/
length=28; /* 0x03EB */
bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
icc_profile=GetImageProfile(image,"icc");
if (bim_profile != (StringInfo *) NULL)
{
/* Clone before editing: resolution (and a duplicate ICC resource, when
   an ICC profile will be written separately) are stripped from the
   8BIM profile copy. */
bim_profile=CloneStringInfo(bim_profile);
if (icc_profile != (StringInfo *) NULL)
RemoveICCProfileFromResourceBlock(bim_profile);
RemoveResolutionFromResourceBlock(bim_profile);
length+=PSDQuantum(GetStringInfoLength(bim_profile));
}
if (icc_profile != (const StringInfo *) NULL)
length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
(void) WriteBlobMSBLong(image,(unsigned int) length);
WriteResolutionResourceBlock(image);
if (bim_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,GetStringInfoLength(bim_profile),
GetStringInfoDatum(bim_profile));
bim_profile=DestroyStringInfo(bim_profile);
}
if (icc_profile != (StringInfo *) NULL)
{
/* ICC profile as image resource 0x040F, padded to an even length. */
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x0000040F);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
icc_profile));
(void) WriteBlob(image,GetStringInfoLength(icc_profile),
GetStringInfoDatum(icc_profile));
if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile)))
(void) WriteBlobByte(image,0);
}
if (status != MagickFalse)
{
MagickOffsetType
size_offset;
size_t
size;
/* Write a zero layer-section length first, then back-patch it with the
   actual size once the layers have been written. */
size_offset=TellBlob(image);
(void) SetPSDSize(&psd_info,image,0);
status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
exception);
size_offset+=WritePSDSize(&psd_info,image,size+
(psd_info.version == 1 ? 8 : 12),size_offset);
}
(void) WriteBlobMSBLong(image,0); /* user mask data */
/*
Write composite image.
*/
if (status != MagickFalse)
{
CompressionType
compression;
/* Zip is not supported for the merged image data; fall back to RLE.
   The image's compression field is restored afterwards. */
compression=image->compression;
if (image_info->compression != UndefinedCompression)
image->compression=image_info->compression;
if (image->compression == ZipCompression)
image->compression=RLECompression;
if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
exception) == 0)
status=MagickFalse;
image->compression=compression;
}
(void) CloseBlob(image);
return(status);
}
|
jacobi_omp.c | /*
 * Copyright (c) 2008, BSC (Barcelona Supercomputing Center)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY BSC ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <math.h>
#include <time.h>
#define NB 32
#define B 128
#define FALSE (0)
#define TRUE (1)
typedef double fp_type;
typedef fp_type *vin;
typedef fp_type *vout;
typedef fp_type *bin;
typedef fp_type *binout;
fp_type *A[NB][NB];
fp_type *A_new[NB][NB];
fp_type *tmp[NB][NB];
void alloc_and_genmat()
{
int init_val, i, j, ii, jj;
fp_type *p, *p_new;
init_val = 1325;
for (ii = 0; ii < NB; ii++)
{
for (jj = 0; jj < NB; jj++)
{
A[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type));
A_new[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type));
tmp[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type));
if (A[ii][jj] == NULL || A_new[ii][jj] == NULL || tmp[ii][jj] == NULL)
{
printf("Out of memory\n");
exit(1);
}
p = A[ii][jj];
p_new = A_new[ii][jj];
for (i = 0; i < B; i++)
{
for (j = 0; j < B; j++)
{
init_val = (3125 * init_val) % 65536;
(*p) = (fp_type)((init_val - 32768.0) / 16384.0);
(*p_new) = (*p);
p++;
p_new++;
}
}
}
}
}
long usecs(void)
{
    /* Wall-clock time in microseconds since the Unix epoch. */
    struct timeval now;
    gettimeofday(&now, NULL);
    return (long)now.tv_sec * 1000000L + now.tv_usec;
}
void clear(vout v)
{
    /* Zero-fill one halo vector of length B. */
    for (int k = 0; k < B; k++)
        v[k] = (fp_type)0.0;
}
void getlastrow(bin A, vout v)
{
    /* Copy the bottom row (row B-1) of block A into halo vector v. */
    for (int c = 0; c < B; c++)
        v[c] = A[(B - 1) * B + c];
}
void getlastcol(bin A, vout v)
{
    /* Copy the rightmost column (column B-1) of block A into halo vector v. */
    for (int r = 0; r < B; r++)
        v[r] = A[r * B + (B - 1)];
}
void getfirstrow(bin A, vout v)
{
    /* Copy the top row (row 0) of block A into halo vector v. */
    for (int c = 0; c < B; c++)
        v[c] = A[c];
}
void getfirstcol(bin A, vout v)
{
    /* Copy the leftmost column (column 0) of block A into halo vector v. */
    for (int r = 0; r < B; r++)
        v[r] = A[r * B];
}
/*
 * One 5-point Jacobi sweep over a single B x B block.
 *
 * Halo conventions: lefthalo/righthalo are neighbour COLUMNS of length B,
 * indexed by row i; tophalo/bottomhalo are neighbour ROWS of length B,
 * indexed by column j.
 *
 * BUG FIX: the original indexed lefthalo by j (always element 0 when j==0),
 * and tophalo/bottomhalo by i, mixing up row and column halos.  righthalo[i]
 * was already correct and establishes the convention used here.
 * Also removed the unused local `tmp` (which shadowed the global tmp array).
 */
void jacobi(vin lefthalo, vin tophalo, vin righthalo, vin bottomhalo, bin A, binout A_new)
{
    int i, j;
    fp_type left, top, right, bottom;

    for (i = 0; i < B; i++)
    {
        for (j = 0; j < B; j++)
        {
            left = (j == 0 ? lefthalo[i] : A[i * B + j - 1]);
            top = (i == 0 ? tophalo[j] : A[(i - 1) * B + j]);
            right = (j == B - 1 ? righthalo[i] : A[i * B + j + 1]);
            bottom = (i == B - 1 ? bottomhalo[j] : A[(i + 1) * B + j]);
            /* Average of the centre and its four neighbours. */
            A_new[i * B + j] = 0.2 * (A[i * B + j] + left + top + right + bottom);
        }
    }
}
/*
 * Return the largest absolute element-wise difference |A_new - A| over all
 * NB x NB blocks, computed in parallel with a max reduction.
 *
 * BUG FIX: jj, i and j were declared at function scope, so inside the
 * `omp parallel for` they were SHARED across threads — a data race that
 * could corrupt the traversal.  Declaring them inside the loops makes each
 * of them private per thread (only the parallel loop variable ii is
 * privatized automatically).
 */
double maxdelta()
{
    double dmax = -__DBL_MAX__;
    int ii;
#pragma omp parallel for schedule(static) reduction(max: dmax)
    for (ii = 0; ii < NB; ii++)
    {
        for (int jj = 0; jj < NB; jj++)
        {
            for (int i = 0; i < B; i++)
            {
                for (int j = 0; j < B; j++)
                {
                    double diff = fabs(A_new[ii][jj][i * B + j] - A[ii][jj][i * B + j]);
                    if (diff > dmax)
                        dmax = diff;
                }
            }
        }
    }
    return dmax;
}
/*
 * Run `niters` blocked Jacobi iterations.  Each iteration sweeps every
 * B x B block in parallel (gathering halos from the four neighbouring
 * blocks, zero halos at the domain boundary), reports the max delta, and
 * copies A_new back into A.
 *
 * BUG FIX: for blocks on the right edge (jj == NB-1) the original called
 * clear(lefthalo) instead of clear(righthalo), leaving righthalo with stale
 * values from the previous block and wiping a lefthalo that was already set.
 * Also removed the unused local `epsilon`.
 */
void compute(int niters)
{
    int iters;
    int ii, jj;
    /* Per-thread halo buffers, one side each, length B. */
    fp_type lefthalo[B], tophalo[B], righthalo[B], bottomhalo[B];
    double delta;
    iters = 0;
    while (iters < niters)
    {
        ++iters;
#pragma omp parallel \
    private(ii, jj, lefthalo, tophalo, righthalo, bottomhalo) \
    shared(A, A_new)
        {
#pragma omp for schedule(static)
            for (ii = 0; ii < NB; ii++)
            {
                for (jj = 0; jj < NB; jj++)
                {
                    /* Gather halos; boundary blocks get zeroed halos. */
                    if (ii > 0)
                        getlastrow(A[ii - 1][jj], tophalo);
                    else
                        clear(tophalo);
                    if (jj > 0)
                        getlastcol(A[ii][jj - 1], lefthalo);
                    else
                        clear(lefthalo);
                    if (ii < NB - 1)
                        getfirstrow(A[ii + 1][jj], bottomhalo);
                    else
                        clear(bottomhalo);
                    if (jj < NB - 1)
                        getfirstcol(A[ii][jj + 1], righthalo);
                    else
                        clear(righthalo); /* was clear(lefthalo) — see fix note */
                    jacobi(lefthalo, tophalo, righthalo, bottomhalo,
                           A[ii][jj], A_new[ii][jj]);
                } // jj
            } // ii
        } // end parallel
        delta = maxdelta();
        printf("iteration %d: delta = %e\n", iters, delta);
        // yes, this is an inefficient copy
        // however, the library version requires you to do a copy in this way
        // on all of the component parts to avoid segmentation fault
#pragma omp parallel for schedule(static) shared(A, A_new)
        for (int i = 0; i < NB; ++i)
        {
            for (int j = 0; j < NB; ++j)
            {
                for (int k = 0; k < B; ++k)
                    for (int l = 0; l < B; ++l)
                        A[i][j][k * B + l] = A_new[i][j][k * B + l];
            }
        }
    } // iter
}
int main(int argc, char *argv[])
{
    /* Iteration count from argv[1]; defaults to 1 when absent. */
    int niters = (argc > 1) ? atoi(argv[1]) : 1;
    struct timespec start, end;

    alloc_and_genmat();

    clock_gettime(CLOCK_MONOTONIC, &start);
    compute(niters);
    clock_gettime(CLOCK_MONOTONIC, &end);

    /* Convert the timespec pair to elapsed seconds. */
    double time_taken = (end.tv_sec - start.tv_sec) * 1e9;
    time_taken = (time_taken + (end.tv_nsec - start.tv_nsec)) * 1e-9;
    printf("Running time = %g %s\n", time_taken, "s");

    return 0;
}
GB_binop__max_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__max_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__max_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__max_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_uint8)
// A*D function (colscale): GB (_AxD__max_uint8)
// D*A function (rowscale): GB (_DxB__max_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__max_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__max_uint8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_uint8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_uint8)
// C=scalar+B GB (_bind1st__max_uint8)
// C=scalar+B' GB (_bind1st_tran__max_uint8)
// C=A+scalar GB (_bind2nd__max_uint8)
// C=A'+scalar GB (_bind2nd_tran__max_uint8)
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMAX (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_UINT8 || GxB_NO_MAX_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; the operator is fixed to
// GB_IMAX on uint8_t by the macros defined above.  The shared template
// expands GB_BINOP/GB_GETA/GB_GETB for this type combination.
void GB (_Cdense_ewise3_accum__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulate), operator
// fixed to GB_IMAX on uint8_t via the macros above.
void GB (_Cdense_ewise3_noaccum__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C using GB_IMAX.
// Returns GrB_NO_VALUE when this operator/type is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C using GB_IMAX.
GrB_Info GB (_Cdense_accumb__max_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: unreachable (the block above always returns); kept as emitted by
// the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, with GB_IMAX as
// the multiply operator.
GrB_Info GB (_AxD__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, with GB_IMAX as
// the multiply operator.
GrB_Info GB (_DxB__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B with op GB_IMAX on uint8_t.
// When is_eWiseUnion is true, alpha/beta scalars substitute for entries
// missing from A or B respectively.
GrB_Info GB (_AaddB__max_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
if (is_eWiseUnion)
{
// unbox the typed scalars only for eWiseUnion; otherwise unused
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is
// sparse or hypersparse; op GB_IMAX on uint8_t.
GrB_Info GB (_AemultB_08__max_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full; op GB_IMAX on uint8_t.  GB_BINOP_FLIP is 0 for MAX (it is
// commutative), so flipxy needs no special handling here.
GrB_Info GB (_AemultB_02__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full; op GB_IMAX on uint8_t.
GrB_Info GB (_AemultB_04__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult (bitmap): C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is
// bitmap; op GB_IMAX on uint8_t.
GrB_Info GB (_AemultB_bitmap__max_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = max (x, Bx [k]) for every entry present in B (scalar bound to
// the first operand).  Cx and Bx may be aliased.
GrB_Info GB (_bind1st__max_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // skip entries that are absent from the bitmap
        if (!GBB (Bb, k)) continue ;
        uint8_t bkj = GBX (Bx, k, false) ;
        Cx [k] = GB_IMAX (x, bkj) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = max (Ax [k], y) for every entry present in A (scalar bound to
// the second operand).  Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__max_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip entries that are absent from the bitmap
        if (!GBB (Ab, k)) continue ;
        uint8_t akj = GBX (Ax, k, false) ;
        Cx [k] = GB_IMAX (akj, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}
// C = op (x, A'): transpose A and apply cij = max (x, aij) via the
// GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__max_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function (generator
// emits the redefinition here, after the returns)
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}
// C = op (A', y): transpose A and apply cij = max (aij, y) via the
// GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB006-indirectaccess2-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two pointers have a distance of 12 (p1 - p2 = 12).
They are used as base addresses for indirect array accesses using an index set (another array).
The index set has two indices with a distance of 12 :
indexSet[5]- indexSet[0] = 533 - 521 = 12
So there is loop carried dependence (e.g. between loops with index values of 0 and 5).
We use the default loop scheduling (static even) in OpenMP.
It is possible that two dependent iterations will be scheduled
within the same chunk to the same thread; in that case there are no runtime data races.
When N is 180, the two iterations with i=0 and i=5 have a loop-carried dependence.
For static even scheduling, we must have at least 36 threads (180/36=5 iterations)
so iteration 0 and 5 will be scheduled to two different threads.
Data race pair: xa1[idx]@128:5 vs. xa2[idx]@129:5
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#define N 180
#include <omp.h>
int indexSet[180] = {(521), (523), (525), (527), (529), (533), (547), (549), (551), (553), (555), (557), (573), (575), (577), (579), (581), (583), (599), (601), (603), (605), (607), (609), (625), (627), (629), (631), (633), (635), (651), (653), (655), (657), (659), (661), (859), (861), (863), (865), (867), (869), (885), (887), (889), (891), (893), (895), (911), (913), (915), (917), (919), (921), (937), (939), (941), (943), (945), (947), (963), (965), (967), (969), (971), (973), (989), (991), (993), (995), (997), (999), (1197), (1199), (1201), (1203), (1205), (1207), (1223), (1225), (1227), (1229), (1231), (1233), (1249), (1251), (1253), (1255), (1257), (1259), (1275), (1277), (1279), (1281), (1283), (1285), (1301), (1303), (1305), (1307), (1309), (1311), (1327), (1329), (1331), (1333), (1335), (1337), (1535), (1537), (1539), (1541), (1543), (1545), (1561), (1563), (1565), (1567), (1569), (1571), (1587), (1589), (1591), (1593), (1595), (1597), (1613), (1615), (1617), (1619), (1621), (1623), (1639), (1641), (1643), (1645), (1647), (1649), (1665), (1667), (1669), (1671), (1673), (1675), (1873), (1875), (1877), (1879), (1881), (1883), (1899), (1901), (1903), (1905), (1907), (1909), (1925), (1927), (1929), (1931), (1933), (1935), (1951), (1953), (1955), (1957), (1959), (1961), (1977), (1979), (1981), (1983), (1985), (1987), (2003), (2005), (2007), (2009), (2011), (2013)
// 521+12=533
};
int main(int argc,char *argv[])
{
/* One buffer large enough for the biggest index (2013) plus the 12-element
   pointer offset between xa1 and xa2. */
double *base = (double *)(malloc(sizeof(double ) * (2013 + 12 + 1)));
if (base == 0) {
printf("Error in malloc(). Aborting ...\n");
return 1;
}
/* xa1 and xa2 overlap: xa2 = xa1 + 12, and indexSet contains index pairs
   12 apart (e.g. 521 and 533), so xa1[idx] and xa2[idx'] can alias. */
double *xa1 = base;
double *xa2 = xa1 + 12;
int i;
// initialize segments touched by indexSet
#pragma omp parallel for private (i)
for (i = 521; i <= 2025; i += 1) {
base[i] = 0.5 * i;
}
/* This is the benchmark's documented data-race loop (xa1[idx] vs xa2[idx]).
   NOTE(review): in the upstream DataRaceBench DRB006 this loop carries a
   `#pragma omp parallel for` directive; here it runs sequentially, so no
   race can manifest — confirm against the original benchmark source. */
for (i = 0; i <= 179; i += 1) {
int idx = indexSet[i];
xa1[idx] += 1.0;
xa2[idx] += 3.0;
}
printf("x1[999]=%lf xa2[1285]=%lf\n",xa1[999],xa2[1285]);
free(base);
return 0;
}
|
conv_dw_kernel_int8_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: qwang@openailab.com
*/
#include <stdint.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include "conv_dw_kernel_int8_arm.h"
#ifdef __aarch64__
void depthwise_k3s1p1_int8_a72(int8_t* input, int8_t* kernel, int8_t* out, int* bias, long out_h, long out_w,
long multi, long shift, long input_w, long act_min, long act_max);
void depthwise_k3s2p1_int8_a72(int8_t* input, int8_t* kernel, int8_t* out, int* bias, long out_h, long out_w,
long multi, long shift, long input_w, long act_min, long act_max);
#else
void depthwise_k3s1_int8(int8_t* input, int8_t* kernel, int8_t* out, int* bias, int out_h, int out_w,
int multi, int shift, int input_w, int act_min, int act_max);
void depthwise_k3s2_int8(int8_t* input, int8_t* kernel, int8_t* out, int* bias, int out_h, int out_w,
int multi, int shift, int input_w, int act_min, int act_max);
#endif
int conv_dw_int8_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor,
                        struct conv_priv_info* priv_info, struct conv_param* param)
{
    /* Precompute per-output-channel fixed-point requantization parameters
     * (Q31 multiplier + right shift) and the activation clamp range used
     * by the int8 depthwise kernels.  Returns 0 on success, -1 on OOM. */
    int out_c = output_tensor->dims[1];

    priv_info->multi = (int*)sys_malloc(out_c * sizeof(int));
    priv_info->q_shift = (int*)sys_malloc(out_c * sizeof(int));
    if (priv_info->multi == NULL || priv_info->q_shift == NULL)
        return -1;

    float input_scale = input_tensor->scale;
    float* kernel_scales = filter_tensor->scale_list;
    float output_scale = output_tensor->scale;

    priv_info->activation_min = -127;
    priv_info->activation_max = 127;

    /* set activation: 1 -> ReLU1, 6 -> ReLU6, otherwise plain ReLU;
     * the upper clamp is expressed in the output quantization scale and
     * saturated to the int8 range */
    if(param->activation >= 0)
    {
        priv_info->activation_min = 0;
        if(param->activation == 1)
            priv_info->activation_max = round(1.0 / output_scale);
        if(param->activation == 6)
            priv_info->activation_max = round(6.0 / output_scale);
        if(priv_info->activation_max > 127)
            priv_info->activation_max = 127;
    }

    for(int i=0; i<out_c; i++)
    {
        float kernel_scale = kernel_scales[i];
        float scale = input_scale * kernel_scale / output_scale;
        int shift;
        /* scale = q * 2^shift with q in [0.5, 1); encode q as a Q31 multiplier. */
        float q = frexp(scale, &shift);
        /* Fix: use a 64-bit intermediate.  q * 2^31 can round up to exactly
         * 2^31, which overflows a 32-bit int (UB) before the renormalization
         * check below ever runs; the old compare against (1l << 31) was also
         * UB on platforms with 32-bit long. */
        long long fix_q = llround(q * (double)(1ll << 31));
        if (fix_q == (1ll << 31))
        {
            fix_q /= 2;
            shift++;
        }
        priv_info->multi[i] = (int)fix_q;
        priv_info->q_shift[i] = (int)shift;
    }

    return 0;
}
int conv_dw_int8_postrun(struct conv_priv_info* priv_info)
{
    /* Release the requantization tables allocated by conv_dw_int8_prerun
     * and null the pointers so a second call is harmless. */
    if (priv_info->multi != NULL)
    {
        sys_free(priv_info->multi);
        priv_info->multi = NULL;
    }
    if (priv_info->q_shift != NULL)
    {
        sys_free(priv_info->q_shift);
        priv_info->q_shift = NULL;
    }
    return 0;
}
/* Dispatch an int8 3x3 depthwise convolution, one channel at a time.
 * On aarch64 the hand-written Cortex-A72 kernels handle the pad=1 border
 * internally; on other targets the input is explicitly zero-padded first
 * and the portable kernels are called.  p_multi/p_shift are the per-channel
 * requantization parameters computed in conv_dw_int8_prerun. */
void conv_dw_int8_direct(int8_t* input_buf, int8_t* weight_buf, int8_t* output_buf, int* bias, int input_h, int input_w,
                         int output_h, int output_w, int channel_num, int stride, int* pads, int* p_multi, int* p_shift,
                         int activation_min, int activation_max, int num_thread, int cpu_affinity)
{
#ifdef __aarch64__
    /* One assembly-kernel call per channel; channels are independent so the
     * loop parallelizes trivially. */
#pragma omp parallel for num_threads(num_thread)
    for(int i = 0; i < channel_num; i++)
    {
        int8_t* input_data = input_buf + i * input_h * input_w;
        int* bias_tmp = bias ? (bias + i) : NULL;
        if (stride == 1)
            depthwise_k3s1p1_int8_a72(input_data, weight_buf + 9 * i, output_buf + i * output_h * output_w, bias_tmp, output_h, output_w,
                                      p_multi[i], p_shift[i], input_w, activation_min, activation_max);
        else
            depthwise_k3s2p1_int8_a72(input_data, weight_buf + 9 * i, output_buf + i * output_h * output_w, bias_tmp, output_h, output_w,
                                      p_multi[i], p_shift[i], input_w, activation_min, activation_max);
    }
#else
    int8_t* input_pad = NULL;
    int input_h_pad = input_h + pads[0] + pads[2];
    int input_w_pad = input_w + pads[1] + pads[3];
    int is_pad0 = (pads[0] == 0 && pads[1] == 0 && pads[2] == 0 && pads[3] == 0);
    if(!is_pad0)
    {
        /* Zero-filled padded copy of the whole input.  The extra 128 bytes
         * are presumably slack for kernel over-read — TODO confirm.
         * NOTE(review): malloc result is not checked before memset. */
        input_pad = (int8_t*)malloc(sizeof(int8_t) * channel_num * input_h_pad * input_w_pad + 128);
        memset(input_pad, 0, sizeof(int8_t) * channel_num * input_h_pad * input_w_pad + 128);
    }
    // #pragma omp parallel for num_threads(num_thread)
    for(int i = 0; i < channel_num; i++)
    {
        int8_t* input_tmp = NULL;
        int* bias_tmp = bias ? (bias + i) : NULL;
        int channel_size = input_h * input_w;
        int inh_tmp = input_h + pads[0] + pads[2];
        int inw_tmp = input_w + pads[1] + pads[3];
        if(!is_pad0)
        {
            /* Copy this channel row by row into the interior of its padded
             * slot, leaving the zeroed border intact. */
            int8_t* tmp = input_pad + i * inh_tmp * inw_tmp;
            input_tmp = tmp;
            tmp += pads[0] * inw_tmp + pads[1];
            for(int j = 0; j < input_h; j ++)
            {
                memcpy(tmp, input_buf + i * channel_size + j * input_w, input_w);
                tmp += inw_tmp;
            }
        }
        else
        {
            /* No padding needed: point straight into the source tensor. */
            input_tmp = input_buf + i * channel_size;
        }
        if (stride == 1)
            depthwise_k3s1_int8(input_tmp, weight_buf + 9 * i, output_buf + i * output_h * output_w, bias_tmp, output_h, output_w,
                                p_multi[i], p_shift[i], inw_tmp, activation_min, activation_max);
        else
            depthwise_k3s2_int8(input_tmp, weight_buf + 9 * i, output_buf + i * output_h * output_w, bias_tmp, output_h, output_w,
                                p_multi[i], p_shift[i], inw_tmp, activation_min, activation_max);
    }
    if(!is_pad0)
    {
        free(input_pad);
        input_pad = NULL;
    }
#endif
}
int conv_dw_int8_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
                     struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                     int num_thread, int cpu_affinity)
{
    /* Run an int8 depthwise convolution: one call to the direct kernel per
     * batch image.  conv_dw_int8_prerun must have filled priv_info first.
     * (Unused parameter/shape locals from the generic conv template were
     * removed; kernel_h/kernel_w are kept for the weight-block size.) */

    /* conv params */
    int group = param->group;
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int pads[4];
    pads[0] = param->pad_h0;
    pads[1] = param->pad_w0;
    pads[2] = param->pad_h1;
    pads[3] = param->pad_w1;

    /* shapes (per group) */
    int batch = input_tensor->dims[0];
    int in_c = input_tensor->dims[1] / group;
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    int input_size = in_c * in_h * in_w;
    int kernel_size = in_c * kernel_h * kernel_w;
    int out_c = output_tensor->dims[1] / group;
    int out_h = output_tensor->dims[2];
    int out_w = output_tensor->dims[3];
    int output_size = out_c * out_h * out_w;

    int activation_min = priv_info->activation_min;
    int activation_max = priv_info->activation_max;

    /* buffer addr */
    int8_t* input_buf = ( int8_t* )input_tensor->data;
    int8_t* kernel_buf = ( int8_t* )filter_tensor->data;
    int8_t* output_buf = ( int8_t* )output_tensor->data;
    int32_t* biases_buf = NULL;
    if (bias_tensor != NULL)
        biases_buf = (int32_t*)bias_tensor->data;
    int* multi = priv_info->multi;
    int* q_shift = priv_info->q_shift;

    for (int n = 0; n < batch; n++) // batch size
    {
        int8_t* input = input_buf + n * input_size * group;
        /* NOTE(review): the weight pointer is advanced per batch image just
         * like the activations.  Depthwise weights are normally shared
         * across the batch, so this offset looks suspicious for batch > 1 —
         * kept as-is to preserve behavior, but worth confirming against the
         * filter tensor layout. */
        int8_t* kernel = kernel_buf + n * kernel_size * group;
        int8_t* output = output_buf + n * output_size * group;
        conv_dw_int8_direct(input, kernel, output, biases_buf, in_h, in_w,
                            out_h, out_w, in_c * group, stride_h, pads, multi, q_shift,
                            activation_min, activation_max, num_thread, cpu_affinity);
    }
    return 0;
}
|
trans2d.c | /*Daala video codec
Copyright (c) 2013 Daala project contributors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS”
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdlib.h>
#include "od_defs.h"
#include "od_filter.h"
#include "trans_tools.h"
#include "int_search.h"
#include "kiss99.h"
#define USE_FILES (0)
#define USE_AR95 (1)
#define USE_SUBSET1 (0)
#define USE_SUBSET3 (0)
#define PRINT_COV (0)
#define CG_SEARCH (0)
#define USE_SIMPLEX (1)
#define RAMP_DYADIC (0)
#if CG_SEARCH
# if USE_TYPE3 && RAMP_DYADIC
# error "Dyadic ramp constraint not supported for Type-III transform."
# endif
# if USE_SIMPLEX && RAMP_DYADIC
# error "Dyadic ramp constraint not supported with simplex search."
# endif
# if NN_SEARCH && !USE_SIMPLEX
# error "Non-negative search requires simplex search."
# endif
/* Search for lapped-transform pre/post-filter parameters maximizing the 2-D
 * coding gain of the collapsed autocorrelation _r.  Two strategies:
 * exhaustive enumeration (!USE_SIMPLEX, B_SZ 4 or 8, optionally restricted
 * to dyadic-ramp-feasible scale factors) or randomized multi-start simplex
 * search (USE_SIMPLEX).  Only compiled when CG_SEARCH is enabled. */
static void coding_gain_search(const double _r[2*B_SZ*2*B_SZ]){
# if !USE_SIMPLEX
# if B_SZ==4
  {
    /* f = {s0, s1, p0, q0}: two scale factors and the p/q lifting steps. */
    int f[4];
    int p0;
    int q0;
    int s0;
    int s1;
    double cg;
    double best_cg;
    best_cg=0;
# if RAMP_DYADIC
    /* Enumerate only q0/p0; s0 and s1 are forced by the dyadic-ramp
     * constraint and must come out integral and >= 1. */
    for(q0=(1<<FILTER_BITS);q0>=-(1<<FILTER_BITS);q0--){
      int t0;
      f[3]=q0;
      /* S0 = 4/1*(1-q0/64)
       * S0 >= 1 -> 64-q0 >= 16
       */
      t0=(1<<FILTER_BITS)-q0;
      s0=1*t0-0;
      if(s0>=(1<<FILTER_BITS-2)){
        s0*=4;
        f[0]=s0;
        for(p0=-(1<<FILTER_BITS);p0<=(1<<FILTER_BITS);p0++){
          f[2]=p0;
          /* S1 = 4/3*(1-(1-q0/64)*p0/64)
           * S1 >= 1 -> 64^2-(64-q0)*p0 >= 64*48
           * S1 = x/64 -> 64^2-(64-q0)*p0 = 0 MOD 48
           */
          s1=(1<<2*FILTER_BITS)-t0*p0;
          if(s1>=(1<<FILTER_BITS)*(3<<FILTER_BITS-2)&&s1%(3<<FILTER_BITS-2)==0){
            s1/=(3<<FILTER_BITS-2);
            f[1]=s1;
            cg=coding_gain_2d_collapsed(_r,f);
            if(cg>best_cg){
              /* Print every new best so partial progress is usable. */
              best_cg=cg;
              printf("%i %i %i %i %G\n",p0,q0,s0,s1,cg);
            }
          }
        }
      }
    }
# else
    /* Unconstrained sweep over the lifting steps and both scale factors. */
    for(p0=-(1<<FILTER_BITS);p0<=(1<<FILTER_BITS);p0++){
      f[2]=p0;
      for(q0=(1<<FILTER_BITS);q0>=-(1<<FILTER_BITS);q0--){
        f[3]=q0;
        for(s0=(1<<FILTER_BITS);s0<=2*(1<<FILTER_BITS);s0++){
          f[0]=s0;
          for(s1=(1<<FILTER_BITS);s1<=2*(1<<FILTER_BITS);s1++){
            f[1]=s1;
            cg=coding_gain_2d_collapsed(_r,f);
            if(cg>best_cg){
              best_cg=cg;
              printf("%i %i %i %i %G\n",p0,q0,s0,s1,cg);
            }
          }
        }
      }
    }
# endif
  }
# elif B_SZ==8
  {
    /* f = {s0..s3, p0..p2, q0..q2}: four scales plus three p/q step pairs. */
    int f[10];
    int p0;
    int p1;
    int p2;
    int q0;
    int q1;
    int q2;
    int s0;
    int s1;
    int s2;
    int s3;
    double cg;
    double best_cg;
    best_cg=0;
# if RAMP_DYADIC
    /* Same dyadic-ramp derivation as the 4x4 case, chained through three
     * lifting stages; each s_i must be integral and >= 1 to recurse. */
    for(q0=(1<<FILTER_BITS);q0>=-(1<<FILTER_BITS);q0--){
      int t0;
      f[7]=q0;
      /* S0 = 8/1*(1-q0/64)
       * S0 >= 1 -> 64-q0 >= 8
       */
      t0=(1<<FILTER_BITS)-q0;
      s0=1*t0-0;
      if(s0>=(1<<FILTER_BITS-3)){
        s0*=8;
        f[0]=s0;
        for(p0=-(1<<FILTER_BITS);p0<=(1<<FILTER_BITS);p0++){
          f[4]=p0;
          for(q1=(1<<FILTER_BITS);q1>=-(1<<FILTER_BITS);q1--){
            int t1;
            f[8]=q1;
            /* S1 = 8/3*((1-q1/64)-(1-q0/64)*p0/64)
             * S1 >= 1 -> 64*t1-t0*p0 >= 64*24
             * S1 = x/64 -> 64*t1-t0*p0 = 0 MOD 24
             */
            t1=(1<<FILTER_BITS)-q1;
            s1=(1<<FILTER_BITS)*t1-t0*p0;
            if(s1>=(1<<FILTER_BITS)*(3<<FILTER_BITS-3)&&
             s1%(3<<FILTER_BITS-3)==0){
              s1/=(3<<FILTER_BITS-3);
              f[1]=s1;
              for(p1=-(1<<FILTER_BITS);p1<=(1<<FILTER_BITS);p1++){
                f[5]=p1;
                for(q2=(1<<FILTER_BITS);q2>=-(1<<FILTER_BITS);q2--){
                  int t2;
                  f[9]=q2;
                  /* S2 = 8/5*((1-q2/64)-(1-q1/64)*p1/64)
                   * S2 >= 1 -> 64*t2-t1*p1) >= 64*40
                   * S2 = x/64 -> 64*t2-t1*p1 = 0 MOD 40
                   */
                  t2=(1<<FILTER_BITS)-q2;
                  s2=(1<<FILTER_BITS)*t2-t1*p1;
                  if(s2>=(1<<FILTER_BITS)*(5<<FILTER_BITS-3)&&
                   s2%(5<<FILTER_BITS-3)==0){
                    s2/=(5<<FILTER_BITS-3);
                    f[2]=s2;
                    for(p2=-(1<<FILTER_BITS);p2<=(1<<FILTER_BITS);p2++){
                      f[6]=p2;
                      /* S3 = 8/7*(1-(1-q2/64)*p2/64)
                       * S3 >= 1 -> 64^2-t2*p2 >= 64*56
                       * S3 = x/64 -> 64^2-t2*p2 = 0 MOD 56
                       */
                      s3=(1<<2*FILTER_BITS)-t2*p2;
                      if(s3>=(1<<FILTER_BITS)*(7<<FILTER_BITS-3)&&
                       s3%(7<<FILTER_BITS-3)==0){
                        s3/=(7<<FILTER_BITS-3);
                        f[3]=s3;
                        cg=coding_gain_2d_collapsed(_r,f);
                        if(cg>best_cg){
                          best_cg=cg;
                          printf("%i %i %i %i %i %i %i %i %i %i %-24.18G\n",
                           p0,p1,p2,q0,q1,q2,s0,s1,s2,s3,cg);
                        }
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
# else
# error "Exhaustive search for B_SZ==8 only supported using RAMP_DYADIC (1)."
# endif
  }
# else
# error "Exhaustive search not supported for this block size."
# endif
# else
  {
    /* Randomized multi-start simplex search: 1024 random starting points
     * within per-dimension bounds, refined by int_simplex_max. */
    int dims;
    int i;
    kiss99_ctx ks[NUM_PROCS];
    int lb[22];
    int ub[22];
# if B_SZ==4
    dims=3;
# elif B_SZ==8
    dims=10;
# elif B_SZ==16
    dims=22;
# else
# error "Unsupported block size."
# endif
    for(i=0;i<dims;i++){
# if B_SZ==4
      lb[i]=-2*(1<<FILTER_BITS);
      ub[i]=2*(1<<FILTER_BITS);
# else
      /* First B_SZ/2 dims are scale factors in [1,2]; the rest are lifting
       * steps in [-1,1] (all in FILTER_BITS fixed point). */
      lb[i]=i<(B_SZ>>1)?(1<<FILTER_BITS):-(1<<FILTER_BITS);
      ub[i]=i<(B_SZ>>1)?2*(1<<FILTER_BITS):(1<<FILTER_BITS);
# endif
    }
    /* One deterministic RNG stream per worker thread. */
    for(i=0;i<NUM_PROCS;i++){
      uint32_t srand;
      srand=i*16843009; /*Broadcast char to 4xchar*/
      kiss99_srand(&ks[i],(unsigned char *)&srand,sizeof(srand));
    }
#pragma omp parallel for schedule(dynamic)
    for(i=0;i<1024;i++){
      int tid;
      int j;
# if B_SZ==4
      int f[3];
# elif B_SZ==8
      int f[10];
# elif B_SZ==16
      int f[22];
# else
# error "Unsupported block size."
# endif
      double cg;
      tid=OD_OMP_GET_THREAD;
      /* Draw a uniform start point in [lb,ub] by rejection sampling. */
      for(j=0;j<dims;j++){
        int range;
        int mask;
        int rng;
        range=ub[j]-lb[j];
        mask=(1<<OD_ILOG_NZ(range))-1;
        do {
          rng=((int)kiss99_rand(&ks[tid]))&mask;
        }
        while(rng>range);
        f[j]=lb[j]+rng;
      }
      j=int_simplex_max(&cg,dims,coding_gain_2d_collapsed,_r,lb,ub,f);
      fprintf(stdout,"obj=%-24.18G steps=%4d params={",cg,j);
      for(j=0;j<dims;j++){
        fprintf(stdout,"%3d%c",f[j],j==dims-1?'}':',');
      }
      fprintf(stdout,"\n");
    }
  }
# endif
}
#endif
#if USE_FILES
static int t_start(void *_ctx,const char *_name,const th_info *_ti,int _pli,
 int _nxblocks,int _nyblocks){
  /* Per-file setup callback: announce the image being processed and size
   * the image context to its block grid.  _ti and _pli are unused here. */
  trans_ctx *ctx=(trans_ctx *)_ctx;
  fprintf(stdout,"%s %i %i\n",_name,_nxblocks,_nyblocks);
  fflush(stdout);
  image_ctx_init(&ctx->img,_name,_nxblocks,_nyblocks);
  return EXIT_SUCCESS;
}
static void t_load_data(void *_ctx,const unsigned char *_data,int _stride,
 int _bi,int _bj){
  /* Accumulation callback: only the (0,0) invocation scans the image, and
   * it adds every overlapping 2B_SZ x 2B_SZ window to the training data. */
  trans_ctx *ctx=(trans_ctx *)_ctx;
  if(_bi!=0||_bj!=0)return;
  {
    unsigned char buf[2*B_SZ*2*B_SZ];
    int y;
    int x;
    for(y=0;y<ctx->img.nyblocks*B_SZ-(2*B_SZ-1);y++){
      for(x=0;x<ctx->img.nxblocks*B_SZ-(2*B_SZ-1);x++){
        int v;
        int u;
        /* Gather one window into a contiguous buffer. */
        for(v=0;v<2*B_SZ;v++){
          for(u=0;u<2*B_SZ;u++){
            buf[v*2*B_SZ+u]=_data[(y+v)*_stride+(x+u)];
          }
        }
        trans_data_add(&ctx->td,buf);
      }
    }
  }
}
/* No extra border padding is needed when gathering statistics. */
#define PADDING (0)
/* Per-plane block callbacks passed to ne_apply_to_blocks(). */
const block_func BLOCKS[]={
  t_load_data
};
const int NBLOCKS=sizeof(BLOCKS)/sizeof(*BLOCKS);
#endif
/* Build (or load) a 2B_SZ x 2B_SZ autocorrelation model and either search
 * for optimal filter parameters (CG_SEARCH) or report the coding gain of
 * the current OD_FILTER_PARAMS.  The model source is chosen at compile
 * time: image files (USE_FILES), an AR(0.95) process (USE_AR95), or a
 * precomputed subset matrix (USE_SUBSET1/USE_SUBSET3). */
int main(int _argc,const char *_argv[]){
  trans_ctx ctx[NUM_PROCS];
  const int *f;
  int i;
  double r[2*B_SZ*2*B_SZ];
  const double *cov;
  (void)_argc;
  (void)_argv;
  /* Select the reference filter parameters for this block size. */
#if B_SZ==4
  f=OD_FILTER_PARAMS4;
#elif B_SZ==8
  f=OD_FILTER_PARAMS8;
#elif B_SZ==16
  f=OD_FILTER_PARAMS16;
#else
# error "Need filter params for this block size."
#endif
  for(i=0;i<NUM_PROCS;i++){
    trans_data_init(&ctx[i].td,2*B_SZ*2*B_SZ);
  }
  /* By default the covariance points at the local buffer r; the subset
   * branches below re-point it at a precomputed table instead. */
  cov=r;
#if USE_FILES
  /* Gather statistics from the input images, one context per thread,
   * then merge and normalize into ctx[0]. */
  OD_OMP_SET_THREADS(NUM_PROCS);
  ne_apply_to_blocks(ctx,sizeof(*ctx),0x1,PADDING,t_start,NBLOCKS,BLOCKS,NULL,
   _argc,_argv);
  for(i=1;i<NUM_PROCS;i++){
    trans_data_combine(&ctx[0].td,&ctx[i].td);
  }
  trans_data_normalize(&ctx[0].td);
# if PRINT_COV
  trans_data_print(&ctx[0].td,stderr);
# endif
  /* Sanity check: collapsing and re-expanding should preserve coding gain. */
  fprintf(stdout,"original cg=%- 24.16G\n",coding_gain_2d(ctx[0].td.cov,f));
  trans_data_collapse(&ctx[0].td,2*B_SZ,r);
  fprintf(stdout,"collapse cg=%- 24.16G\n",coding_gain_2d_collapsed(r,f));
  trans_data_expand(&ctx[0].td,2*B_SZ,r);
  fprintf(stdout,"expanded cg=%- 24.16G\n",coding_gain_2d(ctx[0].td.cov,f));
#elif USE_AR95
  /* Synthesize the collapsed autocorrelation of an AR(1) process, rho=0.95. */
  auto_regressive_collapsed(r,2*B_SZ*2*B_SZ,2*B_SZ,0.95);
#elif USE_SUBSET1
# if B_SZ_LOG>=OD_LOG_BSIZE0&&B_SZ_LOG<OD_LOG_BSIZE0+OD_NBSIZES
  cov=SUBSET1_2D[B_SZ_LOG-OD_LOG_BSIZE0];
# else
#  error "Need auto-correlation matrix for subset1 for this block size."
# endif
#elif USE_SUBSET3
# if B_SZ_LOG>=OD_LOG_BSIZE0&&B_SZ_LOG<OD_LOG_BSIZE0+OD_NBSIZES
  cov=SUBSET3_2D[B_SZ_LOG-OD_LOG_BSIZE0];
# else
#  error "Need auto-correlation matrix for subset3 for this block size."
# endif
#endif
#if CG_SEARCH
  coding_gain_search(cov);
#else
  fprintf(stdout,"cg=%-24.18G\n",coding_gain_2d_collapsed(cov,f));
#endif
  for(i=0;i<NUM_PROCS;i++){
    trans_data_clear(&ctx[i].td);
  }
  return EXIT_SUCCESS;
}
|
GB_binop__ge_bool.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__ge_bool
// A.*B function (eWiseMult): GB_AemultB__ge_bool
// A*D function (colscale): GB_AxD__ge_bool
// D*A function (rowscale): GB_DxB__ge_bool
// C+=B function (dense accum): GB_Cdense_accumB__ge_bool
// C+=b function (dense accum): GB_Cdense_accumb__ge_bool
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ge_bool
// C=scalar+B GB_bind1st__ge_bool
// C=scalar+B' GB_bind1st_tran__ge_bool
// C=A+scalar GB_bind2nd__ge_bool
// C=A'+scalar GB_bind2nd_tran__ge_bool
// C type: bool
// A type: bool
// B,b type: bool
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
bool bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x >= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_BOOL || GxB_NO_GE_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense.  The loop body lives in the
// shared template, specialized through the GB_* macros defined above
// (cij = aij >= bij for this operator).
GrB_Info GB_Cdense_ewise3_noaccum__ge_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out by GB_control.h settings
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// precomputed slices of B for parallel work division.
GrB_Info GB_Cdense_accumB__ge_bool
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out by GB_control.h settings
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
GrB_Info GB_Cdense_accumb__ge_bool
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out by GB_control.h settings
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type bool
        bool bwork = (*((bool *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns (auto-generated pattern)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column j of A by the diagonal entry D(j,j),
// applying cij = (aij >= djj) via the colscale template.
GrB_Info GB_AxD__ge_bool
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out by GB_control.h settings
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row i of B by the diagonal entry D(i,i),
// applying cij = (dii >= bij) via the rowscale template.
GrB_Info GB_DxB__ge_bool
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out by GB_control.h settings
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B over the union of patterns of A and B;
// the slice workspaces are allocated by the template and released by
// GB_FREE_ALL (defined just above).
GrB_Info GB_AaddB__ge_bool
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // operator compiled out by GB_control.h settings
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the intersection of the
// patterns of A and B; slice workspaces are released by GB_FREE_ALL.
GrB_Info GB_AemultB__ge_bool
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // operator compiled out by GB_control.h settings
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x >= Bx [p]) for every entry present per the bitmap Bb
// (Bb == NULL means all entries are present).
GrB_Info GB_bind1st__ge_bool
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    bool x = (*((bool *) x_input)) ;
    bool *Bx = (bool *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // apply the operator only where the bitmap marks an entry
        if (GBB (Bb, p))
        {
            bool bij = Bx [p] ;
            Cx [p] = (x >= bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] >= y) for every entry present per the bitmap Ab
// (Ab == NULL means all entries are present).
GrB_Info GB_bind2nd__ge_bool
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    bool *Ax = (bool *) Ax_input ;
    bool y = (*((bool *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // apply the operator only where the bitmap marks an entry
        if (GBB (Ab, p))
        {
            bool aij = Ax [p] ;
            Cx [p] = (aij >= y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x >= aij) via GB_CAST_OP
// (redefined just above for the bind1st orientation).
GrB_Info GB_bind1st_tran__ge_bool
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    bool
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool x = (*((const bool *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for code following this function (generated pattern)
    #undef GB_ATYPE
    #define GB_ATYPE \
    bool
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij >= y) via GB_CAST_OP
// (redefined just above for the bind2nd orientation).
GrB_Info GB_bind2nd_tran__ge_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out by GB_control.h settings
    return (GrB_NO_VALUE) ;
    #else
    bool y = (*((const bool *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__minv_bool_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_bool_uint8
// op(A') function: GB_tran__minv_bool_uint8
// C type: bool
// A type: uint8_t
// cast: ;
// unaryop: cij = true
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = true ;
// casting
#define GB_CASTING(z, x) \
; ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = op (cast (Ax [p])) for all p.  For MINV on bool the operator
// is the constant true (see GB_OP above), so Ax is never actually read.
GrB_Info GB_unop__minv_bool_uint8
(
    bool *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out by GB_control.h settings
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator,
// using the shared transpose template specialized by the macros above.
GrB_Info GB_tran__minv_bool_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // operator compiled out by GB_control.h settings
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
cloud_omp.c | #include <tfhe/tfhe.h>
#include <tfhe/tfhe_io.h>
#include <stdio.h>
#include <time.h>
#define IMG_SIZE 28
#define BLEN 16
#define N_LABELS 10
#define N_IMAGES 4
#define N_EPOCHS 1
#define KERNEL 2
#define PADDING 0
#define STRIDES 8
/* One full-subtractor bit slice evaluated homomorphically:
 *   result     = a XOR b XOR borrow-in (lsb_carry)
 *   borrow-out = (~a AND b) OR (~(a XOR b) AND borrow-in)
 * The borrow-out is written to tmp and copied back into lsb_carry so the
 * caller can ripple it into the next bit. */
void compare_bit(LweSample* result, const LweSample* a, const LweSample* b, LweSample* lsb_carry, LweSample* tmp, const TFheGateBootstrappingCloudKeySet* bk) {
    LweSample* temp1=new_gate_bootstrapping_ciphertext_array(1, bk->params);
    LweSample* temp2=new_gate_bootstrapping_ciphertext_array(1, bk->params);
    LweSample* temp3=new_gate_bootstrapping_ciphertext_array(1,bk->params);
    LweSample* temp4=new_gate_bootstrapping_ciphertext_array(1,bk->params);
    LweSample* temp5=new_gate_bootstrapping_ciphertext_array(1,bk->params);
    bootsXOR(temp1, a, b, bk); //a xor b
    bootsXOR(result,temp1,lsb_carry,bk); //a xor b xor ci
    bootsNOT(temp4,a,bk); // complement of a
    bootsAND(temp3,temp4,b,bk); // complement of a AND b
    bootsNOT(temp5,temp1,bk); // complement of a XOR b
    bootsAND(temp2,temp5,lsb_carry,bk);// complement of a XOR b AND lsb_carry
    bootsOR(tmp,temp2,temp3,bk); // borrow-out
    bootsCOPY(lsb_carry,tmp,bk);
    /* Fix: the five scratch ciphertexts were leaked on every call. */
    delete_gate_bootstrapping_ciphertext_array(1, temp1);
    delete_gate_bootstrapping_ciphertext_array(1, temp2);
    delete_gate_bootstrapping_ciphertext_array(1, temp3);
    delete_gate_bootstrapping_ciphertext_array(1, temp4);
    delete_gate_bootstrapping_ciphertext_array(1, temp5);
}
/* Ripple the elementary comparator/subtractor slice across all nb_bits,
 * LSB first; tmps[0] holds the running borrow, tmps[1] is scratch. */
void subtract(LweSample* result, LweSample* tmps, const LweSample* a, const LweSample* b, const int nb_bits, const TFheGateBootstrappingCloudKeySet* bk) {
    int bit;
    for (bit = 0; bit < nb_bits; bit++) {
        compare_bit(result + bit, a + bit, b + bit, &tmps[0], &tmps[1], bk);
    }
}
/* One full-adder bit slice evaluated homomorphically:
 *   top1 (sum) = a XOR b XOR carry-in
 *   carry-out  = (a AND b) OR (carry-in AND (a XOR b))
 * The carry-out is written to tmp6 and copied back into lsb_carry1. */
void Addition(LweSample* top1, const LweSample* a6, const LweSample* b6, LweSample* lsb_carry1, LweSample* tmp6, const TFheGateBootstrappingCloudKeySet* bk) {
    LweSample* temp1=new_gate_bootstrapping_ciphertext_array(1, bk->params);
    LweSample* temp2=new_gate_bootstrapping_ciphertext_array(1, bk->params);
    LweSample* temp3=new_gate_bootstrapping_ciphertext_array(1,bk->params);
    bootsXOR(temp1, a6, b6, bk); //a xor b
    bootsXOR(top1,temp1,lsb_carry1,bk); //a xor b xor ci
    bootsAND(temp2,temp1,lsb_carry1,bk); //ci and (a xor b)
    bootsAND(temp3,a6,b6,bk); // a and b
    bootsOR(tmp6,temp2,temp3,bk); // a&b + ci*(a xor b)
    bootsCOPY(lsb_carry1,tmp6,bk);
    /* Fix: the three scratch ciphertexts were leaked on every call. */
    delete_gate_bootstrapping_ciphertext_array(1, temp1);
    delete_gate_bootstrapping_ciphertext_array(1, temp2);
    delete_gate_bootstrapping_ciphertext_array(1, temp3);
}
/* Ripple-carry adder: chain the full-adder slice across nb_bits, starting
 * from an encrypted zero carry-in.  carry[0] holds the running carry,
 * carry[1] is the slice's scratch cell. */
void Adder(LweSample* top1, const LweSample* a6, const LweSample* b6, const int nb_bits, const TFheGateBootstrappingCloudKeySet* bk){
    LweSample* carry = new_gate_bootstrapping_ciphertext_array(2, bk->params);
    bootsCONSTANT(&carry[0], 0, bk); /* carry-in starts at 0 */
    int bit;
    for (bit = 0; bit < nb_bits; bit++) {
        Addition(top1 + bit, a6 + bit, b6 + bit, &carry[0], &carry[1], bk);
    }
    delete_gate_bootstrapping_ciphertext_array(2, carry);
}
/* Bitwise 2:1 multiplexer driven by the single select bit select_line[0]:
 * rdbdata[i] = select ? b[i] : a[i] for each of the nb_bit positions. */
void multiplexer(LweSample* rdbdata,LweSample* a,LweSample* b,LweSample* select_line,const int nb_bit, const TFheGateBootstrappingCloudKeySet* bk){
    int bit;
    for (bit = 0; bit < nb_bit; bit++) {
        bootsMUX(&rdbdata[bit], &select_line[0], &b[bit], &a[bit], bk);
    }
}
/* Shift-and-add multiplier: product (2*nb_bits wide) <- a * b, both nb_bits wide.
 * For every bit b[i], a is selected (via MUX against an encrypted zero vector
 * enc_theta), shifted left by i, and accumulated into product.
 * FIX: enc_theta, temp_result, partial_sum and temp2 were never freed —
 * three of them were re-allocated on EVERY loop iteration, leaking
 * O(nb_bits^2) ciphertexts per multiplication.  All are now released. */
void multiply(LweSample* product, LweSample* a, LweSample* b, const int nb_bits, const TFheGateBootstrappingCloudKeySet* bk){
    LweSample* enc_theta = new_gate_bootstrapping_ciphertext_array(nb_bits, bk->params);
    for(int i=0;i<nb_bits;i++){ // encrypted all-zero operand for the MUX
        bootsCONSTANT(&enc_theta[i],0,bk);
    }
    for(int i=0;i<2*nb_bits;i++){ // accumulator starts at zero
        bootsCONSTANT(&product[i],0,bk);
    }
    for (int i=0; i<nb_bits; i++) {
        LweSample* temp_result = new_gate_bootstrapping_ciphertext_array(2 * nb_bits, bk->params);
        LweSample* partial_sum = new_gate_bootstrapping_ciphertext_array(2 * nb_bits, bk->params);
        for(int j=0;j<2*nb_bits;j++){
            bootsCONSTANT(&temp_result[j],0,bk);
            bootsCONSTANT(&partial_sum[j],0,bk);
        }
        // temp2 <- b[i] ? a : 0
        LweSample* temp2 = new_gate_bootstrapping_ciphertext_array(nb_bits, bk->params);
        multiplexer(temp2, enc_theta, a, &b[i], nb_bits, bk);
        // shift left by i into the double-width partial product
        for(int j=0;j<nb_bits;j++){
            bootsCOPY(&temp_result[i+j], &temp2[j], bk);
        }
        // accumulate: partial_sum <- product + temp_result
        Adder(partial_sum, product, temp_result, 2*nb_bits, bk);
        for(int j=0;j<2*nb_bits;j++){
            bootsCOPY(&product[j], &partial_sum[j], bk);
        }
        delete_gate_bootstrapping_ciphertext_array(nb_bits, temp2);
        delete_gate_bootstrapping_ciphertext_array(2 * nb_bits, temp_result);
        delete_gate_bootstrapping_ciphertext_array(2 * nb_bits, partial_sum);
    }
    delete_gate_bootstrapping_ciphertext_array(nb_bits, enc_theta);
}
void is_equal(LweSample* equal, LweSample* a, LweSample* b, const int n_bits, const TFheGateBootstrappingCloudKeySet* bk){
int i;
LweSample* temp1 = new_gate_bootstrapping_ciphertext_array(1, bk->params);
LweSample* temp2 = new_gate_bootstrapping_ciphertext_array(1, bk->params);
LweSample* temp3 = new_gate_bootstrapping_ciphertext_array(1, bk->params);
bootsCONSTANT(&equal[0],0,bk);
bootsCONSTANT(temp2,0,bk);
for(i=0; i<n_bits; i++){
bootsXOR(temp1, &a[i], &b[i], bk);
bootsOR(temp3, temp2, temp1, bk);
bootsCOPY(temp2, temp3, bk);
bootsNOT(&equal[0], temp3, bk);
}
}
///////////////////////////////////////////////
/* Forward pass of a single (valid, padding==0) convolution layer over
 * encrypted data, parallelized over output rows with OpenMP; afterwards the
 * output is decrypted with the secret key and printed (verification only).
 * out[r][c] is allocated here (2*blen bits each) and ownership passes to the
 * caller.
 * FIX: the two double-width scratch ciphertexts were allocated for every
 * kernel tap and never released — now freed at the end of each tap. */
void forward_convolution(int blen, int k_size, int padding, int strides, int x_size, int out_size, LweSample* x[x_size][x_size], LweSample* conv_wt[k_size][k_size], LweSample* out[out_size][out_size], const TFheGateBootstrappingCloudKeySet* bk, TFheGateBootstrappingSecretKeySet* key){
    int row, col;
    printf("Conv layer output without activation function:\n");
    //This is just for padding = 0
    #pragma omp parallel for default(none) private(row, col) shared(k_size, strides, x_size, x, out, conv_wt, bk, blen)
    for(row=0; row<x_size-k_size+1; row+=strides){
        for(col=0; col<x_size-k_size+1; col+=strides){
            int i, j, k;
            // each thread touches a distinct out[][] cell, so no race here
            out[(int)(row/strides)][(int)(col/strides)] = new_gate_bootstrapping_ciphertext_array(2*blen, bk->params);
            for(k=0; k<2*blen; k++)
                bootsCONSTANT(&out[(int)(row/strides)][(int)(col/strides)][k], 0, bk);
            for(i=0; i<k_size; i++){
                for(j=0; j<k_size; j++){
                    LweSample* temp1 = new_gate_bootstrapping_ciphertext_array(2*blen, bk->params);
                    LweSample* temp2 = new_gate_bootstrapping_ciphertext_array(2*blen, bk->params);
                    printf("row %d col %d i %d j %d\n", row, col, i, j);
                    // temp1 <- x[row+i][col+j] * conv_wt[i][j]
                    multiply(temp1, x[row+i][col+j], conv_wt[i][j], blen, bk);
                    //normalize(temp1, norm);
                    // temp2 <- accumulator + temp1, then copy back
                    Adder(temp2, out[(int)(row/strides)][(int)(col/strides)], temp1, 2*blen, bk);
                    for(k=0; k<2*blen; k++){
                        bootsCOPY(&out[(int)(row/strides)][(int)(col/strides)][k], &temp2[k], bk);
                    }
                    delete_gate_bootstrapping_ciphertext_array(2*blen, temp1);
                    delete_gate_bootstrapping_ciphertext_array(2*blen, temp2);
                }
            }
        }
    }
    //////////////////Decrypting (debug/verification output only)
    int i, j, k;
    for(i=0; i<out_size; i++){
        for(j=0; j<out_size; j++){
            int a = 0;
            int base = 1;
            for(k=0; k<2*blen; k++){ // little-endian bit order
                int ai = bootsSymDecrypt(&out[i][j][k], key)>0;
                a += base*ai;
                base = base*2;
            }
            printf("%d ", a);
        }
        printf("\n");
    }
    /////////////////////////////
}
/* "Training" driver: initializes every convolution weight to an encryption of
 * the integer 1 (bit 0 set, all higher bits clear), prints the decrypted
 * weights for verification, then runs the forward convolution once per epoch
 * on the first image, timing each pass.
 * FIXES:
 *  - the epoch loop used the global N_EPOCHS macro instead of the `epochs`
 *    parameter (the parameter was silently ignored);
 *  - clock() returns clock_t, not time_t, and its unit is CLOCKS_PER_SEC,
 *    not a hard-coded 1000000;
 *  - the conv_wt ciphertext arrays were never freed. */
void train(int blen, int n_images, int image_size, int n_labels, int epochs, int k_size, int padding, int strides, LweSample* x_data[n_images][image_size][image_size], LweSample* y_data[n_images][n_labels], const TFheGateBootstrappingCloudKeySet* bk, TFheGateBootstrappingSecretKeySet* key){
    int epoch, i, j, k;
    int conv_out_size = ((int)((image_size-k_size+2*padding)/strides))+1;
    //Initialize weights
    LweSample* conv_wt[k_size][k_size];
    LweSample* conv_out[conv_out_size][conv_out_size];
    printf("Initial weights:\n");
    for(i=0; i<k_size; i++){
        for(j=0; j<k_size; j++){
            conv_wt[i][j] = new_gate_bootstrapping_ciphertext_array(blen, bk->params);
            int a = 0;     // decrypted value, rebuilt bit by bit for display
            int base = 1;
            for(k=0; k<blen; k++){
                // weight value 1: only the least-significant bit is set
                bootsCONSTANT(&conv_wt[i][j][k], (k==0) ? 1 : 0, bk);
                int ai = bootsSymDecrypt(&conv_wt[i][j][k], key)>0;
                a += base*ai;
                base = base*2;
            }
            printf("%d ", a);
        }
        printf("\n");
    }
    for(epoch=0; epoch<epochs; epoch++){
        //AT LAST PUT LOOP TO DO THIS FOR ALL SAMPLES
        clock_t st_time = clock();
        time_t s = time(NULL);
        struct tm* start_time = localtime(&s);
        printf("Forward prop start time: %02d:%02d:%02d\n", start_time->tm_hour, start_time->tm_min, start_time->tm_sec);
        forward_convolution(blen, k_size, padding, strides, image_size, conv_out_size, x_data[0], conv_wt, conv_out, bk, key);
        clock_t en_time = clock();
        s = time(NULL);
        struct tm* end_time = localtime(&s);
        printf("Time to execute forward prop for 1 data point = %ld seconds\n", (long)((en_time-st_time)/CLOCKS_PER_SEC));
        printf("Forward prop end time: %02d:%02d:%02d\n", end_time->tm_hour, end_time->tm_min, end_time->tm_sec);
    }
    // release the weight ciphertexts (conv_out entries are allocated inside
    // forward_convolution; freeing them would require tracking which epoch's
    // allocation is current — left as-is, see forward_convolution)
    for(i=0; i<k_size; i++)
        for(j=0; j<k_size; j++)
            delete_gate_bootstrapping_ciphertext_array(blen, conv_wt[i][j]);
}
////////////////////////////////////
LweSample* x_data[N_IMAGES][IMG_SIZE][IMG_SIZE];
LweSample* y_data[N_IMAGES][N_LABELS];
/* Entry point: loads the cloud (evaluation) key, the encrypted features and
 * labels, and the secret key (verification only), runs training, then frees
 * everything.
 * FIXES: `void main()` is not a valid signature for main in hosted C — now
 * `int main(void)` returning 0; every fopen() is checked so a missing file
 * fails with a message instead of crashing inside the TFHE readers; the two
 * key sets are released before exit. */
int main(void){
    printf("Reading the key...\n");
    //Reading the cloud key from file
    FILE* cloud_key = fopen("cloud.key","rb");
    if (cloud_key == NULL) { fprintf(stderr, "Cannot open cloud.key\n"); return 1; }
    TFheGateBootstrappingCloudKeySet* bk = new_tfheGateBootstrappingCloudKeySet_fromFile(cloud_key);
    fclose(cloud_key);
    //Params are inside the key
    const TFheGateBootstrappingParameterSet* params = bk->params;
    //Reading the cloud data (labels and features)
    printf("Reading the features and labels...\n");
    FILE* dataxf = fopen("x_data.data","rb");
    FILE* datayf = fopen("y_data.data","rb");
    if (dataxf == NULL || datayf == NULL) { fprintf(stderr, "Cannot open data files\n"); return 1; }
    int n, i, j, k, l;
    for(n=0; n<N_IMAGES; n++){
        for(i=0; i<IMG_SIZE; i++){
            for(j=0; j<IMG_SIZE; j++){
                x_data[n][i][j] = new_gate_bootstrapping_ciphertext_array(BLEN,params);
                for(k=0; k<BLEN; k++)
                    import_gate_bootstrapping_ciphertext_fromFile(dataxf, &x_data[n][i][j][k], params);
            }
        }
        for(l=0; l<N_LABELS; l++){
            y_data[n][l] = new_gate_bootstrapping_ciphertext_array(BLEN,params);
            for(k=0; k<BLEN; k++)
                import_gate_bootstrapping_ciphertext_fromFile(datayf, &y_data[n][l][k], params);
        }
    }
    fclose(dataxf);
    fclose(datayf);
    ////////////////////////////For Verification
    FILE* secret_key = fopen("secret.key","rb");
    if (secret_key == NULL) { fprintf(stderr, "Cannot open secret.key\n"); return 1; }
    TFheGateBootstrappingSecretKeySet* key = new_tfheGateBootstrappingSecretKeySet_fromFile(secret_key);
    fclose(secret_key);
    ///////////////////////////////////////
    //Implementation of model: training
    train(BLEN, N_IMAGES, IMG_SIZE, N_LABELS, N_EPOCHS, KERNEL, PADDING, STRIDES, x_data, y_data, bk, key);
    //deleting arrays
    for(n=0; n<N_IMAGES; n++){
        for(i=0; i<IMG_SIZE; i++)
            for(j=0; j<IMG_SIZE; j++)
                delete_gate_bootstrapping_ciphertext_array(BLEN,x_data[n][i][j]);
        for(l=0; l<N_LABELS; l++)
            delete_gate_bootstrapping_ciphertext_array(BLEN,y_data[n][l]);
    }
    delete_gate_bootstrapping_secret_keyset(key);
    delete_gate_bootstrapping_cloud_keyset(bk);
    return 0;
}
|
simd-2.c | /* { dg-do run } */
/* { dg-additional-options "-msse2" { target sse2_runtime } } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
extern void abort ();
__UINTPTR_TYPE__ arr[1027];
/* Fill arr with the address of the simd-private copy of v seen by each
   iteration.  With safelen(16) the loop may run up to 16 lanes at once, so at
   most 16 distinct addresses of v can be observed; main() verifies exactly
   that.  noinline/noclone keep the compiler from specializing the loop away. */
__attribute__((noinline, noclone)) void
foo ()
{
  int i, v;
#pragma omp simd private (v) safelen(16)
  for (i = 0; i < 1027; i++)
    arr[i] = (__UINTPTR_TYPE__) &v;
}
/* Count the distinct values stored in arr by foo().  Each value is the
   address of one simd-private instance of v; with safelen(16) there can be at
   most 16 of them, so seeing a 17th distinct address is a failure. */
int
main ()
{
  int i, j, cnt = 0;
  __UINTPTR_TYPE__ arr2[16];
  foo ();
  for (i = 0; i < 1027; i++)
    {
      int seen = 0;
      for (j = 0; j < cnt; j++)
	if (arr[i] == arr2[j])
	  {
	    seen = 1;
	    break;
	  }
      if (seen)
	continue;
      if (cnt == 16)
	abort ();
      arr2[cnt++] = arr[i];
    }
  return 0;
}
|
gmm.c | /** @file gmm.c
** @brief Gaussian Mixture Models - Implementation
** @author David Novotny
** @author Andrea Vedaldi
**/
/*
Copyright (C) 2013 David Novotny and Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page gmm Gaussian Mixture Models (GMM)
@author David Novotny
@author Andrea Vedaldi
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@ref gmm.h is an implementation of *Gaussian Mixture Models* (GMMs).
The main functionality provided by this module is learning GMMs from
data by maximum likelihood. Model optimization uses the Expectation
Maximization (EM) algorithm @cite{dempster77maximum}. The
implementation supports @c float or @c double data types, is
parallelized, and is tuned to work reliably and effectively on
datasets of visual features. Stability is obtained in part by
regularizing and restricting the parameters of the GMM.
@ref gmm-starting demonstrates how to use the C API to compute the FV
representation of an image. For further details refer to:
- @subpage gmm-fundamentals
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section gmm-starting Getting started
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
In order to use @ref gmm.h to learn a GMM from training data, create a
new ::VlGMM object instance, set the parameters as desired, and run
the training code. The following example learns @c numClusters
Gaussian components from @c numData vectors of dimension @c dimension
and storage class @c float using at most 100 EM iterations:
@code
float * means ;
float * covariances ;
float * priors ;
float * posteriors ;
double loglikelihood ;
// create a new instance of a GMM object for float data
gmm = vl_gmm_new (VL_TYPE_FLOAT, dimension, numClusters) ;
// set the maximum number of EM iterations to 100
vl_gmm_set_max_num_iterations (gmm, 100) ;
// set the initialization to random selection
vl_gmm_set_initialization (gmm,VlGMMRand);
// cluster the data, i.e. learn the GMM
vl_gmm_cluster (gmm, data, numData);
// get the means, covariances, and priors of the GMM
means = vl_gmm_get_means(gmm);
covariances = vl_gmm_get_covariances(gmm);
priors = vl_gmm_get_priors(gmm);
// get loglikelihood of the estimated GMM
loglikelihood = vl_gmm_get_loglikelihood(gmm) ;
// get the soft assignments of the data points to each cluster
posteriors = vl_gmm_get_posteriors(gmm) ;
@endcode
@note ::VlGMM assumes that the covariance matrices of the GMM are
diagonal. This reduces significantly the number of parameters to learn
and is usually an acceptable compromise in vision applications. If the
data is significantly correlated, it can be beneficial to de-correlate
it by PCA rotation or projection in pre-processing.
::vl_gmm_get_loglikelihood is used to get the final loglikelihood of
the estimated mixture, ::vl_gmm_get_means and ::vl_gmm_get_covariances
to obtain the means and the diagonals of the covariance matrices of
the estimated Gaussian modes, and ::vl_gmm_get_posteriors to get the
posterior probabilities that a given point is associated to each of
the modes (soft assignments).
The learning algorithm, which uses EM, finds a local optimum of the
objective function. Therefore the initialization is crucial in
obtaining a good model, measured in term of the final
loglikelihood. ::VlGMM supports a few methods (use
::vl_gmm_set_initialization to choose one) as follows:
Method | ::VlGMMInitialization enumeration | Description
----------------------|-----------------------------------------|-----------------------------------------------
Random initialization | ::VlGMMRand | Random initialization of the mixture parameters
KMeans | ::VlGMMKMeans | Initialization of the mixture parameters using ::VlKMeans
Custom | ::VlGMMCustom | User specified initialization
Note that in the case of ::VlGMMKMeans initialization, an object of
type ::VlKMeans object must be created and passed to the ::VlGMM
instance (see @ref kmeans to see how to correctly set up this object).
When a user wants to use the ::VlGMMCustom method, the initial means,
covariances and priors have to be specified using the
::vl_gmm_set_means, ::vl_gmm_set_covariances and ::vl_gmm_set_priors
methods.
**/
/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page gmm-fundamentals GMM fundamentals
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
A *Gaussian Mixture Model* (GMM) is a mixture of $K$ multivariate
Gaussian distributions. In order to sample from a GMM, one samples
first the component index $k \in \{1,\dots,K\}$ with *prior
probability* $\pi_k$, and then samples the vector $\bx \in
\mathbb{R}^d$ from the $k$-th Gaussian distribution
$p(\bx|\mu_k,\Sigma_k)$. Here $\mu_k$ and $\Sigma_k$ are respectively
the *mean* and *covariance* of the distribution. The GMM is completely
specified by the parameters $\Theta=\{\pi_k,\mu_k,\Sigma_k; k =
1,\dots,K\}$
The density $p(\bx|\Theta)$ induced on the training data is obtained
by marginalizing the component selector $k$, obtaining
\[
p(\bx|\Theta)
= \sum_{k=1}^{K} \pi_k p( \bx_i |\mu_k,\Sigma_k),
\qquad
p( \bx |\mu_k,\Sigma_k)
=
\frac{1}{\sqrt{(2\pi)^d\det\Sigma_k}}
\exp\left[
-\frac{1}{2} (\bx-\mu_k)^\top\Sigma_k^{-1}(\bx-\mu_k)
\right].
\]
Learning a GMM to fit a dataset $X=(\bx_1, \dots, \bx_n)$ is usually
done by maximizing the log-likelihood of the data:
@f[
\ell(\Theta;X)
= E_{\bx\sim\hat p} [ \log p(\bx|\Theta) ]
= \frac{1}{n}\sum_{i=1}^{n} \log \sum_{k=1}^{K} \pi_k p(\bx_i|\mu_k, \Sigma_k)
@f]
where $\hat p$ is the empirical distribution of the data. An algorithm
to solve this problem is introduced next.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section gmm-em Learning a GMM by expectation maximization
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The direct maximization of the log-likelihood function of a GMM is
difficult due to the fact that the assignments of points to Gaussian
mode is not observable and, as such, must be treated as a latent
variable.
Usually, GMMs are learned by using the *Expectation Maximization* (EM)
algorithm @cite{dempster77maximum}. Consider in general the problem of
estimating to the maximum likelihood a distribution $p(x|\Theta) =
\int p(x,h|\Theta)\,dh$, where $x$ is a measurement, $h$ is a *latent
variable*, and $\Theta$ are the model parameters. By introducing an
auxiliary distribution $q(h|x)$ on the latent variable, one can use
Jensen inequality to obtain the following lower bound on the
log-likelihood:
@f{align*}
\ell(\Theta;X) =
E_{x\sim\hat p} \log p(x|\Theta)
&= E_{x\sim\hat p} \log \int p(x,h|\Theta) \,dh \\
&= E_{x\sim\hat p} \log \int \frac{p(x,h|\Theta)}{q(h|x)} q(h|x)\,dh \\
&\geq E_{x\sim\hat p} \int q(h) \log \frac{p(x,h|\Theta)}{q(h|x)}\,dh \\
&= E_{(x,q) \sim q(h|x) \hat p(x)} \log p(x,h|\Theta) -
E_{(x,q) \sim q(h|x) \hat p(x)} \log q(h|x)
@f}
The first term of the last expression is the log-likelihood of the
model where both the $x$ and $h$ are observed and jointly distributed
as $q(x|h)\hat p(x)$; the second term is the average entropy of the
latent variable, which does not depend on $\Theta$. This lower bound
is maximized and becomes tight by setting $q(h|x) = p(h|x,\Theta)$ to
be the posterior distribution on the latent variable $h$ (given the
current estimate of the parameters $\Theta$). In fact:
\[
E_{x \sim \hat p} \log p(x|\Theta)
=
E_{(x,h) \sim p(h|x,\Theta) \hat p(x)}\left[ \log \frac{p(x,h|\Theta)}{p(h|x,\Theta)} \right]
=
E_{(x,h) \sim p(h|x,\Theta) \hat p(x)} [ \log p(x|\Theta) ]
=
\ell(\Theta;X).
\]
EM alternates between updating the latent variable auxiliary
distribution $q(h|x) = p(h|x,\Theta_t)$ (*expectation step*) given the
current estimate of the parameters $\Theta_t$, and then updating the
model parameters $\Theta_{t+1}$ by maximizing the log-likelihood lower
bound derived (*maximization step*). The simplification is that in the
maximization step both $x$ and $h$ are now ``observed'' quantities.
This procedure converges to a local optimum of the model
log-likelihood.
@subsection gmm-expectation-step Expectation step
In the case of a GMM, the latent variables are the point-to-cluster
assignments $k_i, i=1,\dots,n$, one for each of $n$ data points. The
auxiliary distribution $q(k_i|\bx_i) = q_{ik}$ is a matrix with $n
\times K$ entries. Each row $q_{i,:}$ can be thought of as a vector of
soft assignments of the data points $\bx_i$ to each of the Gaussian
modes. Setting $q_{ik} = p(k_i | \bx_i, \Theta)$ yields
\[
q_{ik} =
\frac
{\pi_k p(\bx_i|\mu_k,\Sigma_k)}
{\sum_{l=1}^K \pi_l p(\bx_i|\mu_l,\Sigma_l)}
\]
where the Gaussian density $p(\bx_i|\mu_k,\Sigma_k)$ was given above.
One important point to keep in mind when these probabilities are
computed is the fact that the Gaussian densities may attain very low
values and underflow in a vanilla implementation. Furthermore, VLFeat
GMM implementation restricts the covariance matrices to be
diagonal. In this case, the computation of the determinant of
$\Sigma_k$ reduces to computing the trace of the matrix and the
inversion of $\Sigma_k$ could be obtained by inverting the elements on
the diagonal of the covariance matrix.
@subsection gmm-maximization-step Maximization step
The M step estimates the parameters of the Gaussian mixture components
and the prior probabilities $\pi_k$ given the auxiliary distribution
on the point-to-cluster assignments computed in the E step. Since all
the variables are now ``observed'', the estimate is quite simple. For
example, the mean $\mu_k$ of a Gaussian mode is obtained as the mean
of the data points assigned to it (accounting for the strength of the
soft assignments). The other quantities are obtained in a similar
manner, yielding to:
@f{align*}
\mu_k &= { { \sum_{i=1}^n q_{ik} \bx_{i} } \over { \sum_{i=1}^n q_{ik} } },
\\
\Sigma_k &= { { \sum_{i=1}^n { q_{ik} (\bx_{i} - \mu_{k}) {(\bx_{i} - \mu_{k})}^T } } \over { \sum_{i=1}^n q_{ik} } },
\\
\pi_k &= { \sum_{i=1}^n { q_{ik} } \over { \sum_{i=1}^n \sum_{l=1}^K q_{il} } }.
@f}
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section gmm-fundamentals-init Initialization algorithms
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The EM algorithm is a local optimization method. As such, the quality
of the solution strongly depends on the quality of the initial values
of the parameters (i.e. of the locations and shapes of the Gaussian
modes).
@ref gmm.h supports the following cluster initialization algorithms:
- <b>Random data points.</b> (::vl_gmm_init_with_rand_data) This method
sets the means of the modes by sampling at random a corresponding
number of data points, sets the covariance matrices of all the modes
are to the covariance of the entire dataset, and sets the prior
probabilities of the Gaussian modes to be uniform. This
initialization method is the fastest, simplest, as well as the one
most likely to end in a bad local minimum.
- <b>KMeans initialization</b> (::vl_gmm_init_with_kmeans) This
method uses KMeans to pre-cluster the points. It then sets the means
and covariances of the Gaussian distributions the sample means and
covariances of each KMeans cluster. It also sets the prior
probabilities to be proportional to the mass of each cluster. In
order to use this initialization method, a user can specify an
instance of ::VlKMeans by using the function
::vl_gmm_set_kmeans_init_object, or let ::VlGMM create one
automatically.
Alternatively, one can manually specify a starting point
(::vl_gmm_set_priors, ::vl_gmm_set_means, ::vl_gmm_set_covariances).
**/
#include "gmm.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifndef VL_DISABLE_SSE2
#include "mathop_sse2.h"
#endif
#ifndef VL_DISABLE_AVX
#include "mathop_avx.h"
#endif
/* ---------------------------------------------------------------- */
#ifndef VL_GMM_INSTANTIATING
/* ---------------------------------------------------------------- */
#define VL_GMM_MIN_VARIANCE 1e-6
#define VL_GMM_MIN_POSTERIOR 1e-2
#define VL_GMM_MIN_PRIOR 1e-6
/** @internal
 ** @brief Internal state of a Gaussian Mixture Model estimator.
 ** Covariance matrices are diagonal throughout, so @c covariances stores
 ** only the diagonals (numClusters x dimension entries). */
struct _VlGMM
{
  vl_type dataType ;                  /**< Data type (::VL_TYPE_FLOAT or ::VL_TYPE_DOUBLE). */
  vl_size dimension ;                 /**< Data dimensionality. */
  vl_size numClusters ;               /**< Number of clusters (Gaussian modes). */
  vl_size numData ;                   /**< Number of last time clustered data points. */
  vl_size maxNumIterations ;          /**< Maximum number of EM refinement iterations. */
  vl_size numRepetitions ;            /**< Number of clustering repetitions. */
  int verbosity ;                     /**< Verbosity level. */
  void * means;                       /**< Means of Gaussian modes. */
  void * covariances;                 /**< Diagonals of covariance matrices of Gaussian modes. */
  void * priors;                      /**< Weights (prior probabilities) of Gaussian modes. */
  void * posteriors;                  /**< Probabilities of correspondences of points to clusters. */
  double * sigmaLowBound ;            /**< Lower bound on the diagonal covariance values (one per dimension). */
  VlGMMInitialization initialization; /**< Initialization option. */
  VlKMeans * kmeansInit;              /**< KMeans object for initialization of the Gaussians. */
  double LL ;                         /**< Current solution loglikelihood. */
  vl_bool kmeansInitIsOwner;          /**< Whether this object owns (and must delete) kmeansInit. */
} ;
/* ---------------------------------------------------------------- */
/* Life-cycle */
/* ---------------------------------------------------------------- */
/* Ensure the posterior buffer can hold numData x numClusters entries.
   The buffer only ever grows; its previous contents are discarded (they are
   recomputed from scratch on each E step), so no copy is performed. */
static void
_vl_gmm_prepare_for_data (VlGMM* self, vl_size numData)
{
  if (numData > self->numData) {
    vl_free (self->posteriors) ;
    self->posteriors = vl_malloc (vl_get_type_size(self->dataType) * numData * self->numClusters) ;
  }
  self->numData = numData ;
}
/** @brief Create a new GMM object
 ** @param dataType type of data (::VL_TYPE_FLOAT or ::VL_TYPE_DOUBLE)
 ** @param dimension dimension of the data.
 ** @param numComponents number of Gaussian mixture components.
 ** @return new GMM object instance (delete with ::vl_gmm_delete).
 **
 ** Priors, means and covariances are allocated zero-initialized; the
 ** per-dimension covariance lower bound defaults to 1e-4.
 **/
VlGMM *
vl_gmm_new (vl_type dataType, vl_size dimension, vl_size numComponents)
{
  vl_index i ;
  vl_size size = vl_get_type_size(dataType) ;
  /* vl_calloc zeroes the struct, so pointer members start out NULL. */
  VlGMM * self = vl_calloc(1, sizeof(VlGMM)) ;
  self->dataType = dataType;
  self->numClusters = numComponents ;
  self->numData = 0;
  self->dimension = dimension ;
  self->initialization = VlGMMRand;
  self->verbosity = 0 ;
  self->maxNumIterations = 50;
  self->numRepetitions = 1;
  self->posteriors = NULL ;
  self->kmeansInit = NULL ;
  self->kmeansInitIsOwner = VL_FALSE;
  self->priors = vl_calloc (numComponents, size) ;
  self->means = vl_calloc (numComponents * dimension, size) ;
  self->covariances = vl_calloc (numComponents * dimension, size) ;
  self->sigmaLowBound = vl_calloc (dimension, sizeof(double)) ;
  /* FIX: compare against (signed) dimension, matching the convention used
     everywhere else in this file (e.g. vl_gmm_set_covariance_lower_bound);
     the previous (unsigned) cast silently converted the signed index to
     unsigned in the comparison. */
  for (i = 0 ; i < (signed)self->dimension ; ++i) { self->sigmaLowBound[i] = 1e-4 ; }
  return self ;
}
/** @brief Reset state
 ** @param self object.
 **
 ** Deletes any stored posteriors and, when owned by this object, the KMeans
 ** initializer, returning the GMM to a freshly-created-like state (the
 ** learned priors/means/covariances are untouched).
 **/
void
vl_gmm_reset (VlGMM * self)
{
  if (self->posteriors != NULL) {
    vl_free (self->posteriors) ;
    self->posteriors = NULL ;
    self->numData = 0 ;
  }
  if (self->kmeansInitIsOwner && self->kmeansInit != NULL) {
    vl_kmeans_delete (self->kmeansInit) ;
    self->kmeansInit = NULL ;
    self->kmeansInitIsOwner = VL_FALSE ;
  }
}
/** @brief Deletes a GMM object
 ** @param self GMM object instance.
 **
 ** The function deletes the GMM object instance created by ::vl_gmm_new,
 ** including all owned buffers and, when owned, the KMeans initializer.
 ** FIX: @c sigmaLowBound is allocated in ::vl_gmm_new but was never freed
 ** here — that leak is now closed.
 **/
void
vl_gmm_delete (VlGMM * self)
{
  if(self->means) vl_free(self->means);
  if(self->covariances) vl_free(self->covariances);
  if(self->priors) vl_free(self->priors);
  if(self->posteriors) vl_free(self->posteriors);
  if(self->sigmaLowBound) vl_free(self->sigmaLowBound);
  if(self->kmeansInit && self->kmeansInitIsOwner) {
    vl_kmeans_delete(self->kmeansInit);
  }
  vl_free(self);
}
/* ---------------------------------------------------------------- */
/* Getters and setters */
/* ---------------------------------------------------------------- */
/** @brief Get data type
 ** @param self object
 ** @return data type (::VL_TYPE_FLOAT or ::VL_TYPE_DOUBLE).
 **/
vl_type
vl_gmm_get_data_type (VlGMM const * self)
{
  return self->dataType ;
}
/** @brief Get the number of clusters
 ** @param self object
 ** @return number of clusters (Gaussian modes).
 **/
vl_size
vl_gmm_get_num_clusters (VlGMM const * self)
{
  return self->numClusters ;
}
/** @brief Get the number of data points
 ** @param self object
 ** @return number of data points used in the last clustering.
 **/
vl_size
vl_gmm_get_num_data (VlGMM const * self)
{
  return self->numData ;
}
/** @brief Get the log likelihood of the current mixture
 ** @param self object
 ** @return loglikelihood of the current solution.
 **/
double
vl_gmm_get_loglikelihood (VlGMM const * self)
{
  return self->LL ;
}
/** @brief Get verbosity level
 ** @param self object
 ** @return verbosity level.
 **/
int
vl_gmm_get_verbosity (VlGMM const * self)
{
  return self->verbosity ;
}
/** @brief Set verbosity level
 ** @param self object
 ** @param verbosity verbosity level (0 disables diagnostic output).
 **/
void
vl_gmm_set_verbosity (VlGMM * self, int verbosity)
{
  self->verbosity = verbosity ;
}
/** @brief Get means
 ** @param self object
 ** @return cluster means (numClusters x dimension array, owned by @a self).
 **/
void const *
vl_gmm_get_means (VlGMM const * self)
{
  return self->means ;
}
/** @brief Get covariances
 ** @param self object
 ** @return diagonals of cluster covariance matrices (owned by @a self).
 **/
void const *
vl_gmm_get_covariances (VlGMM const * self)
{
  return self->covariances ;
}
/** @brief Get priors
 ** @param self object
 ** @return prior probabilities of the cluster Gaussians (owned by @a self).
 **/
void const *
vl_gmm_get_priors (VlGMM const * self)
{
  return self->priors ;
}
/** @brief Get posteriors
 ** @param self object
 ** @return posterior probabilities of cluster memberships, stored with the
 ** cluster index fastest (entry [i_cl + i_d * numClusters]); owned by @a self.
 **/
void const *
vl_gmm_get_posteriors (VlGMM const * self)
{
  return self->posteriors ;
}
/** @brief Get maximum number of iterations
 ** @param self object
 ** @return maximum number of EM iterations.
 **/
vl_size
vl_gmm_get_max_num_iterations (VlGMM const * self)
{
  return self->maxNumIterations ;
}
/** @brief Set maximum number of iterations
 ** @param self VlGMM filter.
 ** @param maxNumIterations maximum number of EM iterations.
 **/
void
vl_gmm_set_max_num_iterations (VlGMM * self, vl_size maxNumIterations)
{
  self->maxNumIterations = maxNumIterations ;
}
/** @brief Get maximum number of repetitions.
 ** @param self object
 ** @return current number of repetitions for quantization.
 **/
vl_size
vl_gmm_get_num_repetitions (VlGMM const * self)
{
  return self->numRepetitions ;
}
/** @brief Set maximum number of repetitions
 ** @param self object
 ** @param numRepetitions maximum number of repetitions.
 ** The number of repetitions cannot be smaller than 1 (asserted).
 **/
void
vl_gmm_set_num_repetitions (VlGMM * self, vl_size numRepetitions)
{
  assert (numRepetitions >= 1) ;
  self->numRepetitions = numRepetitions ;
}
/** @brief Get data dimension
 ** @param self object
 ** @return data dimension.
 **/
vl_size
vl_gmm_get_dimension (VlGMM const * self)
{
  return self->dimension ;
}
/** @brief Get initialization algorithm
 ** @param self object
 ** @return initialization algorithm (see ::VlGMMInitialization).
 **/
VlGMMInitialization
vl_gmm_get_initialization (VlGMM const * self)
{
  return self->initialization ;
}
/** @brief Set initialization algorithm.
 ** @param self object
 ** @param init initialization algorithm.
 **/
void
vl_gmm_set_initialization (VlGMM * self, VlGMMInitialization init)
{
  self->initialization = init;
}
/** @brief Get KMeans initialization object.
 ** @param self object
 ** @return kmeans initialization object (may be NULL).
 **/
VlKMeans * vl_gmm_get_kmeans_init_object (VlGMM const * self)
{
  return self->kmeansInit;
}
/** @brief Set KMeans initialization object.
 ** @param self object
 ** @param kmeans initialization KMeans object.
 **
 ** Any previously owned KMeans object is deleted; the new object is held as
 ** a non-owned reference (the caller remains responsible for deleting it).
 **/
void vl_gmm_set_kmeans_init_object (VlGMM * self, VlKMeans * kmeans)
{
  if (self->kmeansInit && self->kmeansInitIsOwner) {
    vl_kmeans_delete(self->kmeansInit) ;
  }
  self->kmeansInit = kmeans;
  self->kmeansInitIsOwner = VL_FALSE;
}
/** @brief Get the lower bound on the diagonal covariance values.
 ** @param self object
 ** @return lower bounds on covariances (one per dimension, owned by @a self).
 **/
double const * vl_gmm_get_covariance_lower_bounds (VlGMM const * self)
{
  return self->sigmaLowBound;
}
/** @brief Set the lower bounds on diagonal covariance values.
 ** @param self object.
 ** @param bounds array of @c dimension lower bounds (copied).
 **
 ** There is one lower bound per dimension. Use ::vl_gmm_set_covariance_lower_bound
 ** to set all of them to a given scalar.
 **/
void vl_gmm_set_covariance_lower_bounds (VlGMM * self, double const * bounds)
{
  memcpy(self->sigmaLowBound, bounds, sizeof(double) * self->dimension) ;
}
/** @brief Set the lower bounds on diagonal covariance values.
 ** @param self object.
 ** @param bound scalar bound applied to every dimension.
 **
 ** While there is one lower bound per dimension, this function sets all of
 ** them to the specified scalar. Use ::vl_gmm_set_covariance_lower_bounds
 ** to set them individually.
 **/
void vl_gmm_set_covariance_lower_bound (VlGMM * self, double bound)
{
  int d ;
  for (d = 0 ; d < (signed)self->dimension ; ++d) {
    self->sigmaLowBound[d] = bound ;
  }
}
/* ---------------------------------------------------------------- */
/* Instantiate shuffle algorithm */
#define VL_SHUFFLE_type vl_uindex
#define VL_SHUFFLE_prefix _vl_gmm
#include "shuffle-def.h"
/* #ifdef VL_GMM_INSTANTITATING */
#endif
/* ---------------------------------------------------------------- */
#ifdef VL_GMM_INSTANTIATING
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
/* Posterior assignments */
/* ---------------------------------------------------------------- */
/** @fn vl_get_gmm_data_posterior_f(float*,vl_size,vl_size,float const*,float const*,vl_size,float const*,float const*)
** @brief Get Gaussian modes posterior probabilities
** @param posteriors posterior probabilities (output)/
** @param numClusters number of modes in the GMM model.
** @param numData number of data elements.
** @param priors prior mode probabilities of the GMM model.
** @param means means of the GMM model.
** @param dimension data dimension.
** @param covariances diagonal covariances of the GMM model.
** @param data data.
** @return data log-likelihood.
**
** This is a helper function that does not require a ::VlGMM object
** instance to operate.
**/
double
VL_XCAT(vl_get_gmm_data_posteriors_, SFX)
(TYPE * posteriors,
 vl_size numClusters,
 vl_size numData,
 TYPE const * priors,
 TYPE const * means,
 vl_size dimension,
 TYPE const * covariances,
 TYPE const * data)
{
  vl_index i_d, i_cl;
  vl_size dim;
  double LL = 0;
  /* log of the Gaussian normalization constant: (d/2) * log(2*pi) */
  TYPE halfDimLog2Pi = (dimension / 2.0) * log(2.0*VL_PI);
  TYPE * logCovariances ;
  TYPE * logWeights ;
  TYPE * invCovariances ;
#if (FLT == VL_TYPE_FLOAT)
  VlFloatVector3ComparisonFunction distFn = vl_get_vector_3_comparison_function_f(VlDistanceMahalanobis) ;
#else
  VlDoubleVector3ComparisonFunction distFn = vl_get_vector_3_comparison_function_d(VlDistanceMahalanobis) ;
#endif
  logCovariances = vl_malloc(sizeof(TYPE) * numClusters) ;
  invCovariances = vl_malloc(sizeof(TYPE) * numClusters * dimension) ;
  logWeights = vl_malloc(sizeof(TYPE) * numClusters) ;
  /* Per cluster, precompute: log prior (or -inf for negligible priors),
     log det(Sigma) as the sum of logs of the diagonal entries, and the
     element-wise inverse of the diagonal covariance. */
#if defined(_OPENMP)
#pragma omp parallel for private(i_cl,dim) num_threads(vl_get_max_threads())
#endif
  for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) {
    TYPE logSigma = 0 ;
    if (priors[i_cl] < VL_GMM_MIN_PRIOR) {
      /* a negligible prior forces this mode's posterior to zero */
      logWeights[i_cl] = - (TYPE) VL_INFINITY_D ;
    } else {
      logWeights[i_cl] = log(priors[i_cl]);
    }
    for(dim = 0 ; dim < dimension ; ++ dim) {
      logSigma += log(covariances[i_cl*dimension + dim]);
      invCovariances [i_cl*dimension + dim] = (TYPE) 1.0 / covariances[i_cl*dimension + dim];
    }
    logCovariances[i_cl] = logSigma;
  } /* end of parallel region */
  /* Per datum: evaluate unnormalized log posteriors, then normalize with the
     log-sum-exp trick (shift by the per-datum maximum to avoid underflow);
     LL accumulates the data log-likelihood via an OpenMP reduction. */
#if defined(_OPENMP)
#pragma omp parallel for private(i_cl,i_d) reduction(+:LL) \
num_threads(vl_get_max_threads())
#endif
  for (i_d = 0 ; i_d < (signed)numData ; ++ i_d) {
    TYPE clusterPosteriorsSum = 0;
    TYPE maxPosterior = (TYPE)(-VL_INFINITY_D) ;
    for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) {
      TYPE p =
      logWeights[i_cl]
      - halfDimLog2Pi
      - 0.5 * logCovariances[i_cl]
      - 0.5 * distFn (dimension,
                      data + i_d * dimension,
                      means + i_cl * dimension,
                      invCovariances + i_cl * dimension) ;
      /* posteriors are stored cluster-index-fastest */
      posteriors[i_cl + i_d * numClusters] = p ;
      if (p > maxPosterior) { maxPosterior = p ; }
    }
    for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
      TYPE p = posteriors[i_cl + i_d * numClusters] ;
      p = exp(p - maxPosterior) ;
      posteriors[i_cl + i_d * numClusters] = p ;
      clusterPosteriorsSum += p ;
    }
    LL += log(clusterPosteriorsSum) + (double) maxPosterior ;
    for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
      posteriors[i_cl + i_d * numClusters] /= clusterPosteriorsSum ;
    }
  } /* end of parallel region */
  vl_free(logCovariances);
  vl_free(logWeights);
  vl_free(invCovariances);
  return LL;
}
/* ---------------------------------------------------------------- */
/* Restarts zero-weighted Gaussians */
/* ---------------------------------------------------------------- */
static void
VL_XCAT(_vl_gmm_maximization_, SFX)
(VlGMM * self,
TYPE * posteriors,
TYPE * priors,
TYPE * covariances,
TYPE * means,
TYPE const * data,
vl_size numData) ;
/* Restart Gaussian modes that have become (nearly) empty.
 *
 * A mode is considered empty when the total posterior mass it receives
 * falls below VL_GMM_MIN_POSTERIOR * max(1, numData/numClusters). For
 * each such mode i_cl, the routine selects the mode j_cl that contributes
 * most to keeping the log-likelihood small (largest prior-weighted
 * differential entropy), splits j_cl along its dimension of maximum
 * variance at the mean, moves the posteriors of the points falling on one
 * side of the split to i_cl, and re-runs the maximization step to
 * re-estimate all parameters.
 *
 * Returns the number of modes that were restarted.
 */
static vl_size
VL_XCAT(_vl_gmm_restart_empty_modes_, SFX) (VlGMM * self, TYPE const * data)
{
  vl_size dimension = self->dimension;
  vl_size numClusters = self->numClusters;
  vl_index i_cl, j_cl, i_d, d;
  vl_size zeroWNum = 0;
  TYPE * priors = (TYPE*)self->priors ;
  TYPE * means = (TYPE*)self->means ;
  TYPE * covariances = (TYPE*)self->covariances ;
  TYPE * posteriors = (TYPE*)self->posteriors ;
  TYPE * mass = NULL ;

  /* nothing to split if there is a single mode */
  if (numClusters <= 1) { return 0 ; }

  /* allocate after the early return so the buffer cannot leak on it */
  mass = vl_calloc(sizeof(TYPE), self->numClusters) ;

  /* compute the total posterior mass received by each cluster */
  {
    vl_uindex i, k ;
    vl_size numNullAssignments = 0 ;
    for (i = 0 ; i < self->numData ; ++i) {
      for (k = 0 ; k < self->numClusters ; ++k) {
        TYPE p = ((TYPE*)self->posteriors)[k + i * self->numClusters] ;
        mass[k] += p ;
        if (p < VL_GMM_MIN_POSTERIOR) {
          numNullAssignments ++ ;
        }
      }
    }
    if (self->verbosity) {
      VL_PRINTF("gmm: sparsity of data posterior: %.1f%%\n", (double)numNullAssignments / (self->numData * self->numClusters) * 100) ;
    }
  }

  /* search for clusters with negligible weight and reassign them to fat clusters */
  for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
    double size = - VL_INFINITY_D ;
    vl_index best = -1 ;

    if (mass[i_cl] >= VL_GMM_MIN_POSTERIOR *
        VL_MAX(1.0, (double) self->numData / self->numClusters))
    {
      continue ;
    }

    if (self->verbosity) {
      VL_PRINTF("gmm: mode %d is nearly empty (mass %f)\n", i_cl, mass[i_cl]) ;
    }

    /*
     Search for the cluster that (approximately)
     maximally contributes to making the log-likelihood
     small.
     */
    for (j_cl = 0 ; j_cl < (signed)numClusters ; ++j_cl) {
      double size_ ;
      if (priors[j_cl] < VL_GMM_MIN_PRIOR) { continue ; }
      /* prior-weighted differential entropy of mode j_cl */
      size_ = - 0.5 * (1.0 + log(2*VL_PI)) ;
      for (d = 0 ; d < (signed)dimension ; d++) {
        double sigma2 = covariances[j_cl * dimension + d] ;
        size_ -= 0.5 * log(sigma2) ;
      }
      size_ *= priors[j_cl] ;
      if (self->verbosity > 2) {
        VL_PRINTF("gmm: mode %d: prior %f, mass %f, score %f\n",
                  j_cl, priors[j_cl], mass[j_cl], size_) ;
      }
      if (size_ > size) {
        size = size_ ;
        best = j_cl ;
      }
    }

    j_cl = best ;
    if (j_cl == i_cl || j_cl < 0) {
      if (self->verbosity) {
        VL_PRINTF("gmm: mode %d is empty, "
                  "but no other mode to split could be found\n", i_cl) ;
      }
      continue ;
    }
    if (self->verbosity) {
      VL_PRINTF("gmm: reinitializing empty mode %d with mode %d (prior %f, mass %f, score %f)\n",
                i_cl, j_cl, priors[j_cl], mass[j_cl], size) ;
    }

    /*
     Search for the dimension with maximum variance.
     */
    size = - VL_INFINITY_D ;
    best = - 1 ;
    for (d = 0 ; d < (signed)dimension ; d++) {
      double sigma2 = covariances[j_cl * dimension + d] ;
      if (sigma2 > size) {
        size = sigma2 ;
        best = d ;
      }
    }

    /*
     Reassign points of j_cl (mode to split) falling below the mean
     along the split dimension to i_cl (empty mode).
     */
    {
      TYPE mu = means[best + j_cl * self->dimension] ;
      for (i_d = 0 ; i_d < (signed)self->numData ; ++ i_d) {
        TYPE p = posteriors[j_cl + self->numClusters * i_d] ;
        TYPE q = posteriors[i_cl + self->numClusters * i_d] ; /* ~= 0 */
        if (data[best + i_d * self->dimension] < mu) {
          /* assign this point to i_cl */
          posteriors[i_cl + self->numClusters * i_d] += p ;
          posteriors[j_cl + self->numClusters * i_d] = 0 ;
        } else {
          /* assign this point to j_cl */
          posteriors[i_cl + self->numClusters * i_d] = 0 ;
          posteriors[j_cl + self->numClusters * i_d] += q ;
        }
      }
    }

    /* BUGFIX: count the restart so the caller's report is accurate
       (previously the counter was only updated in dead code and the
       function always returned 0). */
    zeroWNum ++ ;

    /*
     Re-estimate.
     */
    VL_XCAT(_vl_gmm_maximization_, SFX)
      (self,posteriors,priors,covariances,means,data,self->numData) ;
  }

  vl_free(mass) ; /* BUGFIX: was leaked on every call */
  return zeroWNum;
}
/* ---------------------------------------------------------------- */
/* Helpers */
/* ---------------------------------------------------------------- */
/* Clamp each diagonal covariance entry from below by the corresponding
 * per-dimension lower bound self->sigmaLowBound. Prevents modes from
 * collapsing onto degenerate (zero-variance) solutions. Reports, at
 * verbosity > 0, how many modes required clamping.
 */
static void
VL_XCAT(_vl_gmm_apply_bounds_, SFX)(VlGMM * self)
{
  vl_uindex dim ;
  vl_uindex k ;
  vl_size numAdjusted = 0 ;
  TYPE * cov = (TYPE*)self->covariances ;
  double const * lbs = self->sigmaLowBound ;
  for (k = 0 ; k < self->numClusters ; ++k) {
    vl_bool adjusted = VL_FALSE ;
    for (dim = 0 ; dim < self->dimension ; ++dim) {
      if (cov[k * self->dimension + dim] < lbs[dim] ) {
        cov[k * self->dimension + dim] = lbs[dim] ;
        adjusted = VL_TRUE ;
      }
    }
    if (adjusted) { numAdjusted ++ ; }
  }
  if (numAdjusted > 0 && self->verbosity > 0) {
    /* BUGFIX: vl_size arguments passed for "%d" are a format/argument
       mismatch (undefined behavior on LP64); cast explicitly. */
    VL_PRINT("gmm: detected %d of %d modes with at least one dimension "
             "with covariance too small (set to lower bound)\n",
             (int)numAdjusted, (int)self->numClusters) ;
  }
}
/* ---------------------------------------------------------------- */
/* EM - Maximization step */
/* ---------------------------------------------------------------- */
/* EM maximization (M) step: re-estimate priors, means, and diagonal
 * covariances from the current data posteriors.
 *
 * posteriors  [numClusters x numData] soft assignments (input).
 * priors      [numClusters] overwritten; on exit, normalized cluster weights.
 * covariances [dimension x numClusters] overwritten with diagonal covariances.
 * means       [dimension x numClusters] overwritten with new means.
 * data        [dimension x numData] data points (read only).
 *
 * Squared differences are accumulated w.r.t. the OLD means so only one
 * pass over the data is needed; the result is corrected afterwards
 * (var(x) around mu_new = var around mu_old - (mu_new - mu_old)^2).
 */
static void
VL_XCAT(_vl_gmm_maximization_, SFX)
(VlGMM * self,
 TYPE * posteriors,
 TYPE * priors,
 TYPE * covariances,
 TYPE * means,
 TYPE const * data,
 vl_size numData)
{
  vl_size numClusters = self->numClusters;
  vl_index i_d, i_cl;
  vl_size dim ;
  TYPE * oldMeans ;
  double time = 0 ;

  if (self->verbosity > 1) {
    VL_PRINTF("gmm: em: entering maximization step\n") ;
    time = vl_get_cpu_time() ;
  }

  /* keep a copy of the previous means for the one-pass variance trick */
  oldMeans = vl_malloc(sizeof(TYPE) * self->dimension * numClusters) ;
  memcpy(oldMeans, means, sizeof(TYPE) * self->dimension * numClusters) ;

  /* outputs are accumulated in place, so start from zero */
  memset(priors, 0, sizeof(TYPE) * numClusters) ;
  memset(means, 0, sizeof(TYPE) * self->dimension * numClusters) ;
  memset(covariances, 0, sizeof(TYPE) * self->dimension * numClusters) ;

#if defined(_OPENMP)
#pragma omp parallel default(shared) private(i_d, i_cl, dim) \
                     num_threads(vl_get_max_threads())
#endif
  {
    /* per-thread accumulators, merged under a critical section below */
    TYPE * clusterPosteriorSum_, * means_, * covariances_ ;

    /* allocator may not be thread-safe, hence the critical section */
#if defined(_OPENMP)
#pragma omp critical
#endif
    {
      clusterPosteriorSum_ = vl_calloc(sizeof(TYPE), numClusters) ;
      means_ = vl_calloc(sizeof(TYPE), self->dimension * numClusters) ;
      covariances_ = vl_calloc(sizeof(TYPE), self->dimension * numClusters) ;
    }

    /*
     Accumulate weighted sums and sum of square differences. Once normalized,
     these become the means and covariances of each Gaussian mode.
     The squared differences will be taken w.r.t. the old means however. In this manner,
     one avoids doing two passes across the data. Eventually, these are corrected to account
     for the new means properly. In principle, one could set the old means to zero, but
     this may cause numerical instabilities (by accumulating large squares).
     */
#if defined(_OPENMP)
#pragma omp for
#endif
    for (i_d = 0 ; i_d < (signed)numData ; ++i_d) {
      for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
        TYPE p = posteriors[i_cl + i_d * self->numClusters] ;
        vl_bool calculated = VL_FALSE ;

        /* skip very small associations for speed */
        if (p < VL_GMM_MIN_POSTERIOR / numClusters) { continue ; }

        clusterPosteriorSum_ [i_cl] += p ;

        /* NOTE(review): this AVX branch calls the _sse2_ helpers; upstream
           VLFeat calls _avx_ variants here. Possibly intentional in this
           port (AVX helpers removed?) -- confirm before "fixing". */
#ifndef VL_DISABLE_AVX
        if (vl_get_simd_enabled() && vl_cpu_has_avx()) {
          VL_XCAT(_vl_weighted_mean_sse2_, SFX)
            (self->dimension,
             means_+ i_cl * self->dimension,
             data + i_d * self->dimension,
             p) ;
          VL_XCAT(_vl_weighted_sigma_sse2_, SFX)
            (self->dimension,
             covariances_ + i_cl * self->dimension,
             data + i_d * self->dimension,
             oldMeans + i_cl * self->dimension,
             p) ;
          calculated = VL_TRUE;
        }
#endif
#ifndef VL_DISABLE_SSE2
        if (vl_get_simd_enabled() && vl_cpu_has_sse2() && !calculated) {
          VL_XCAT(_vl_weighted_mean_sse2_, SFX)
            (self->dimension,
             means_+ i_cl * self->dimension,
             data + i_d * self->dimension,
             p) ;
          VL_XCAT(_vl_weighted_sigma_sse2_, SFX)
            (self->dimension,
             covariances_ + i_cl * self->dimension,
             data + i_d * self->dimension,
             oldMeans + i_cl * self->dimension,
             p) ;
          calculated = VL_TRUE;
        }
#endif
        /* scalar fallback when no SIMD path ran */
        if(!calculated) {
          for (dim = 0 ; dim < self->dimension ; ++dim) {
            TYPE x = data[i_d * self->dimension + dim] ;
            TYPE mu = oldMeans[i_cl * self->dimension + dim] ;
            TYPE diff = x - mu ;
            means_ [i_cl * self->dimension + dim] += p * x ;
            covariances_ [i_cl * self->dimension + dim] += p * (diff*diff) ;
          }
        }
      }
    }

    /* accumulate the per-thread partial sums into the shared outputs */
#if defined(_OPENMP)
#pragma omp critical
#endif
    {
      for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
        priors [i_cl] += clusterPosteriorSum_ [i_cl];
        for (dim = 0 ; dim < self->dimension ; ++dim) {
          means [i_cl * self->dimension + dim] += means_ [i_cl * self->dimension + dim] ;
          covariances [i_cl * self->dimension + dim] += covariances_ [i_cl * self->dimension + dim] ;
        }
      }
      vl_free(means_);
      vl_free(covariances_);
      vl_free(clusterPosteriorSum_);
    }
  } /* parallel section */

  /* at this stage priors[] contains the total mass of each cluster;
     normalize means and covariances by it */
  for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) {
    TYPE mass = priors[i_cl] ;
    /* do not update modes that do not recieve mass */
    if (mass >= 1e-6 / numClusters) {
      for (dim = 0 ; dim < self->dimension ; ++dim) {
        means[i_cl * self->dimension + dim] /= mass ;
        covariances[i_cl * self->dimension + dim] /= mass ;
      }
    }
  }

  /* apply old-to-new means correction: subtract (mu_new - mu_old)^2
     so the covariances are centered on the NEW means */
  for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) {
    TYPE mass = priors[i_cl] ;
    if (mass >= 1e-6 / numClusters) {
      for (dim = 0 ; dim < self->dimension ; ++dim) {
        TYPE mu = means[i_cl * self->dimension + dim] ;
        TYPE oldMu = oldMeans[i_cl * self->dimension + dim] ;
        TYPE diff = mu - oldMu ;
        covariances[i_cl * self->dimension + dim] -= diff * diff ;
      }
    }
  }

  /* clamp covariances from below (may have gone tiny or negative
     after the correction) */
  VL_XCAT(_vl_gmm_apply_bounds_,SFX)(self) ;

  /* normalize the priors to sum to one */
  {
    TYPE sum = 0;
    for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
      sum += priors[i_cl] ;
    }
    sum = VL_MAX(sum, 1e-12) ; /* guard against division by zero */
    for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
      priors[i_cl] /= sum ;
    }
  }

  if (self->verbosity > 1) {
    VL_PRINTF("gmm: em: maximization step completed in %.2f s\n",
              vl_get_cpu_time() - time) ;
  }

  vl_free(oldMeans);
}
/* ---------------------------------------------------------------- */
/* EM iterations */
/* ---------------------------------------------------------------- */
/* Run EM iterations until the log-likelihood converges (relative change
 * below 1e-5 after the first iteration) or self->maxNumIterations is
 * reached. From the third iteration on, nearly-empty modes are restarted
 * before each maximization step. Returns the final log-likelihood.
 */
static double
VL_XCAT(_vl_gmm_em_, SFX)
(VlGMM * self,
 TYPE const * data,
 vl_size numData)
{
  vl_size iteration, restarted = 0 ; /* BUGFIX: restarted was uninitialized */
  double previousLL = (TYPE)(-VL_INFINITY_D) ;
  double LL = (TYPE)(-VL_INFINITY_D) ;
  double time = 0 ;

  _vl_gmm_prepare_for_data (self, numData) ;
  VL_XCAT(_vl_gmm_apply_bounds_,SFX)(self) ;

  for (iteration = 0 ; 1 ; ++ iteration) {
    double eps ;

    /*
     Expectation: assign data to Gaussian modes
     and compute log-likelihood.
     */
    if (self->verbosity > 1) {
      VL_PRINTF("gmm: em: entering expectation step\n") ;
      time = vl_get_cpu_time() ;
    }
    LL = VL_XCAT(vl_get_gmm_data_posteriors_,SFX)
      (self->posteriors,
       self->numClusters,
       numData,
       self->priors,
       self->means,
       self->dimension,
       self->covariances,
       data) ;
    if (self->verbosity > 1) {
      VL_PRINTF("gmm: em: expectation step completed in %.2f s\n",
                vl_get_cpu_time() - time) ;
    }

    /*
     Check the termination conditions.
     */
    if (self->verbosity) {
      /* BUGFIX: "%d" with vl_size is a format/argument mismatch; cast */
      VL_PRINTF("gmm: em: iteration %d: loglikelihood = %f (variation = %f)\n",
                (int)iteration, LL, LL - previousLL) ;
    }
    if (iteration >= self->maxNumIterations) {
      if (self->verbosity) {
        VL_PRINTF("gmm: em: terminating because "
                  "the maximum number of iterations "
                  "(%d) has been reached.\n", (int)self->maxNumIterations) ;
      }
      break ;
    }

    /* relative variation of the log-likelihood */
    eps = vl_abs_d ((LL - previousLL) / (LL));
    if ((iteration > 0) && (eps < 0.00001)) {
      if (self->verbosity) {
        VL_PRINTF("gmm: em: terminating because the algorithm "
                  "fully converged (log-likelihood variation = %f).\n", eps) ;
      }
      break ;
    }
    previousLL = LL ;

    /*
     Restart empty modes (only once the model has settled a little).
     */
    if (iteration > 1) {
      restarted = VL_XCAT(_vl_gmm_restart_empty_modes_, SFX)
        (self, data);
      /* BUGFIX: logical && instead of bitwise & */
      if ((restarted > 0) && (self->verbosity > 0)) {
        VL_PRINTF("gmm: em: %d Gaussian modes restarted because "
                  "they had become empty.\n", (int)restarted);
      }
    }

    /*
     Maximization: reestimate the GMM parameters.
     */
    VL_XCAT(_vl_gmm_maximization_, SFX)
      (self,self->posteriors,self->priors,self->covariances,self->means,data,numData) ;
  }
  return LL;
}
/* ---------------------------------------------------------------- */
/* Kmeans initialization of mixtures */
/* ---------------------------------------------------------------- */
/* Initialize the GMM by clustering the data with KMeans.
 *
 * If kmeansInit is non-NULL it is installed as the initialization
 * object; otherwise, if none is set yet, a default ANN KMeans (5
 * iterations, 1 tree, 1 repetition, random-selection seeding) is
 * created and owned by the GMM. The hard KMeans assignments are turned
 * into one-hot posteriors and a maximization step derives the initial
 * priors, means, and covariances.
 */
static void
VL_XCAT(_vl_gmm_init_with_kmeans_, SFX)
(VlGMM * self,
 TYPE const * data,
 vl_size numData,
 VlKMeans * kmeansInit)
{
  vl_size i ;
  vl_uint32 * assignments = vl_malloc(sizeof(vl_uint32) * numData);

  _vl_gmm_prepare_for_data (self, numData) ;

  /* start from a blank model */
  memset(self->means,0,sizeof(TYPE) * self->numClusters * self->dimension) ;
  memset(self->priors,0,sizeof(TYPE) * self->numClusters) ;
  memset(self->covariances,0,sizeof(TYPE) * self->numClusters * self->dimension) ;
  memset(self->posteriors,0,sizeof(TYPE) * self->numClusters * numData) ;

  /* install the caller-specified KMeans initialization object, if any */
  if (kmeansInit) { vl_gmm_set_kmeans_init_object (self, kmeansInit) ; }

  /* still none available? build a default one and take ownership */
  if (self->kmeansInit == NULL) {
    VlKMeans * km = vl_kmeans_new(self->dataType,VlDistanceL2) ;
    vl_kmeans_set_initialization(km, VlKMeansRandomSelection);
    vl_kmeans_set_max_num_iterations (km, 5) ;
    vl_kmeans_set_max_num_comparisons (km, VL_MAX(numData / 4, 10)) ;
    vl_kmeans_set_num_trees (km, 1);
    vl_kmeans_set_algorithm (km, VlKMeansANN);
    vl_kmeans_set_num_repetitions(km, 1);
    vl_kmeans_set_verbosity (km, self->verbosity);
    self->kmeansInit = km;
    self->kmeansInitIsOwner = VL_TRUE ;
  }

  /* cluster the data and quantize each point to its nearest center */
  vl_kmeans_cluster (self->kmeansInit, data, self->dimension, numData, self->numClusters);
  vl_kmeans_quantize (self->kmeansInit, assignments, NULL, data, numData) ;

  /* hard assignments become one-hot posteriors */
  for (i = 0 ; i < numData ; ++i) {
    ((TYPE*)self->posteriors)[assignments[i] + i * self->numClusters] = (TYPE) 1.0 ;
  }

  /* derive the mode parameters from the posteriors */
  VL_XCAT(_vl_gmm_maximization_, SFX)
    (self,self->posteriors,self->priors,self->covariances,self->means,data,numData);
  vl_free(assignments) ;
}
/* ---------------------------------------------------------------- */
/* Random initialization of mixtures */
/* ---------------------------------------------------------------- */
/* Compute the per-dimension (diagonal) variance of the whole data set.
 *
 * initSigma  [dimension] output; zeroed, then filled with the unbiased
 *            sample variance of each dimension (divisor numData - 1).
 * dimension  data dimensionality; data is [dimension x numData], column major.
 *
 * If numData <= 1 the variance is left at zero. The `self` parameter is
 * kept for interface compatibility with the other helpers.
 */
static void
VL_XCAT(_vl_gmm_compute_init_sigma_, SFX)
(VlGMM * self,
 TYPE const * data,
 TYPE * initSigma,
 vl_size dimension,
 vl_size numData)
{
  vl_size dim;
  vl_uindex i;
  TYPE * dataMean ;

  memset(initSigma,0,sizeof(TYPE)*dimension) ;
  if (numData <= 1) return ;

  dataMean = vl_malloc(sizeof(TYPE)*dimension);
  memset(dataMean,0,sizeof(TYPE)*dimension) ;

  /* find mean of the whole dataset */
  for(dim = 0 ; dim < dimension ; dim++) {
    for(i = 0 ; i < numData ; i++) {
      dataMean[dim] += data[i*dimension + dim];
    }
    dataMean[dim] /= numData;
  }

  /* compute variance of the whole dataset */
  for(dim = 0; dim < dimension; dim++) {
    for(i = 0; i < numData; i++) {
      /* BUGFIX: index with the `dimension` parameter, not self->dimension,
         consistently with the mean loop above (identical for current
         callers, but the function contract is the parameter) */
      TYPE diff = (data[i*dimension + dim] - dataMean[dim]) ;
      initSigma[dim] += diff*diff ;
    }
    initSigma[dim] /= numData - 1 ; /* safe: numData > 1 here */
  }

  vl_free(dataMean) ;
}
/* Initialize the GMM from the data without running KMeans: priors are
 * uniform, every mode gets the covariance of the whole data set, and
 * the means are sampled with kmeans++ seeding.
 */
static void
VL_XCAT(_vl_gmm_init_with_rand_data_, SFX)
(VlGMM * self,
 TYPE const * data,
 vl_size numData)
{
  vl_uindex k, j, dim ;
  VlKMeans * seeder ;

  _vl_gmm_prepare_for_data(self, numData) ;

  /* uniform priors summing to one */
  for (k = 0 ; k < self->numClusters ; ++k) {
    ((TYPE*)self->priors)[k] = (TYPE) (1.0 / self->numClusters) ;
  }

  /* compute the data covariance once, then replicate it to every mode */
  VL_XCAT(_vl_gmm_compute_init_sigma_, SFX) (self, data, self->covariances, self->dimension, numData);
  for (j = 1 ; j < self->numClusters ; ++j) {
    for (dim = 0 ; dim < self->dimension ; ++dim) {
      *((TYPE*)self->covariances + j * self->dimension + dim) =
        *((TYPE*)self->covariances + dim) ;
    }
  }

  /* pick the initial means with kmeans++ sampling */
  seeder = vl_kmeans_new(self->dataType,VlDistanceL2) ;
  vl_kmeans_init_centers_plus_plus(seeder, data, self->dimension, numData, self->numClusters) ;
  memcpy(self->means, vl_kmeans_get_centers(seeder),
         sizeof(TYPE) * self->dimension * self->numClusters) ;
  vl_kmeans_delete(seeder) ;
}
/* ---------------------------------------------------------------- */
#else /* VL_GMM_INSTANTIATING */
/* ---------------------------------------------------------------- */
#ifndef __DOXYGEN__
#define FLT VL_TYPE_FLOAT
#define TYPE float
#define SFX f
#define VL_GMM_INSTANTIATING
#include "gmm.c"
#define FLT VL_TYPE_DOUBLE
#define TYPE double
#define SFX d
#define VL_GMM_INSTANTIATING
#include "gmm.c"
#endif
/* VL_GMM_INSTANTIATING */
#endif
/* ---------------------------------------------------------------- */
#ifndef VL_GMM_INSTANTIATING
/* ---------------------------------------------------------------- */
/** @brief Create a new GMM object by copy
** @param self object.
** @return new copy.
**
** Most parameters, including the cluster priors, means, and
** covariances are copied. Data posteriors (available after
** initalization or EM) are not; nor is the KMeans object used for
** initialization, if any.
**/
VlGMM *
vl_gmm_new_copy (VlGMM const * self)
{
  vl_size typeSize = vl_get_type_size(self->dataType) ;
  vl_size vectorBytes = typeSize * self->numClusters * self->dimension ;
  VlGMM * gmm = vl_gmm_new(self->dataType, self->dimension, self->numClusters);

  /* scalar configuration and state */
  gmm->initialization = self->initialization;
  gmm->maxNumIterations = self->maxNumIterations;
  gmm->numRepetitions = self->numRepetitions;
  gmm->verbosity = self->verbosity;
  gmm->LL = self->LL;

  /* model parameters; posteriors and the KMeans object are deliberately
     not copied (see the function documentation above) */
  memcpy(gmm->means, self->means, vectorBytes);
  memcpy(gmm->covariances, self->covariances, vectorBytes);
  memcpy(gmm->priors, self->priors, typeSize * self->numClusters);
  return gmm ;
}
/** @brief Initialize mixture before EM takes place using random initialization
** @param self GMM object instance.
** @param data data points which should be clustered.
** @param numData number of data points.
**/
void
vl_gmm_init_with_rand_data
(VlGMM * self,
 void const * data,
 vl_size numData)
{
  /* clear any previous state, then dispatch on the numeric data type */
  vl_gmm_reset (self) ;
  switch (self->dataType) {
    case VL_TYPE_FLOAT :
      _vl_gmm_init_with_rand_data_f (self, (float const *)data, numData) ;
      break ;
    case VL_TYPE_DOUBLE :
      _vl_gmm_init_with_rand_data_d (self, (double const *)data, numData) ;
      break ;
    default:
      abort() ;
  }
}
/** @brief Initializes the GMM using KMeans
** @param self GMM object instance.
** @param data data points which should be clustered.
** @param numData number of data points.
** @param kmeansInit KMeans object to use.
**/
void
vl_gmm_init_with_kmeans
(VlGMM * self,
 void const * data,
 vl_size numData,
 VlKMeans * kmeansInit)
{
  /* clear any previous state, then dispatch on the numeric data type */
  vl_gmm_reset (self) ;
  switch (self->dataType) {
    case VL_TYPE_FLOAT :
      _vl_gmm_init_with_kmeans_f (self, (float const *)data, numData, kmeansInit) ;
      break ;
    case VL_TYPE_DOUBLE :
      _vl_gmm_init_with_kmeans_d (self, (double const *)data, numData, kmeansInit) ;
      break ;
    default:
      abort() ;
  }
}
#if 0
#include<fenv.h>
#endif
/** @brief Run GMM clustering - includes initialization and EM
** @param self GMM object instance.
** @param data data points which should be clustered.
** @param numData number of data points.
**/
double vl_gmm_cluster (VlGMM * self,
                       void const * data,
                       vl_size numData)
{
  /* scratch buffers holding the best model seen so far; swapped with the
     GMM's own buffers rather than copied (see below) */
  void * bestPriors = NULL ;
  void * bestMeans = NULL;
  void * bestCovariances = NULL;
  void * bestPosteriors = NULL;
  vl_size size = vl_get_type_size(self->dataType) ;
  double bestLL = -VL_INFINITY_D;
  vl_uindex repetition;

  assert(self->numRepetitions >=1) ;

  /* NOTE(review): allocation results are not checked before use --
     vl_malloc presumably aborts on failure, but confirm */
  bestPriors = vl_malloc(size * self->numClusters) ;
  bestMeans = vl_malloc(size * self->dimension * self->numClusters) ;
  bestCovariances = vl_malloc(size * self->dimension * self->numClusters) ;
  bestPosteriors = vl_malloc(size * self->numClusters * numData) ;

#if 0
  feenableexcept(FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW);
#endif

  /* run numRepetitions independent init+EM rounds, keeping the model
     with the best final log-likelihood */
  for (repetition = 0 ; repetition < self->numRepetitions ; ++ repetition) {
    double LL ;
    double timeRef ;

    if (self->verbosity) {
      VL_PRINTF("gmm: clustering: starting repetition %d of %d\n", repetition + 1, self->numRepetitions) ;
    }

    /* initialize a new mixture model */
    timeRef = vl_get_cpu_time() ;
    switch (self->initialization) {
      case VlGMMKMeans : vl_gmm_init_with_kmeans (self, data, numData, NULL) ; break ;
      case VlGMMRand : vl_gmm_init_with_rand_data (self, data, numData) ; break ;
      case VlGMMCustom : break ; /* caller supplied the initial model */
      default: abort() ;
    }
    if (self->verbosity) {
      VL_PRINTF("gmm: model initialized in %.2f s\n",
                vl_get_cpu_time() - timeRef) ;
    }

    /* fit the model to data by running EM */
    timeRef = vl_get_cpu_time () ;
    LL = vl_gmm_em (self, data, numData) ;
    if (self->verbosity) {
      VL_PRINTF("gmm: optimization terminated in %.2f s with loglikelihood %f\n",
                vl_get_cpu_time() - timeRef, LL) ;
    }

    /* new best (or first) model: swap its buffers into best*, keeping the
       previous best buffers as scratch for the next repetition */
    if (LL > bestLL || repetition == 0) {
      void * temp ;

      temp = bestPriors ;
      bestPriors = self->priors ;
      self->priors = temp ;

      temp = bestMeans ;
      bestMeans = self->means ;
      self->means = temp ;

      temp = bestCovariances ;
      bestCovariances = self->covariances ;
      self->covariances = temp ;

      temp = bestPosteriors ;
      bestPosteriors = self->posteriors ;
      self->posteriors = temp ;

      bestLL = LL;
    }
  }

  /* install the best model into the GMM and release the scratch buffers */
  vl_free (self->priors) ;
  vl_free (self->means) ;
  vl_free (self->covariances) ;
  vl_free (self->posteriors) ;
  self->priors = bestPriors ;
  self->means = bestMeans ;
  self->covariances = bestCovariances ;
  self->posteriors = bestPosteriors ;
  self->LL = bestLL;

  if (self->verbosity) {
    VL_PRINTF("gmm: all repetitions terminated with final loglikelihood %f\n", self->LL) ;
  }

  return bestLL ;
}
/** @brief Invoke the EM algorithm.
** @param self GMM object instance.
** @param data data points which should be clustered.
** @param numData number of data points.
**/
double vl_gmm_em (VlGMM * self, void const * data, vl_size numData)
{
  /* dispatch on the numeric type of the data */
  switch (self->dataType) {
    case VL_TYPE_FLOAT:
      return _vl_gmm_em_f (self, (float const *)data, numData) ;
    case VL_TYPE_DOUBLE:
      return _vl_gmm_em_d (self, (double const *)data, numData) ;
    default:
      abort() ;
  }
  return 0 ; /* unreachable; silences compilers that require a return */
}
/** @brief Explicitly set the initial means for EM.
** @param self GMM object instance.
** @param means initial values of means.
**/
void
vl_gmm_set_means (VlGMM * self, void const * means)
{
  /* dimension * numClusters scalars of the model's data type */
  vl_size numBytes =
    self->dimension * self->numClusters * vl_get_type_size(self->dataType) ;
  memcpy(self->means, means, numBytes) ;
}
/** @brief Explicitly set the initial sigma diagonals for EM.
** @param self GMM object instance.
** @param covariances initial values of covariance matrix diagonals.
**/
void vl_gmm_set_covariances (VlGMM * self, void const * covariances)
{
  /* dimension * numClusters diagonal entries of the model's data type */
  vl_size numBytes =
    self->dimension * self->numClusters * vl_get_type_size(self->dataType) ;
  memcpy(self->covariances, covariances, numBytes) ;
}
/** @brief Explicitly set the initial priors of the gaussians.
** @param self GMM object instance.
** @param priors initial values of the gaussian priors.
**/
void vl_gmm_set_priors (VlGMM * self, void const * priors)
{
  /* one scalar per cluster, in the model's data type */
  vl_size numBytes = self->numClusters * vl_get_type_size(self->dataType) ;
  memcpy(self->priors, priors, numBytes) ;
}
/* VL_GMM_INSTANTIATING */
#endif
#undef SFX
#undef TYPE
#undef FLT
#undef VL_GMM_INSTANTIATING
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD %
% T H H R R E SS H H O O L D D %
% T HHHHH RRRR EEE SSS HHHHH O O L D D %
% T H H R R E SS H H O O L D D %
% T H H R R EEEEE SSSSS H H OOO LLLLL DDDD %
% %
% %
% MagickCore Image Threshold Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/shear.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
#include "ios_error.h"
/*
Define declarations.
*/
#define ThresholdsFilename "thresholds.xml"
/*
Typedef declarations.
*/
/* An ordered-dither threshold map, parsed from thresholds.xml (or the
   built-in fallback). The opaque public handle is ThresholdMap. */
struct _ThresholdMap
{
  char
    *map_id,       /* the map's XML "map" attribute (lookup key) */
    *description;  /* human-readable description from the XML */

  size_t
    width,         /* tile width of the level matrix */
    height;        /* tile height of the level matrix */

  ssize_t
    divisor,       /* levels are scaled as level/divisor of full intensity */
    *levels;       /* width*height threshold levels, row major */
};
/*
Static declarations.
*/
#if MAGICKCORE_ZERO_CONFIGURATION_SUPPORT
#include "MagickCore/threshold-map.h"
#else
/* Minimal built-in threshold maps (XML), used as a fallback when
   thresholds.xml cannot be loaded from disk. */
static const char *const
  BuiltinMap=
  "<?xml version=\"1.0\"?>"
  "<thresholds>"
  " <threshold map=\"threshold\" alias=\"1x1\">"
  " <description>Threshold 1x1 (non-dither)</description>"
  " <levels width=\"1\" height=\"1\" divisor=\"2\">"
  " 1"
  " </levels>"
  " </threshold>"
  " <threshold map=\"checks\" alias=\"2x1\">"
  " <description>Checkerboard 2x1 (dither)</description>"
  " <levels width=\"2\" height=\"2\" divisor=\"3\">"
  " 1 2"
  " 2 1"
  " </levels>"
  " </threshold>"
  "</thresholds>";
#endif
/*
Forward declarations.
*/
static ThresholdMap
*GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveThresholdImage() selects an individual threshold for each pixel
% based on the range of intensity values in its local neighborhood. This
% allows for thresholding of an image whose global intensity histogram
% doesn't contain distinctive peaks.
%
% The format of the AdaptiveThresholdImage method is:
%
% Image *AdaptiveThresholdImage(const Image *image,const size_t width,
% const size_t height,const double bias,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the local neighborhood.
%
% o height: the height of the local neighborhood.
%
% o bias: the mean bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveThresholdImage(const Image *image,
  const size_t width,const size_t height,const double bias,
  ExceptionInfo *exception)
{
#define AdaptiveThresholdImageTag "AdaptiveThreshold/Image"

  CacheView
    *image_view,
    *threshold_view;

  Image
    *threshold_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickSizeType
    number_pixels;

  ssize_t
    y;

  /*
    Initialize threshold image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  threshold_image=CloneImage(image,0,0,MagickTrue,exception);
  if (threshold_image == (Image *) NULL)
    return((Image *) NULL);
  /* a degenerate neighborhood leaves the clone untouched */
  if ((width == 0) || (height == 0))
    return(threshold_image);
  status=SetImageStorageClass(threshold_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      threshold_image=DestroyImage(threshold_image);
      return((Image *) NULL);
    }
  /*
    Threshold image: each destination pixel is compared against the mean
    of its width x height neighborhood (plus bias). The neighborhood sum
    is maintained as a sliding window along each row.
  */
  status=MagickTrue;
  progress=0;
  number_pixels=(MagickSizeType) width*height;
  image_view=AcquireVirtualCacheView(image,exception);
  threshold_view=AcquireAuthenticCacheView(threshold_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,threshold_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_bias[MaxPixelChannels],   /* trailing column to drop from the window */
      channel_sum[MaxPixelChannels];    /* running neighborhood sum per channel */

    register const Quantum
      *magick_restrict p,
      *magick_restrict pixels;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      center,
      u,
      v;

    if (status == MagickFalse)
      continue;
    /* virtual view extends width/2 and height/2 beyond the row's edges */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      (height/2L),image->columns+width,height,exception);
    q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* offset (in quanta) of the window's center pixel within p */
    center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+
      GetPixelChannels(image)*(width/2);
    /* prime the sliding window with the first neighborhood of the row */
    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      PixelChannel channel = GetPixelChannelChannel(image,i);
      PixelTrait traits = GetPixelChannelTraits(image,channel);
      PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
        channel);
      if ((traits == UndefinedPixelTrait) ||
          (threshold_traits == UndefinedPixelTrait))
        continue;
      if ((threshold_traits & CopyPixelTrait) != 0)
        {
          /* channel is passed through unmodified */
          SetPixelChannel(threshold_image,channel,p[center+i],q);
          continue;
        }
      pixels=p;
      channel_bias[channel]=0.0;
      channel_sum[channel]=0.0;
      for (v=0; v < (ssize_t) height; v++)
      {
        for (u=0; u < (ssize_t) width; u++)
        {
          /* remember the window's last column so it can be dropped when
             the window slides right */
          if (u == (ssize_t) (width-1))
            channel_bias[channel]+=pixels[i];
          channel_sum[channel]+=pixels[i];
          pixels+=GetPixelChannels(image);
        }
        pixels+=GetPixelChannels(image)*image->columns;
      }
    }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          mean;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (threshold_traits == UndefinedPixelTrait))
          continue;
        if ((threshold_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(threshold_image,channel,p[center+i],q);
            continue;
          }
        /* slide the window one column: drop the old trailing column,
           then add the new leading column and record the new trailing one */
        channel_sum[channel]-=channel_bias[channel];
        channel_bias[channel]=0.0;
        pixels=p;
        for (v=0; v < (ssize_t) height; v++)
        {
          channel_bias[channel]+=pixels[i];
          pixels+=(width-1)*GetPixelChannels(image);
          channel_sum[channel]+=pixels[i];
          pixels+=GetPixelChannels(image)*(image->columns+1);
        }
        /* binarize against the local mean plus the caller's bias */
        mean=(double) (channel_sum[channel]/number_pixels+bias);
        SetPixelChannel(threshold_image,channel,(Quantum) ((double)
          p[center+i] <= mean ? 0 : QuantumRange),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(threshold_image);
    }
    if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* NOTE(review): only the increment is atomic; the subsequent read
           of `progress` is unsynchronized -- progress reports may be
           slightly stale under OpenMP. Confirm acceptable. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  threshold_image->type=image->type;
  threshold_view=DestroyCacheView(threshold_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    threshold_image=DestroyImage(threshold_image);
  return(threshold_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoThresholdImage() automatically performs image thresholding
% dependent on which method you specify.
%
% The format of the AutoThresholdImage method is:
%
% MagickBooleanType AutoThresholdImage(Image *image,
% const AutoThresholdMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-threshold.
%
% o method: choose from Kapur, OTSU, or Triangle.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double KapurThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
#define MaxIntensity  255
  /*
    Kapur's method: choose the bin that maximizes the sum of the entropies
    of the two histogram partitions it induces.  Returns the threshold as a
    percentage in [0,100], or -1.0 on a memory failure.
  */
  double
    *entropy_above,
    *entropy_below,
    *running,
    tiny;
  ssize_t
    best_bin,
    t;
  running=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*running));
  entropy_below=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*entropy_below));
  entropy_above=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*entropy_above));
  if ((running == (double *) NULL) || (entropy_below == (double *) NULL) ||
      (entropy_above == (double *) NULL))
    {
      /*
        Release whichever buffers were acquired before the failure.
      */
      if (entropy_above != (double *) NULL)
        entropy_above=(double *) RelinquishMagickMemory(entropy_above);
      if (entropy_below != (double *) NULL)
        entropy_below=(double *) RelinquishMagickMemory(entropy_below);
      if (running != (double *) NULL)
        running=(double *) RelinquishMagickMemory(running);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Cumulative histogram, then the entropy of the black (<= t) and white
    (> t) partitions for every candidate bin t.
  */
  running[0]=histogram[0];
  for (t=1; t <= MaxIntensity; t++)
    running[t]=running[t-1]+histogram[t];
  tiny=MagickMinimumValue;
  for (t=0; t <= MaxIntensity; t++)
  {
    ssize_t
      bin;
    entropy_below[t]=0.0;
    if (running[t] > tiny)
      {
        double
          sum;
        sum=0.0;
        for (bin=0; bin <= t; bin++)
          if (histogram[bin] > tiny)
            sum-=histogram[bin]/running[t]*log(histogram[bin]/running[t]);
        entropy_below[t]=sum;
      }
    entropy_above[t]=0.0;
    if ((1.0-running[t]) > tiny)
      {
        double
          sum;
        sum=0.0;
        for (bin=t+1; bin <= MaxIntensity; bin++)
          if (histogram[bin] > tiny)
            sum-=histogram[bin]/(1.0-running[t])*
              log(histogram[bin]/(1.0-running[t]));
        entropy_above[t]=sum;
      }
  }
  /*
    Pick the bin whose combined entropy is largest.
  */
  best_bin=0;
  {
    double
      best;
    best=entropy_below[0]+entropy_above[0];
    for (t=1; t <= MaxIntensity; t++)
      if ((entropy_below[t]+entropy_above[t]) > best)
        {
          best=entropy_below[t]+entropy_above[t];
          best_bin=t;
        }
  }
  entropy_above=(double *) RelinquishMagickMemory(entropy_above);
  entropy_below=(double *) RelinquishMagickMemory(entropy_below);
  running=(double *) RelinquishMagickMemory(running);
  return(100.0*best_bin/MaxIntensity);
}
static double OTSUThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
  /*
    Otsu's method: choose the bin that maximizes the between-class variance
    of the two partitions it induces.  Returns the threshold as a percentage
    in [0,100], or -1.0 on a memory failure.
  */
  double
    best_sigma,
    *mean,
    *pdf,
    *sigma,
    split,
    *weight;
  ssize_t
    bin;
  mean=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*mean));
  weight=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*weight));
  pdf=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*pdf));
  sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma));
  if ((mean == (double *) NULL) || (weight == (double *) NULL) ||
      (pdf == (double *) NULL) || (sigma == (double *) NULL))
    {
      /*
        Release whichever buffers were acquired before the failure.
      */
      if (sigma != (double *) NULL)
        sigma=(double *) RelinquishMagickMemory(sigma);
      if (pdf != (double *) NULL)
        pdf=(double *) RelinquishMagickMemory(pdf);
      if (weight != (double *) NULL)
        weight=(double *) RelinquishMagickMemory(weight);
      if (mean != (double *) NULL)
        mean=(double *) RelinquishMagickMemory(mean);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Probability density, then running class probability (weight) and class
    mean for every candidate split.
  */
  for (bin=0; bin <= (ssize_t) MaxIntensity; bin++)
    pdf[bin]=histogram[bin];
  weight[0]=pdf[0];
  mean[0]=0.0;
  for (bin=1; bin <= (ssize_t) MaxIntensity; bin++)
  {
    weight[bin]=weight[bin-1]+pdf[bin];
    mean[bin]=mean[bin-1]+bin*pdf[bin];
  }
  /*
    Maximize the between-class variance; note the final bin is intentionally
    excluded (strict '<'), matching the original implementation.
  */
  split=0;
  best_sigma=0.0;
  for (bin=0; bin < (ssize_t) MaxIntensity; bin++)
  {
    sigma[bin]=0.0;
    if ((weight[bin] != 0.0) && (weight[bin] != 1.0))
      sigma[bin]=pow(mean[MaxIntensity]*weight[bin]-mean[bin],2.0)/
        (weight[bin]*(1.0-weight[bin]));
    if (sigma[bin] > best_sigma)
      {
        best_sigma=sigma[bin];
        split=(double) bin;
      }
  }
  mean=(double *) RelinquishMagickMemory(mean);
  weight=(double *) RelinquishMagickMemory(weight);
  pdf=(double *) RelinquishMagickMemory(pdf);
  sigma=(double *) RelinquishMagickMemory(sigma);
  return(100.0*split/MaxIntensity);
}
static double TriangleThreshold(const double *histogram)
{
  /*
    Triangle method: draw a line from the histogram peak to the far end of
    the populated range and threshold at the bin farthest from that line.
    Returns the threshold as a percentage in [0,100].
  */
  double
    count,
    d,
    distance,
    line_a,
    line_b,
    line_c,
    norm,
    peak_x,
    peak_y,
    tail_x,
    tail_y;
  ssize_t
    bin,
    first,
    last,
    peak,
    split;
  first=0;  /* lowest populated bin */
  for (bin=0; bin <= (ssize_t) MaxIntensity; bin++)
    if (histogram[bin] > 0.0)
      {
        first=bin;
        break;
      }
  last=0;  /* highest populated bin */
  for (bin=(ssize_t) MaxIntensity; bin >= 0; bin--)
    if (histogram[bin] > 0.0)
      {
        last=bin;
        break;
      }
  peak=0;  /* histogram mode: bin with the largest count */
  count=0.0;
  for (bin=0; bin <= (ssize_t) MaxIntensity; bin++)
    if (histogram[bin] > count)
      {
        peak=bin;
        count=histogram[bin];
      }
  /*
    Implicit line through the peak and the zero-count end of the longer
    tail, in ax+by+c=0 form.
  */
  peak_x=(double) peak;
  peak_y=histogram[peak];
  tail_x=(double) last;
  if ((peak-first) >= (last-peak))
    tail_x=(double) first;
  tail_y=0.0;
  line_a=peak_y-tail_y;
  line_b=tail_x-peak_x;
  line_c=(-1.0)*(line_a*peak_x+line_b*peak_y);
  norm=1.0/sqrt(line_a*line_a+line_b*line_b+line_c*line_c);
  /*
    Scan the chosen side of the peak for the bin with the greatest signed
    distance from the line (sign selects the correct side).
  */
  split=0;
  distance=0.0;
  if (tail_x == (double) first)
    for (bin=first; bin < peak; bin++)
    {
      d=norm*(line_a*bin+line_b*histogram[bin]+line_c);
      if ((sqrt(d*d) > distance) && (d > 0.0))
        {
          split=bin;
          distance=sqrt(d*d);
        }
    }
  else
    for (bin=last; bin > peak; bin--)
    {
      d=norm*(line_a*bin+line_b*histogram[bin]+line_c);
      if ((sqrt(d*d) > distance) && (d < 0.0))
        {
          split=bin;
          distance=sqrt(d*d);
        }
    }
  return(100.0*split/MaxIntensity);
}
MagickExport MagickBooleanType AutoThresholdImage(Image *image,
  const AutoThresholdMethod method,ExceptionInfo *exception)
{
  CacheView
    *image_view;
  char
    property[MagickPathExtent];
  double
    gamma,
    *histogram,
    sum,
    threshold;
  MagickBooleanType
    status;
  register ssize_t
    i;
  ssize_t
    y;
  /*
    Form histogram: bucket every pixel's intensity into one of
    MaxIntensity+1 (256) bins.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  (void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double intensity = GetPixelIntensity(image,p);
      histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Normalize histogram so the bins sum to 1 (a probability density);
    PerceptibleReciprocal() guards against division by zero for an empty
    histogram.
  */
  sum=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    sum+=histogram[i];
  gamma=PerceptibleReciprocal(sum);
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    histogram[i]=gamma*histogram[i];
  /*
    Discover threshold from histogram: each helper returns the threshold as
    a percentage in [0,100], or a negative value on failure.  OTSU doubles
    as the default method.
  */
  switch (method)
  {
    case KapurThresholdMethod:
    {
      threshold=KapurThreshold(image,histogram,exception);
      break;
    }
    case OTSUThresholdMethod:
    default:
    {
      threshold=OTSUThreshold(image,histogram,exception);
      break;
    }
    case TriangleThresholdMethod:
    {
      threshold=TriangleThreshold(histogram);
      break;
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  if (threshold < 0.0)
    status=MagickFalse;
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Threshold image: record the chosen threshold as an image property, then
    apply it with BilevelImage() (threshold rescaled from percent to
    quantum range).
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold);
  (void) SetImageProperty(image,"auto-threshold:threshold",property,exception);
  if (IsStringTrue(GetImageArtifact(image,"auto-threshold:verbose")) != MagickFalse)
    (void) FormatLocaleFile(thread_stdout,"%.*g%%\n",GetMagickPrecision(),threshold);
  return(BilevelImage(image,QuantumRange*threshold/100.0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B i l e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BilevelImage() changes the value of individual pixels based on the
% intensity of each pixel channel. The result is a high-contrast image.
%
% More precisely, each channel value of the image is 'thresholded' so that if
% it is equal to or less than the given value it is set to zero, while any
% value greater than that is set to its maximum, QuantumRange.
%
% This function is what is used to implement the "-threshold" operator for
% the command line API.
%
% If the default channel setting is given the image is thresholded using just
% the gray 'intensity' of the image, rather than the individual channels.
%
% The format of the BilevelImage method is:
%
% MagickBooleanType BilevelImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold values.
%
% o exception: return any errors or warnings in this structure.
%
% Aside: You can get the same results as operator using LevelImages()
% with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  /*
    Set every updatable channel to 0 when its value is at or below the
    threshold, and to QuantumRange otherwise.  Returns MagickTrue on
    success; errors are reported through 'exception'.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Bilevel threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;
    register Quantum
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;
      register ssize_t
        i;
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          With the default channel mask, threshold on the pixel intensity;
          with a custom mask, threshold each selected channel by its own
          value.
        */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        q[i]=(Quantum) (pixel <= threshold ? 0 : QuantumRange);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /*
          Fix: pass 'progress', not 'progress++' -- the counter is already
          incremented (atomically) above; the extra post-increment double
          counted rows and raced on the shared counter outside the atomic,
          unlike every sibling function in this file.
        */
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l a c k T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlackThresholdImage() is like ThresholdImage() but forces all pixels below
% the threshold into black while leaving all pixels at or above the threshold
% unchanged.
%
% The format of the BlackThresholdImage method is:
%
% MagickBooleanType BlackThresholdImage(Image *image,
% const char *threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
  CacheView
    *image_view;
  GeometryInfo
    geometry_info;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PixelInfo
    threshold;
  MagickStatusType
    flags;
  ssize_t
    y;
  /*
    Force pixels below the per-channel threshold to black; pixels at or
    above the threshold are left unchanged.  A NULL thresholds string is a
    no-op.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Parse the threshold geometry: rho/sigma/xi/psi map to
    red/green/blue/alpha (psi is black and chi is alpha for CMYK); a single
    value applies to all color channels, and a '%' suffix scales the values
    from percent to quantum range.
  */
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    Black threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;
    register Quantum
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;
      register ssize_t
        i;
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          Default channel mask thresholds on intensity; a custom mask
          thresholds each selected channel by its own value.
        */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel < GetPixelInfoChannel(&threshold,channel))
          q[i]=(Quantum) 0;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l a m p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClampImage() sets each pixel whose value is below zero to zero and any
% pixel whose value is above the quantum range to the quantum range (e.g.
% 65535); otherwise the pixel value remains unchanged.
%
% The format of the ClampImage method is:
%
% MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
{
#define ClampImageTag "Clamp/Image"
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  /*
    Clamp each updatable channel of every pixel to the valid quantum range.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;
      register PixelInfo
        *magick_restrict q;
      /*
        Palette image: clamping the colormap entries suffices; SyncImage()
        then pushes the updated colormap back into the pixel cache.
      */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) ClampPixel(q->red);
        q->green=(double) ClampPixel(q->green);
        q->blue=(double) ClampPixel(q->blue);
        q->alpha=(double) ClampPixel(q->alpha);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Clamp image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;
    register Quantum
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampPixel((MagickRealType) q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ClampImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorThresholdImage() forces all pixels in the color range to white
% otherwise black.
%
% The format of the ColorThresholdImage method is:
%
% MagickBooleanType ColorThresholdImage(Image *image,
% const PixelInfo *start_color,const PixelInfo *stop_color,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o start_color, stop_color: define the start and stop color range. Any
% pixel within the range returns white otherwise black.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ColorThresholdImage(Image *image,
  const PixelInfo *start_color,const PixelInfo *stop_color,
  ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PixelInfo
    start,
    stop;
  ssize_t
    y;
  /*
    Color threshold image: pixels whose updatable channels all fall within
    the [start,stop] color range map to colormap index 1 (white); all
    others map to index 0 (black).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=AcquireImageColormap(image,2,exception);
  if (status == MagickFalse)
    return(status);
  /*
    Convert the RGB start/stop colors into the image's own colorspace so
    they compare channel-for-channel; the Convert helpers yield normalized
    components which are rescaled to quantum range below.
  */
  start=(*start_color);
  stop=(*stop_color);
  switch (image->colorspace)
  {
    case HCLColorspace:
    {
      ConvertRGBToHCL(start_color->red,start_color->green,start_color->blue,
        &start.red,&start.green,&start.blue);
      ConvertRGBToHCL(stop_color->red,stop_color->green,stop_color->blue,
        &stop.red,&stop.green,&stop.blue);
      break;
    }
    case HSBColorspace:
    {
      ConvertRGBToHSB(start_color->red,start_color->green,start_color->blue,
        &start.red,&start.green,&start.blue);
      ConvertRGBToHSB(stop_color->red,stop_color->green,stop_color->blue,
        &stop.red,&stop.green,&stop.blue);
      break;
    }
    case HSLColorspace:
    {
      ConvertRGBToHSL(start_color->red,start_color->green,start_color->blue,
        &start.red,&start.green,&start.blue);
      ConvertRGBToHSL(stop_color->red,stop_color->green,stop_color->blue,
        &stop.red,&stop.green,&stop.blue);
      break;
    }
    case HSVColorspace:
    {
      ConvertRGBToHSV(start_color->red,start_color->green,start_color->blue,
        &start.red,&start.green,&start.blue);
      ConvertRGBToHSV(stop_color->red,stop_color->green,stop_color->blue,
        &stop.red,&stop.green,&stop.blue);
      break;
    }
    case HWBColorspace:
    {
      ConvertRGBToHWB(start_color->red,start_color->green,start_color->blue,
        &start.red,&start.green,&start.blue);
      ConvertRGBToHWB(stop_color->red,stop_color->green,stop_color->blue,
        &stop.red,&stop.green,&stop.blue);
      break;
    }
    case LabColorspace:
    {
      ConvertRGBToLab(start_color->red,start_color->green,start_color->blue,
        &start.red,&start.green,&start.blue);
      ConvertRGBToLab(stop_color->red,stop_color->green,stop_color->blue,
        &stop.red,&stop.green,&stop.blue);
      break;
    }
    default:
    {
      start.red*=QuantumScale;
      start.green*=QuantumScale;
      start.blue*=QuantumScale;
      stop.red*=QuantumScale;
      stop.green*=QuantumScale;
      stop.blue*=QuantumScale;
      break;
    }
  }
  start.red*=QuantumRange;
  start.green*=QuantumRange;
  start.blue*=QuantumRange;
  stop.red*=QuantumRange;
  stop.green*=QuantumRange;
  stop.blue*=QuantumRange;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;
    register Quantum
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickBooleanType
        foreground = MagickTrue;
      register ssize_t
        i;
      /*
        A pixel is foreground only if every updatable channel lies inside
        the [start,stop] interval.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if ((q[i] < GetPixelInfoChannel(&start,channel)) ||
            (q[i] > GetPixelInfoChannel(&stop,channel)))
          foreground=MagickFalse;
      }
      SetPixelIndex(image,(Quantum) (foreground != MagickFalse ? 1 : 0),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    SyncImage() propagates the colormap indexes into the pixel cache.
  */
  image->colorspace=sRGBColorspace;
  return(SyncImage(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyThresholdMap() de-allocates the given ThresholdMap.
%
% The format of the DestroyThresholdMap method is:
%
%      ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
%
% A description of each parameter follows.
%
% o map: Pointer to the Threshold map to destroy
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  /*
    Release a ThresholdMap and all storage it owns; returns NULL so callers
    can overwrite their stale pointer in one statement.
  */
  assert(map != (ThresholdMap *) NULL);
  if (map->levels != (ssize_t *) NULL)
    map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  return((ThresholdMap *) RelinquishMagickMemory(map));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMap() loads and searches one or more threshold map files for the
% map matching the given name or alias.
%
% The format of the GetThresholdMap method is:
%
% ThresholdMap *GetThresholdMap(const char *map_id,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o map_id: ID of the map to look for.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
  ExceptionInfo *exception)
{
  ThresholdMap
    *map;
  /*
    Search the built-in threshold maps first; when no match is found (and
    zero-configuration support is disabled), fall back to the thresholds
    configure files.
  */
  map=GetThresholdMapFile(BuiltinMap,"built-in",map_id,exception);
#if !MAGICKCORE_ZERO_CONFIGURATION_SUPPORT
  if (map == (ThresholdMap *) NULL)
    {
      const StringInfo
        *option;
      LinkedListInfo
        *options;
      options=GetConfigureOptions(ThresholdsFilename,exception);
      for (option=(const StringInfo *) GetNextValueInLinkedList(options);
        option != (const StringInfo *) NULL;
        option=(const StringInfo *) GetNextValueInLinkedList(options))
      {
        map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
          GetStringInfoPath(option),map_id,exception);
        if (map != (ThresholdMap *) NULL)
          break;
      }
      options=DestroyConfigureOptions(options);
    }
#endif
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMapFile() look for a given threshold map name or alias in the
% given XML file data, and return the allocated the map when found.
%
% The format of the GetThresholdMapFile method is:
%
%      ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
% const char *map_id,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o map_id: ID of the map to look for in XML list.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
  const char *map_id,ExceptionInfo *exception)
{
  char
    *p;
  const char
    *attribute,
    *content;
  double
    value;
  register ssize_t
    i;
  ThresholdMap
    *map;
  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;
  /*
    Parse the XML threshold-map list and build a ThresholdMap for the entry
    whose "map" or "alias" attribute matches map_id.  Returns NULL when the
    XML cannot be parsed, no entry matches, or the entry is malformed.
  */
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  map=(ThresholdMap *) NULL;
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(map);
  /*
    Locate the matching <threshold> element by name or alias.
  */
  for (threshold=GetXMLTreeChild(thresholds,"threshold");
    threshold != (XMLTreeInfo *) NULL;
    threshold=GetNextXMLTreeTag(threshold))
  {
    attribute=GetXMLTreeAttribute(threshold,"map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold,"alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    A valid entry requires both a <description> and a <levels> child.
  */
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  map=(ThresholdMap *) AcquireCriticalMemory(sizeof(*map));
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  attribute=GetXMLTreeAttribute(threshold,"map");
  if (attribute != (char *) NULL)
    map->map_id=ConstantString(attribute);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
  /*
    Validate the map geometry: width and height must be positive and the
    divisor must be at least 2.
  */
  attribute=GetXMLTreeAttribute(levels,"width");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->width=StringToUnsignedLong(attribute);
  if (map->width == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"height");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->height=StringToUnsignedLong(attribute);
  if (map->height == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"divisor");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->divisor=(ssize_t) StringToLong(attribute);
  if (map->divisor < 2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /*
    Parse exactly width*height level values; each must fall in
    [0, divisor].
  */
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height*
    sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  for (i=0; i < (ssize_t) (map->width*map->height); i++)
  {
    map->levels[i]=(ssize_t) strtol(content,&p,10);
    if (p == content)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too few values, map \"%s\"",map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
          (double) map->levels[i],map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    content=p;
  }
  /*
    One extra parse attempt detects surplus level values.
  */
  value=(double) strtol(content,&p,10);
  (void) value;
  if (p != content)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidContent", "<level> too many values, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L i s t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMapFile() lists the threshold maps and their descriptions
% in the given XML file data.
%
% The format of the ListThresholdMapFile method is:
%
%      MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
% const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: An pointer to the output FILE.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  const char
    *alias,
    *content,
    *map;
  XMLTreeInfo
    *description,
    *threshold,
    *thresholds;
  /*
    Emit a table of every threshold map (name, alias, description) found in
    the given XML data; returns MagickFalse when the XML is unparsable or
    an entry is malformed.
  */
  assert(xml != (char *) NULL);
  assert(file != (FILE *) NULL);
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  for (threshold=GetXMLTreeChild(thresholds,"threshold");
    threshold != (XMLTreeInfo *) NULL;
    threshold=GetNextXMLTreeTag(threshold))
  {
    map=GetXMLTreeAttribute(threshold,"map");
    if (map == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    alias=GetXMLTreeAttribute(threshold,"alias");
    description=GetXMLTreeChild(threshold,"description");
    if (description == (XMLTreeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"",map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    content=GetXMLTreeContent(description);
    if (content == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
      content);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i s t T h r e s h o l d M a p s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMaps() lists the threshold maps and their descriptions
% as defined by "threshold.xml" to a file.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: A pointer to the output FILE.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  /*
    List the threshold maps of every configured "thresholds" XML file,
    accumulating a combined success flag across files.
  */
  status=MagickTrue;
  if (file == (FILE *) NULL)
    file=thread_stdout;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n Threshold Maps for Ordered Dither Operations\n");
  for (option=(const StringInfo *) GetNextValueInLinkedList(options);
       option != (const StringInfo *) NULL;
       option=(const StringInfo *) GetNextValueInLinkedList(options))
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O r d e r e d D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OrderedDitherImage() will perform an ordered dither based on a number
% of pre-defined dithering threshold maps, but over multiple intensity
% levels, which can be different for different channels, according to the
% input argument.
%
% The format of the OrderedDitherImage method is:
%
% MagickBooleanType OrderedDitherImage(Image *image,
% const char *threshold_map,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold_map: A string containing the name of the threshold dither
% map to use, followed by zero or more numbers representing the number
% of color levels to dither between.
%
% Any level number less than 2 will be equivalent to 2, and means only
% binary dithering will be applied to each color channel.
%
% No numbers also means a 2 level (bitmap) dither will be applied to all
% channels, while a single number is the number of levels applied to each
% channel in sequence. More numbers will be applied in turn to each of
% the color channels.
%
% For example: "o3x3,6" will generate a 6 level posterization of the
% image with an ordered 3x3 diffused pixel dither being applied between
% each level. While checker,8,8,4 will produce a 332 colormapped image
% with only a single checkerboard hash pattern (50% grey) between each
% color level, to basically double the number of color levels with
% a bare minimum of dithering.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OrderedDitherImage(Image *image,
  const char *threshold_map,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  char
    token[MagickPathExtent];

  const char
    *p;

  double
    levels[CompositePixelChannel];

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  ThresholdMap
    *map;

  /*
    Ordered dither the image with the named pre-defined threshold map,
    optionally followed by comma-separated per-channel level counts.
    Fix: propagate row failures to the caller -- the function previously
    returned MagickTrue unconditionally, discarding the accumulated status.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (threshold_map == (const char *) NULL)
    return(MagickTrue);
  /*
    Skip leading whitespace and commas, then copy the map name into token.
  */
  p=(char *) threshold_map;
  while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) &&
    (*p != '\0'))
    p++;
  threshold_map=p;
  while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) &&
    (*p != '\0'))
  {
    if ((p-threshold_map) >= (MagickPathExtent-1))
      break;
    token[p-threshold_map]=(*p);
    p++;
  }
  token[p-threshold_map]='\0';
  map=GetThresholdMap(token,exception);
  if (map == (ThresholdMap *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "InvalidArgument","%s : '%s'","ordered-dither",threshold_map);
      return(MagickFalse);
    }
  /*
    Parse optional level counts; default is 2 (bitmap) per channel.  A
    single number applies to every channel, further numbers are applied to
    the channels in turn.
  */
  for (i=0; i < MaxPixelChannels; i++)
    levels[i]=2.0;
  p=strchr((char *) threshold_map,',');
  if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0))
    {
      (void) GetNextToken(p,&p,MagickPathExtent,token);
      for (i=0; (i < MaxPixelChannels); i++)
        levels[i]=StringToDouble(token,(char **) NULL);
      for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++)
      {
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        if (*token == ',')
          (void) GetNextToken(p,&p,MagickPathExtent,token);
        levels[i]=StringToDouble(token,(char **) NULL);
      }
    }
  for (i=0; i < MaxPixelChannels; i++)
    if (fabs(levels[i]) >= 1)
      levels[i]-=1.0;
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Dither image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      ssize_t
        n;

      n=0;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        ssize_t
          level,
          threshold;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (fabs(levels[n]) < MagickEpsilon)
          {
            /* a level of 0 disables dithering for this channel */
            n++;
            continue;
          }
        /*
          Posterize to levels[n] levels; the threshold-map entry for this
          pixel position decides the rounding direction.
        */
        threshold=(ssize_t) (QuantumScale*q[i]*(levels[n]*(map->divisor-1)+1));
        level=threshold/(map->divisor-1);
        threshold-=level*(map->divisor-1);
        q[i]=ClampToQuantum((double) (level+(threshold >=
          map->levels[(x % map->width)+map->width*(y % map->height)]))*
          QuantumRange/levels[n]);
        n++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,DitherImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  map=DestroyThresholdMap(map);
  return(status);  /* was return(MagickTrue): row failures were discarded */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P e r c e p t i b l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PerceptibleImage() set each pixel whose value is less than |epsilon| to
% epsilon or -epsilon (whichever is closer) otherwise the pixel value remains
% unchanged.
%
% The format of the PerceptibleImage method is:
%
% MagickBooleanType PerceptibleImage(Image *image,const double epsilon,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Push a channel value away from zero: values whose magnitude is below
  epsilon are replaced by +/-epsilon (keeping the sign); anything else is
  returned unchanged.
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
  const double epsilon)
{
  double
    sign;

  sign=1.0;
  if ((double) quantum < 0.0)
    sign=(-1.0);
  if ((sign*quantum) < epsilon)
    return((Quantum) (sign*epsilon));
  return(quantum);
}
/*
  Clamp every updatable channel away from zero: values with magnitude below
  epsilon become +/-epsilon (sign preserved), all other values pass through
  (see PerceptibleThreshold above).
*/
MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon,ExceptionInfo *exception)
{
#define PerceptibleImageTag "Perceptible/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        PseudoClass: thresholding the colormap covers every pixel; SyncImage
        then refreshes the pixel cache from the updated colormap.
      */
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red),
          epsilon);
        q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green),
          epsilon);
        q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue),
          epsilon);
        q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha),
          epsilon);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Perceptible image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    /* a previous row failed: skip remaining work but keep iterating so the
       OpenMP schedule stays balanced */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PerceptibleThreshold(q[i],epsilon);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PerceptibleImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n d o m T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RandomThresholdImage() changes the value of individual pixels based on the
% intensity of each pixel compared to a random threshold. The result is a
% low-contrast, two color image.
%
% The format of the RandomThresholdImage method is:
%
% MagickBooleanType RandomThresholdImage(Image *image,
% const double min_threshold,const double max_threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o min_threshold,max_threshold: Specify the low and high thresholds.
% These values range from 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const double min_threshold, const double max_threshold,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Threshold each updatable channel against a per-pixel random value:
    channels at or below the threshold become 0, the rest QuantumRange.
    Channel values outside [min_threshold,max_threshold] are compared
    against the nearest bound instead of a random value.
    Fix: removed the unused PixelInfo `threshold` and its GetPixelInfo()
    call -- it was shadowed by the loop-local double and never read.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Random threshold image.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          threshold;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if ((double) q[i] < min_threshold)
          threshold=min_threshold;
        else
          if ((double) q[i] > max_threshold)
            threshold=max_threshold;
          else
            threshold=(double) (QuantumRange*
              GetPseudoRandomValue(random_info[id]));
        q[i]=(double) q[i] <= threshold ? 0 : QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n g e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RangeThresholdImage() applies soft and hard thresholding.
%
% The format of the RangeThresholdImage method is:
%
% MagickBooleanType RangeThresholdImage(Image *image,
% const double low_black,const double low_white,const double high_white,
% const double high_black,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o low_black: Define the minimum black threshold value.
%
% o low_white: Define the minimum white threshold value.
%
% o high_white: Define the maximum white threshold value.
%
% o high_black: Define the maximum black threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Soft/hard range threshold: values below low_black or above high_black map
  to black, values within [low_white,high_white] map to white, and the two
  transition bands ramp linearly between black and white.
*/
MagickExport MagickBooleanType RangeThresholdImage(Image *image,
  const double low_black,const double low_white,const double high_white,
  const double high_black,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /*
    Range threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      /* Default comparison value is the overall pixel intensity; when a
         restricted channel mask is active the channel's own value is used
         instead (see inside the loop). */
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel < low_black)
          q[i]=(Quantum) 0;
        else
          if ((pixel >= low_black) && (pixel < low_white))
            q[i]=ClampToQuantum(QuantumRange*
              PerceptibleReciprocal(low_white-low_black)*(pixel-low_black));
          else
            if ((pixel >= low_white) && (pixel <= high_white))
              q[i]=QuantumRange;
            else
              if ((pixel > high_white) && (pixel <= high_black))
                q[i]=ClampToQuantum(QuantumRange*PerceptibleReciprocal(
                  high_black-high_white)*(high_black-pixel));
              else
                /* both remaining cases (pixel > high_black, and the
                   otherwise-unreachable fallback) map to black */
                if (pixel > high_black)
                  q[i]=(Quantum) 0;
                else
                  q[i]=(Quantum) 0;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W h i t e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
% the threshold into white while leaving all pixels at or below the threshold
% unchanged.
%
% The format of the WhiteThresholdImage method is:
%
% MagickBooleanType WhiteThresholdImage(Image *image,
% const char *threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Force every updatable channel whose value exceeds the per-channel
  threshold to white (QuantumRange); values at or below the threshold are
  left unchanged.  `thresholds` is a geometry string: rho[xsigma[xxi[xpsi]]],
  optionally suffixed with '%' to scale the values from percent of
  QuantumRange.
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /*
    A single number (rho) is applied to all channels; sigma/xi/psi override
    green/blue/alpha (or black/alpha for CMYK) individually.
  */
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  /* NOTE(review): threshold.colorspace is whatever GetPixelInfo() derived
     from the image -- presumably the image's colorspace; verify. */
  if (threshold.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      /* scale percentages to quantum range */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    White threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      /* overall intensity by default; per-channel value when a restricted
         channel mask is active */
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel > GetPixelInfoChannel(&threshold,channel))
          q[i]=QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
for.c | #include <stdio.h>
#include <omp.h>
/*
 * Demonstrates manual loop partitioning versus the OpenMP `for`
 * worksharing directive.  (Fix: removed the unused file-local `int i`
 * that shadowed the loop counters, and corrected the thread-3 range
 * comment -- the `i <= 10` cap limits it to 9-10, not 9-11.)
 */
int main()
{
    omp_set_num_threads(4);
    /*
     The code below represents how to use for loop without the for directive
    */
    // 4 threads will be created
    // ID : 0 will execute i=0-2
    // ID : 1 will execute i=3-5
    // ID : 2 will execute i=6-8
    // ID : 3 will execute i=9-10 (capped by the i<=10 bound)
#pragma omp parallel
    {
        int thread_id = omp_get_thread_num();
        for (int i = thread_id * 3; i <= thread_id * 3 + 2 && i <= 10; i++)
        {
            printf("Thread : %d || %d\n", thread_id, i);
        }
    }
    printf("\n\n");
    /*
     Using the for directive: OpenMP divides iterations 0..10 among threads
    */
#pragma omp parallel for
    for (int i = 0; i <= 10; i++)
    {
        int thread_id = omp_get_thread_num();
        printf("Thread : %d || %d\n", thread_id, i);
    }
    return 0;
}
main.c | #pragma clang diagnostic push
#pragma ide diagnostic ignored "openmp-use-default-none"
#include <stdio.h>
#include <malloc.h>
#include <omp.h>
#include <stdlib.h>
#include "omp.h"
/*
 * Merges two ascending runs into `result`.  Despite their names,
 * `arraySize` and `tmpSize` are INCLUSIVE end pointers (last element of
 * each run), not element counts.  On ties the element from `tmp` is
 * emitted first, matching the original ordering.
 */
void merge(int *array, const int *arraySize, int *tmp, const int *tmpSize, int *result) {
    int out = 0;
    /* interleave while both runs still have elements */
    while (array <= arraySize && tmp <= tmpSize)
        result[out++] = (*array < *tmp) ? *array++ : *tmp++;
    /* drain whichever run is left over */
    while (array <= arraySize)
        result[out++] = *array++;
    while (tmp <= tmpSize)
        result[out++] = *tmp++;
}
/*
 * qsort comparator for ints: returns <0, 0 or >0 as *a is less than,
 * equal to, or greater than *b.  Uses the comparison idiom instead of
 * subtraction, whose result overflows (undefined behavior) for extreme
 * operands such as INT_MIN vs INT_MAX.
 */
int compareFunction(const void *a, const void *b) {
    int lhs = *(const int *) a;
    int rhs = *(const int *) b;
    return (lhs > rhs) - (lhs < rhs);
}
/*
 * Parallel merge sort: splits `array` into four quarters, sorts each as an
 * OpenMP task, merges A+B and C+D into the scratch buffer, then merges the
 * two halves back into `array`.  `space` must be a scratch buffer of at
 * least `arraySize` ints.  Intended to be called from inside an OpenMP
 * parallel region (see main: parallel + single).
 */
void multisort(int *array, int *space, int arraySize) {
    // If there are not enough elements, use quicksort
    if (arraySize < 4) {
        qsort(array, arraySize, sizeof(int), compareFunction);
        return;
    }
    int quarter = arraySize / 4;
    // Quarter start pointers in the data array and the scratch buffer; the
    // last quarter also absorbs the arraySize % 4 remainder.
    int *startA = array;
    int *spaceA = space;
    int *startB = startA + quarter;
    int *spaceB = spaceA + quarter;
    int *startC = startB + quarter;
    int *spaceC = spaceB + quarter;
    int *startD = startC + quarter;
    int *spaceD = spaceC + quarter;
#pragma omp task
    multisort(startA, spaceA, quarter);
#pragma omp task
    multisort(startB, spaceB, quarter);
#pragma omp task
    multisort(startC, spaceC, quarter);
#pragma omp task
    multisort(startD, spaceD, arraySize - 3 * quarter);
    // all four quarter sorts must complete before merging
#pragma omp taskwait
#pragma omp task
    merge(startA, startA + quarter - 1, startB, startB + quarter - 1, spaceA);
#pragma omp task
    merge(startC, startC + quarter - 1, startD, array + arraySize - 1, spaceC);
    // both half-merges must complete before the final merge
#pragma omp taskwait
    merge(spaceA, spaceC - 1, spaceC, spaceA + arraySize - 1, startA);
}
/*
 * Entry point: reads N, the OpenMP thread count and N elements, sorts them
 * with multisort, prints the result and appends the timing to results.txt.
 * Fixes: `space` was leaked (only `array` was freed); scanf and malloc
 * results are now checked before use.
 */
int main() {
    int i, arraySize, numThreads;
    int *array, *space;
    double start, end, timeInMilliseconds;
    printf("-------------------------------\n");
    printf("Parallel Systems - Assignment 2A\n");
    printf("-------------------------------\n\n");
    printf("What array size fits you? N=");
    if (scanf("%d", &arraySize) != 1 || arraySize <= 0) {
        printf("Invalid array size!\n");
        exit(1);
    }
    /* scratch buffer must match the data array's size (see multisort) */
    array = (int *) malloc(arraySize * sizeof(int));
    space = (int *) malloc(arraySize * sizeof(int));
    if (array == NULL || space == NULL) {
        printf("Out of memory!\n");
        exit(1);
    }
    printf("\nEnter number of threads for OpenMP: ");
    if (scanf("%d", &numThreads) != 1 || numThreads <= 0) {
        printf("Invalid thread count!\n");
        exit(1);
    }
    printf("\nSetting %d threads", numThreads);
    omp_set_num_threads(numThreads);
    for (i = 0; i < arraySize; i++) {
        printf("\nGive element [%d]=", i);
        if (scanf("%d", &array[i]) != 1) {
            printf("Invalid element!\n");
            exit(1);
        }
    }
    // Measure performance: one initial task spawns the recursive task tree.
    start = omp_get_wtime();
#pragma omp parallel
#pragma omp single
    multisort(array, space, arraySize);
    end = omp_get_wtime();
    timeInMilliseconds = (end - start) * 1000;
    printf("\nArray after multisort: [ ");
    for (i = 0; i < arraySize; i++) printf("%d ", array[i]);
    printf("]\n");
    printf("\n-----------------------------------------\n");
    printf("Functions executed in %.4f milliseconds\n", timeInMilliseconds);
    printf("-----------------------------------------\n");
    FILE *f = fopen("results.txt", "a");
    if (f == NULL) {
        printf("Error opening file!\n");
        exit(1);
    }
    fprintf(f, "N: %d\t Threads: %d\t Time: %.4f ms\n", arraySize, numThreads, timeInMilliseconds);
    fclose(f);
    free(array);
    free(space);  /* was leaked in the original */
    return 0;
}
#pragma clang diagnostic pop |
mregn.c | /*
Copyright (c) 2009-2011, Jun Namikawa <jnamika@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include "utils.h"
#include "mregn.h"
/*
 * Copies the gate network's outputs into the mixture's gate activations,
 * shifted forward by gn_delay_length steps: the gate value at time n comes
 * from the gate network output at time n - gn_delay_length.  Gate entries
 * for n < gn_delay_length are left untouched.
 */
static inline void copy_gn_output_to_gate (
        struct mre_state *mre_s,
        struct rnn_state *gn_s,
        int gn_delay_length)
{
    int length;
    /* clamp so we never write past the mixture state's time series */
    length = gn_s->length + gn_delay_length;
    if (length > mre_s->length) length = mre_s->length;
    for (int i = 0; i < mre_s->mre->expert_num; i++) {
        for (int n = gn_delay_length; n < length; n++) {
            mre_s->gate[i][n] = gn_s->out_state[n-gn_delay_length][i];
        }
    }
}
/*
 * One open-loop forward pass of the coupled system.  Order matters: the
 * gate network runs first, its outputs are copied into the gates (with the
 * configured delay), and only then are the experts evaluated, since
 * mre_forward_dynamics consumes the gate values.
 */
void mregn_forward_dynamics (
        struct mre_state *mre_s,
        struct rnn_state *gn_s,
        int gn_delay_length)
{
    rnn_forward_dynamics(gn_s);
    copy_gn_output_to_gate(mre_s, gn_s, gn_delay_length);
    mre_forward_dynamics(mre_s);
}
/*
 * Open-loop forward pass for every series in the ensemble.  The gate
 * network is evaluated for all series first, the per-series gate copy is
 * parallelized with OpenMP when available, then all expert mixtures run.
 */
void mregn_forward_dynamics_forall (
        struct mixture_of_rnn_experts *mre,
        struct recurrent_neural_network *gn,
        int gn_delay_length)
{
    rnn_forward_dynamics_forall(gn);
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int i = 0; i < mre->series_num; i++) {
        copy_gn_output_to_gate(mre->mre_s + i, gn->rnn_s + i, gn_delay_length);
    }
    mre_forward_dynamics_forall(mre);
}
/*
 * Runs the coupled gate-network/expert system in closed-loop mode.  During
 * the first mre_delay_length steps the experts are driven by their recorded
 * inputs; afterwards they are fed the mixture's own output delayed by
 * mre_delay_length.  Similarly, after gn_delay_length steps the gate
 * network receives its own delayed output concatenated with the mixture's
 * delayed output instead of the recorded input.
 *
 * NOTE(review): the asserts encode the assumed layout of the gate network
 * input vector as [gate outputs | mixture inputs] -- confirm against the
 * network construction code.
 */
void mregn_forward_dynamics_in_closed_loop (
        struct mre_state *mre_s,
        struct rnn_state *gn_s,
        int mre_delay_length,
        int gn_delay_length)
{
    const struct rnn_parameters *gn_p = gn_s->rnn_p;
    double in_state[gn_p->in_state_size];
    assert(mre_s->mre->in_state_size <= mre_s->mre->out_state_size);
    assert(mre_s->mre->expert_num == gn_p->out_state_size);
    assert(gn_p->in_state_size == gn_p->out_state_size +
            mre_s->mre->in_state_size);
    for (int n = 0; n < gn_s->length && n < mre_s->length; n++) {
        struct rnn_state *rnn_s;
        if (n == 0) {
            /* first step: start each expert from its stored initial
             * context state */
            for (int i = 0; i < mre_s->mre->expert_num; i++) {
                rnn_s = mre_s->expert_rnn_s[i];
                rnn_forward_map(rnn_s->rnn_p, rnn_s->in_state[0],
                        rnn_s->init_c_inter_state, rnn_s->init_c_state,
                        rnn_s->c_inputsum[0], rnn_s->c_inter_state[0],
                        rnn_s->c_state[0], rnn_s->o_inter_state[0],
                        rnn_s->out_state[0]);
            }
        } else if (n < mre_delay_length) {
            /* warm-up: experts still see the recorded input */
            for (int i = 0; i < mre_s->mre->expert_num; i++) {
                rnn_s = mre_s->expert_rnn_s[i];
                rnn_forward_map(rnn_s->rnn_p, rnn_s->in_state[n],
                        rnn_s->c_inter_state[n-1], rnn_s->c_state[n-1],
                        rnn_s->c_inputsum[n], rnn_s->c_inter_state[n],
                        rnn_s->c_state[n], rnn_s->o_inter_state[n],
                        rnn_s->out_state[n]);
            }
        } else {
            /* closed loop: feed the mixture's delayed output back in */
            for (int i = 0; i < mre_s->mre->expert_num; i++) {
                rnn_s = mre_s->expert_rnn_s[i];
                rnn_forward_map(rnn_s->rnn_p,
                        mre_s->out_state[n-mre_delay_length],
                        rnn_s->c_inter_state[n-1], rnn_s->c_state[n-1],
                        rnn_s->c_inputsum[n], rnn_s->c_inter_state[n],
                        rnn_s->c_state[n], rnn_s->o_inter_state[n],
                        rnn_s->out_state[n]);
            }
        }
        /* refresh gates from the (delayed) gate network output */
        if (n >= gn_delay_length) {
            for (int i = 0; i < mre_s->mre->expert_num; i++) {
                mre_s->gate[i][n] = gn_s->out_state[n-gn_delay_length][i];
            }
        }
        /* mixture output = gate-weighted sum of expert outputs */
        for (int i = 0; i < mre_s->mre->out_state_size; i++) {
            mre_s->out_state[n][i] = 0;
            for (int j = 0; j < mre_s->mre->expert_num; j++) {
                mre_s->out_state[n][i] += mre_s->gate[j][n] *
                    mre_s->expert_rnn_s[j]->out_state[n][i];
            }
        }
        if (n == 0) {
            /* gate network's first step uses its stored initial state */
            rnn_forward_map(gn_p, gn_s->in_state[0], gn_s->init_c_inter_state,
                    gn_s->init_c_state, gn_s->c_inputsum[0],
                    gn_s->c_inter_state[0], gn_s->c_state[0],
                    gn_s->o_inter_state[0], gn_s->out_state[0]);
        } else {
            /* assemble the gate network input: first its own (possibly
             * delayed) output, then the mixture's (possibly delayed)
             * output, falling back to the recorded input during warm-up */
            if (n < gn_delay_length) {
                memcpy(in_state, gn_s->in_state[n], sizeof(double) *
                        gn_p->out_state_size);
            } else {
                memcpy(in_state, gn_s->out_state[n-gn_delay_length],
                        sizeof(double) * gn_p->out_state_size);
            }
            if (n < mre_delay_length) {
                memcpy(in_state + gn_p->out_state_size, gn_s->in_state[n] +
                        gn_p->out_state_size, sizeof(double) *
                        mre_s->mre->in_state_size);
            } else {
                memcpy(in_state + gn_p->out_state_size,
                        mre_s->out_state[n-mre_delay_length], sizeof(double) *
                        mre_s->mre->in_state_size);
            }
            rnn_forward_map(gn_p, in_state, gn_s->c_inter_state[n-1],
                    gn_s->c_state[n-1], gn_s->c_inputsum[n],
                    gn_s->c_inter_state[n], gn_s->c_state[n],
                    gn_s->o_inter_state[n], gn_s->out_state[n]);
        }
    }
}
/*
 * Closed-loop forward pass for every series in the ensemble; the series
 * are independent, so the loop is parallelized with OpenMP when available.
 */
void mregn_forward_dynamics_in_closed_loop_forall (
        struct mixture_of_rnn_experts *mre,
        struct recurrent_neural_network *gn,
        int mre_delay_length,
        int gn_delay_length)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int i = 0; i < mre->series_num; i++) {
        mregn_forward_dynamics_in_closed_loop(mre->mre_s + i, gn->rnn_s + i,
                mre_delay_length, gn_delay_length);
    }
}
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two `struct timeval` values.
 *
 * NOTE: *y is used as scratch space and is modified by the carry
 * normalization below (the classic glibc elapsed-time idiom).
 *
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Normalize y so that x->tv_usec >= y->tv_usec holds afterwards. */
  if (x->tv_usec < y->tv_usec) {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* tv_usec of the result is now certainly non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative difference iff the (normalized) seconds of x precede y. */
  return x->tv_sec < y->tv_sec;
}
/* Benchmark driver for the PLUTO/CLooG time-tiled 3D 25-point stencil.
 * Expects four positional arguments: Nx Ny Nz Nt (interior sizes and the
 * number of time steps).  Runs the tiled sweep TESTS times and reports the
 * best wall-clock time. */
int main(int argc, char *argv[])
{
  int i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  /* Fix: the original read Nx/Ny/Nz/Nt uninitialized when fewer than four
   * size arguments were supplied (undefined behavior). All four problem
   * sizes are now mandatory. */
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  /* +8: presumably 4 halo layers per side, matching the +/-4 neighbor
   * accesses in the stencil below — TODO confirm against the generator. */
  Nx = atoi(argv[1])+8;
  Ny = atoi(argv[2])+8;
  Nz = atoi(argv[3])+8;
  Nt = atoi(argv[4]);
  /* Allocate the two time planes of the grid: A[t%2][z][y][x].
   * NOTE(review): malloc results are not checked, as in the original
   * generated benchmark; very large sizes will crash rather than fail
   * gracefully. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  /* 13 coefficient arrays: center plus 4 per axis (axis-symmetric). */
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 4;
  tile_size[3] = 32;
  tile_size[4] = -1;  /* sentinel terminating the tile-size list */
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  /* Deterministic seed so every run/test sees the same data.
   * NOTE(review): index 0 of each dimension is left uninitialized here,
   * exactly as in the original generator — the values only feed a timing
   * benchmark, but confirm if numeric output is ever compared. */
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* (Generator residue: the glibc feature-test header license comments
     * that the source-to-source tool pasted here have been trimmed.) */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lbp, ubp;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=floord(Nt-1,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
        ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(max(1,ceild(16*t2-Nz+9,4)),2*t1+1),4*t1-4*t2+2);t3<=min(min(min(floord(4*Nt+Ny-9,4),floord(8*t1+Ny+7,4)),floord(16*t2+Ny+3,4)),floord(16*t1-16*t2+Nz+Ny+5,4));t3++) {
            for (t4=max(max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-19,32)),ceild(4*t3-Ny-19,32));t4<=min(min(min(min(floord(4*Nt+Nx-9,32),floord(8*t1+Nx+7,32)),floord(16*t2+Nx+3,32)),floord(4*t3+Nx-9,32)),floord(16*t1-16*t2+Nz+Nx+5,32));t4++) {
              for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),t3-1),8*t4+6);t5++) {
                for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) {
                    lbv=max(32*t4,4*t5+4);
                    ubv=min(32*t4+31,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) 
                      + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);          /* fix: top-level plane array was leaked */
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);       /* fix: top-level coefficient array was leaked */
  free(tile_size);  /* fix: tile-size list was leaked */
  return 0;
}
|
tree.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_TREE_H_
#define LIGHTGBM_TREE_H_
#include <LightGBM/dataset.h>
#include <LightGBM/meta.h>
#include <string>
#include <map>
#include <memory>
#include <unordered_map>
#include <vector>
namespace LightGBM {
#define kCategoricalMask (1)
#define kDefaultLeftMask (2)
/*!
* \brief Tree model
*/
/*!
 * \brief Decision-tree model: nodes are stored in flat parallel arrays.
 *        Internal nodes have non-negative indices; leaves are referenced
 *        from the child arrays via bitwise-NOT (a child value c < 0 means
 *        leaf ~c).
 */
class Tree {
 public:
  /*!
   * \brief Constructor
   * \param max_leaves The number of max leaves
   */
  explicit Tree(int max_leaves);
  /*!
   * \brief Constructor, from a string
   * \param str Model string
   * \param used_len used count of str
   */
  Tree(const char* str, size_t* used_len);
  ~Tree();
  /*!
   * \brief Performing a split on tree leaves.
   * \param leaf Index of leaf to be split
   * \param feature Index of feature; the converted index after removing useless features
   * \param real_feature Index of feature, the original index on data
   * \param threshold_bin Threshold(bin) of split
   * \param threshold_double Threshold on feature value
   * \param left_value Model Left child output
   * \param right_value Model Right child output
   * \param left_cnt Count of left child
   * \param right_cnt Count of right child
   * \param gain Split gain
   * \param missing_type missing type
   * \param default_left default direction for missing value
   * \return The index of new leaf.
   */
  int Split(int leaf, int feature, int real_feature, uint32_t threshold_bin,
            double threshold_double, double left_value, double right_value,
            int left_cnt, int right_cnt, float gain, MissingType missing_type, bool default_left);
  /*!
   * \brief Performing a split on tree leaves, with categorical feature
   * \param leaf Index of leaf to be split
   * \param feature Index of feature; the converted index after removing useless features
   * \param real_feature Index of feature, the original index on data
   * \param threshold_bin Threshold(bin) of split, use bitset to represent
   * \param num_threshold_bin size of threshold_bin
   * \param threshold Thresholds of real feature value, use bitset to represent
   * \param num_threshold size of threshold
   * \param left_value Model Left child output
   * \param right_value Model Right child output
   * \param left_cnt Count of left child
   * \param right_cnt Count of right child
   * \param gain Split gain
   * \return The index of new leaf.
   */
  int SplitCategorical(int leaf, int feature, int real_feature, const uint32_t* threshold_bin, int num_threshold_bin,
                       const uint32_t* threshold, int num_threshold, double left_value, double right_value,
                       int left_cnt, int right_cnt, float gain, MissingType missing_type);
  /*! \brief Get the output of one leaf */
  inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; }
  /*! \brief Set the output of one leaf */
  inline void SetLeafOutput(int leaf, double output) {
    leaf_value_[leaf] = output;
  }
  /*!
   * \brief Adding prediction value of this tree model to scores
   * \param data The dataset
   * \param num_data Number of total data
   * \param score Will add prediction to score
   */
  void AddPredictionToScore(const Dataset* data,
                            data_size_t num_data,
                            double* score) const;
  /*!
   * \brief Adding prediction value of this tree model to scores
   * \param data The dataset
   * \param used_data_indices Indices of used data
   * \param num_data Number of total data
   * \param score Will add prediction to score
   */
  void AddPredictionToScore(const Dataset* data,
                            const data_size_t* used_data_indices,
                            data_size_t num_data, double* score) const;
  /*!
   * \brief Prediction on one record
   * \param feature_values Feature value of this record
   * \return Prediction result
   */
  inline double Predict(const double* feature_values) const;
  inline double PredictByMap(const std::unordered_map<int, double>& feature_values) const;
  inline int PredictLeafIndex(const double* feature_values) const;
  inline int PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const;
  inline void PredictContrib(const double* feature_values, int num_features, double* output);
  /*! \brief Get Number of leaves*/
  inline int num_leaves() const { return num_leaves_; }
  /*! \brief Get depth of specific leaf*/
  inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; }
  /*! \brief Get feature of specific split*/
  inline int split_feature(int split_idx) const { return split_feature_[split_idx]; }
  inline double split_gain(int split_idx) const { return split_gain_[split_idx]; }
  /*! \brief Get the number of data points that fall at or below this node;
   *         a negative `node` is a leaf reference (~node is the leaf index) */
  inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; }
  /*!
   * \brief Shrinkage for the tree's output
   *        shrinkage rate (a.k.a learning rate) is used to tune the training process
   * \param rate The factor of shrinkage
   */
  inline void Shrinkage(double rate) {
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
    for (int i = 0; i < num_leaves_; ++i) {
      leaf_value_[i] *= rate;
    }
    shrinkage_ *= rate;
  }
  inline double shrinkage() const {
    return shrinkage_;
  }
  /*! \brief Add a constant to every leaf output; resets shrinkage to 1 */
  inline void AddBias(double val) {
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
    for (int i = 0; i < num_leaves_; ++i) {
      leaf_value_[i] = val + leaf_value_[i];
    }
    // force to 1.0
    shrinkage_ = 1.0f;
  }
  /*! \brief Collapse the tree to a single leaf with output `val` */
  inline void AsConstantTree(double val) {
    num_leaves_ = 1;
    shrinkage_ = 1.0f;
    leaf_value_[0] = val;
  }
  /*! \brief Serialize this object to string*/
  std::string ToString() const;
  /*! \brief Serialize this object to json*/
  std::string ToJSON() const;
  /*! \brief Serialize this object to if-else statement*/
  std::string ToIfElse(int index, bool predict_leaf_index) const;
  /*! \brief True when fval lies in the (-kZeroThreshold, kZeroThreshold] band treated as zero */
  inline static bool IsZero(double fval) {
    if (fval > -kZeroThreshold && fval <= kZeroThreshold) {
      return true;
    } else {
      return false;
    }
  }
  /*! \brief Test one flag bit (kCategoricalMask / kDefaultLeftMask) of a packed decision type */
  inline static bool GetDecisionType(int8_t decision_type, int8_t mask) {
    return (decision_type & mask) > 0;
  }
  /*! \brief Set or clear one flag bit of a packed decision type */
  inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) {
    if (input) {
      (*decision_type) |= mask;
    } else {
      (*decision_type) &= (127 - mask);
    }
  }
  /*! \brief Missing type is packed into bits 2-3 of the decision type */
  inline static int8_t GetMissingType(int8_t decision_type) {
    return (decision_type >> 2) & 3;
  }
  inline static void SetMissingType(int8_t* decision_type, int8_t input) {
    (*decision_type) &= 3;
    (*decision_type) |= (input << 2);
  }
  void RecomputeMaxDepth();
 private:
  std::string NumericalDecisionIfElse(int node) const;
  std::string CategoricalDecisionIfElse(int node) const;
  /*! \brief Route one raw feature value through a numerical split; returns the child id.
   *         missing_type 1 treats near-zero as missing, 2 treats NaN as missing. */
  inline int NumericalDecision(double fval, int node) const {
    uint8_t missing_type = GetMissingType(decision_type_[node]);
    if (std::isnan(fval)) {
      if (missing_type != 2) {
        fval = 0.0f;
      }
    }
    if ((missing_type == 1 && IsZero(fval))
        || (missing_type == 2 && std::isnan(fval))) {
      if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
        return left_child_[node];
      } else {
        return right_child_[node];
      }
    }
    if (fval <= threshold_[node]) {
      return left_child_[node];
    } else {
      return right_child_[node];
    }
  }
  /*! \brief Same as NumericalDecision but on binned values */
  inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
    uint8_t missing_type = GetMissingType(decision_type_[node]);
    if ((missing_type == 1 && fval == default_bin)
        || (missing_type == 2 && fval == max_bin)) {
      if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
        return left_child_[node];
      } else {
        return right_child_[node];
      }
    }
    if (fval <= threshold_in_bin_[node]) {
      return left_child_[node];
    } else {
      return right_child_[node];
    }
  }
  /*! \brief Route one raw feature value through a categorical split; for
   *         categorical nodes threshold_ stores an index into cat_boundaries_ */
  inline int CategoricalDecision(double fval, int node) const {
    uint8_t missing_type = GetMissingType(decision_type_[node]);
    int int_fval = static_cast<int>(fval);
    if (int_fval < 0) {
      return right_child_[node];;
    } else if (std::isnan(fval)) {
      // NaN is always in the right
      if (missing_type == 2) {
        return right_child_[node];
      }
      int_fval = 0;
    }
    int cat_idx = static_cast<int>(threshold_[node]);
    if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx],
                             cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) {
      return left_child_[node];
    }
    return right_child_[node];
  }
  /*! \brief Same as CategoricalDecision but on binned values */
  inline int CategoricalDecisionInner(uint32_t fval, int node) const {
    int cat_idx = static_cast<int>(threshold_in_bin_[node]);
    if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx],
                             cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) {
      return left_child_[node];
    }
    return right_child_[node];
  }
  /*! \brief Dispatch to categorical or numerical routing based on the node's flag bit */
  inline int Decision(double fval, int node) const {
    if (GetDecisionType(decision_type_[node], kCategoricalMask)) {
      return CategoricalDecision(fval, node);
    } else {
      return NumericalDecision(fval, node);
    }
  }
  inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
    if (GetDecisionType(decision_type_[node], kCategoricalMask)) {
      return CategoricalDecisionInner(fval, node);
    } else {
      return NumericalDecisionInner(fval, node, default_bin, max_bin);
    }
  }
  /*! \brief Shared bookkeeping for both split kinds (see definition below) */
  inline void Split(int leaf, int feature, int real_feature,
                    double left_value, double right_value, int left_cnt, int right_cnt, float gain);
  /*!
   * \brief Find leaf index of which record belongs by features
   * \param feature_values Feature value of this record
   * \return Leaf index
   */
  inline int GetLeaf(const double* feature_values) const;
  inline int GetLeafByMap(const std::unordered_map<int, double>& feature_values) const;
  /*! \brief Serialize one node to json*/
  std::string NodeToJSON(int index) const;
  /*! \brief Serialize one node to if-else statement*/
  std::string NodeToIfElse(int index, bool predict_leaf_index) const;
  std::string NodeToIfElseByMap(int index, bool predict_leaf_index) const;
  double ExpectedValue() const;
  /*! \brief This is used to fill in leaf_depth_ after reloading a model*/
  inline void RecomputeLeafDepths(int node = 0, int depth = 0);
  /*!
   * \brief Used by TreeSHAP for data we keep about our decision path
   */
  struct PathElement {
    int feature_index;
    double zero_fraction;
    double one_fraction;
    // note that pweight is included for convenience and is not tied with the other attributes,
    // the pweight of the i'th path element is the permutation weight of paths with i-1 ones in them
    double pweight;
    PathElement() {}
    PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {}
  };
  /*! \brief Polynomial time algorithm for SHAP values (arXiv:1706.06060)*/
  void TreeSHAP(const double *feature_values, double *phi,
                int node, int unique_depth,
                PathElement *parent_unique_path, double parent_zero_fraction,
                double parent_one_fraction, int parent_feature_index) const;
  /*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/
  static void ExtendPath(PathElement *unique_path, int unique_depth,
                         double zero_fraction, double one_fraction, int feature_index);
  /*! \brief Undo a previous extension of the decision path for TreeSHAP*/
  static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index);
  /*! \brief Determine what the total permutation weight would be if we unwound a previous extension in the decision path*/
  static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index);
  /*! \brief Number of max leaves*/
  int max_leaves_;
  /*! \brief Number of current leaves*/
  int num_leaves_;
  // following values used for non-leaf node
  /*! \brief A non-leaf node's left child */
  std::vector<int> left_child_;
  /*! \brief A non-leaf node's right child */
  std::vector<int> right_child_;
  /*! \brief A non-leaf node's split feature */
  std::vector<int> split_feature_inner_;
  /*! \brief A non-leaf node's split feature, the original index */
  std::vector<int> split_feature_;
  /*! \brief A non-leaf node's split threshold in bin */
  std::vector<uint32_t> threshold_in_bin_;
  /*! \brief A non-leaf node's split threshold in feature value */
  std::vector<double> threshold_;
  /*! \brief Number of categorical splits in the tree */
  int num_cat_;
  std::vector<int> cat_boundaries_inner_;
  std::vector<uint32_t> cat_threshold_inner_;
  std::vector<int> cat_boundaries_;
  std::vector<uint32_t> cat_threshold_;
  /*! \brief Store the information for categorical feature handle and missing value handle. */
  std::vector<int8_t> decision_type_;
  /*! \brief A non-leaf node's split gain */
  std::vector<float> split_gain_;
  // used for leaf node
  /*! \brief The parent of leaf */
  std::vector<int> leaf_parent_;
  /*! \brief Output of leaves */
  std::vector<double> leaf_value_;
  /*! \brief DataCount of leaves */
  std::vector<int> leaf_count_;
  /*! \brief Output of non-leaf nodes */
  std::vector<double> internal_value_;
  /*! \brief DataCount of non-leaf nodes */
  std::vector<int> internal_count_;
  /*! \brief Depth for leaves */
  std::vector<int> leaf_depth_;
  /*! \brief Accumulated shrinkage (learning rate) applied to leaf outputs */
  double shrinkage_;
  int max_depth_;
};
// Shared split bookkeeping: converts leaf `leaf` into internal node
// `num_leaves_ - 1` and creates a new leaf with index `num_leaves_`.
// Leaves are referenced from the child arrays via bitwise-NOT (~leaf),
// so negative child ids denote leaves.
inline void Tree::Split(int leaf, int feature, int real_feature,
                        double left_value, double right_value, int left_cnt, int right_cnt, float gain) {
  int new_node_idx = num_leaves_ - 1;
  // update parent info
  int parent = leaf_parent_[leaf];
  if (parent >= 0) {
    // if cur node is left child
    if (left_child_[parent] == ~leaf) {
      left_child_[parent] = new_node_idx;
    } else {
      right_child_[parent] = new_node_idx;
    }
  }
  // add new node
  split_feature_inner_[new_node_idx] = feature;
  split_feature_[new_node_idx] = real_feature;
  split_gain_[new_node_idx] = Common::AvoidInf(gain);
  // add two new leaves
  left_child_[new_node_idx] = ~leaf;
  right_child_[new_node_idx] = ~num_leaves_;
  // update new leaves
  leaf_parent_[leaf] = new_node_idx;
  leaf_parent_[num_leaves_] = new_node_idx;
  // save current leaf value to internal node before change
  internal_value_[new_node_idx] = leaf_value_[leaf];
  internal_count_[new_node_idx] = left_cnt + right_cnt;
  // NaN leaf outputs are clamped to 0 so prediction never propagates NaN
  leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value;
  leaf_count_[leaf] = left_cnt;
  leaf_value_[num_leaves_] = std::isnan(right_value) ? 0.0f : right_value;
  leaf_count_[num_leaves_] = right_cnt;
  // update leaf depth
  leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1;
  leaf_depth_[leaf]++;
  // NOTE(review): num_leaves_ is presumably incremented by the public
  // Split()/SplitCategorical() wrappers after this helper — confirm in tree.cpp.
}
// Predict the output for one record; a single-leaf tree is a constant.
inline double Tree::Predict(const double* feature_values) const {
  if (num_leaves_ <= 1) {
    return leaf_value_[0];
  }
  return LeafOutput(GetLeaf(feature_values));
}
// Predict the output for one sparse record given as {feature index -> value}.
inline double Tree::PredictByMap(const std::unordered_map<int, double>& feature_values) const {
  if (num_leaves_ <= 1) {
    return leaf_value_[0];
  }
  return LeafOutput(GetLeafByMap(feature_values));
}
// Return the index of the leaf this record falls into (0 for a stump).
inline int Tree::PredictLeafIndex(const double* feature_values) const {
  return (num_leaves_ > 1) ? GetLeaf(feature_values) : 0;
}
// Return the index of the leaf a sparse record falls into (0 for a stump).
inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const {
  return (num_leaves_ > 1) ? GetLeafByMap(feature_values) : 0;
}
// Accumulate per-feature SHAP contributions for one record into `output`.
// `output` has num_features + 1 slots; the last slot receives the tree's
// expected value (the bias term).
inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) {
  output[num_features] += ExpectedValue();
  // Run the recursion with preallocated space for the unique path data
  if (num_leaves_ > 1) {
    CHECK(max_depth_ >= 0);
    const int max_path_len = max_depth_ + 1;
    // Triangular buffer: recursion level d uses a path of length d + 1.
    std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2);
    TreeSHAP(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1);
  }
}
// Recursively rebuild leaf_depth_ after a model reload. Negative node ids
// encode leaves (~node is the leaf index); the root call resizes the buffer.
inline void Tree::RecomputeLeafDepths(int node, int depth) {
  if (node == 0) {
    leaf_depth_.resize(num_leaves());
  }
  if (node < 0) {
    leaf_depth_[~node] = depth;
    return;
  }
  RecomputeLeafDepths(left_child_[node], depth + 1);
  RecomputeLeafDepths(right_child_[node], depth + 1);
}
// Walk the tree from the root until a leaf (negative node id) is reached.
// Trees without categorical splits can skip the per-node dispatch.
inline int Tree::GetLeaf(const double* feature_values) const {
  int node = 0;
  const bool has_categorical = (num_cat_ > 0);
  while (node >= 0) {
    const double fval = feature_values[split_feature_[node]];
    node = has_categorical ? Decision(fval, node)
                           : NumericalDecision(fval, node);
  }
  return ~node;
}
// Same traversal as GetLeaf, but features come from a sparse map; a feature
// absent from the map is treated as 0.
inline int Tree::GetLeafByMap(const std::unordered_map<int, double>& feature_values) const {
  int node = 0;
  const bool has_categorical = (num_cat_ > 0);
  while (node >= 0) {
    auto it = feature_values.find(split_feature_[node]);
    const double fval = (it != feature_values.end()) ? it->second : 0.0f;
    node = has_categorical ? Decision(fval, node)
                           : NumericalDecision(fval, node);
  }
  return ~node;
}
} // namespace LightGBM
#endif // LightGBM_TREE_H_
|
bd98141de62665dd572234ce7297963e19357a70.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "omp.h"
struct dataobj
{
void *restrict data;
int * size;
int * npsize;
int * dsize;
int * hsize;
int * hofs;
int * oofs;
} ;
struct profiler
{
double section0;
} ;
/* Devito-generated kernel: accumulates an absorbing-boundary damping profile
 * into the 2-D `damp` field along the left/right x strips and the top/bottom
 * y strips.  [x_m, x_M] x [y_m, y_M] is the interior; the *_ltkn / *_rtkn
 * arguments give the thickness of each boundary strip.  Elapsed time of the
 * section is accumulated into timers->section0.  Always returns 0. */
int initdamp(struct dataobj *restrict damp_vec, const float h_x, const float h_y, const int x_M, const int x_m, const int y_M, const int y_m, const int abc_x_l_ltkn, const int abc_x_r_rtkn, const int abc_y_l_ltkn, const int abc_y_r_rtkn, struct profiler * timers, const int nthreads)
{
  /* View the flat payload as a 2-D array using the runtime leading dimension. */
  float (*restrict damp)[damp_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]]) damp_vec->data;
  struct timeval start_section0, end_section0;
  gettimeofday(&start_section0, NULL);
  /* Begin section0 */
  /* Left x boundary strip. The +1 offsets presumably account for a halo row/
   * column in the padded damp array — TODO confirm against the Devito op. */
  #pragma omp parallel num_threads(nthreads)
  {
    #pragma omp for collapse(1) schedule(dynamic,1)
    for (int abc_x_l = x_m; abc_x_l <= abc_x_l_ltkn + x_m - 1; abc_x_l += 1)
    {
      for (int y = y_m; y <= y_M; y += 1)
      {
        damp[abc_x_l + 1][y + 1] += (-4.12276274369678e-2F*sin(6.28318530717959F*fabs(2.5e-2F*x_m - 2.5e-2F*abc_x_l + 1.025F)) + 2.5904082296183e-1F*fabs(2.5e-2F*x_m - 2.5e-2F*abc_x_l + 1.025F))/h_x;
      }
    }
  }
  /* Right x boundary strip (mirrored distance term). */
  #pragma omp parallel num_threads(nthreads)
  {
    #pragma omp for collapse(1) schedule(dynamic,1)
    for (int abc_x_r = -abc_x_r_rtkn + x_M + 1; abc_x_r <= x_M; abc_x_r += 1)
    {
      for (int y = y_m; y <= y_M; y += 1)
      {
        damp[abc_x_r + 1][y + 1] += (-4.12276274369678e-2F*sin(6.28318530717959F*fabs(-2.5e-2F*x_M + 2.5e-2F*abc_x_r + 1.025F)) + 2.5904082296183e-1F*fabs(-2.5e-2F*x_M + 2.5e-2F*abc_x_r + 1.025F))/h_x;
      }
    }
  }
  /* Top and bottom y strips, swept once per interior x column. */
  #pragma omp parallel num_threads(nthreads)
  {
    #pragma omp for collapse(1) schedule(dynamic,1)
    for (int x = x_m; x <= x_M; x += 1)
    {
      for (int abc_y_l = y_m; abc_y_l <= abc_y_l_ltkn + y_m - 1; abc_y_l += 1)
      {
        damp[x + 1][abc_y_l + 1] += (-4.12276274369678e-2F*sin(6.28318530717959F*fabs(2.5e-2F*y_m - 2.5e-2F*abc_y_l + 1.025F)) + 2.5904082296183e-1F*fabs(2.5e-2F*y_m - 2.5e-2F*abc_y_l + 1.025F))/h_y;
      }
      for (int abc_y_r = -abc_y_r_rtkn + y_M + 1; abc_y_r <= y_M; abc_y_r += 1)
      {
        damp[x + 1][abc_y_r + 1] += (-4.12276274369678e-2F*sin(6.28318530717959F*fabs(-2.5e-2F*y_M + 2.5e-2F*abc_y_r + 1.025F)) + 2.5904082296183e-1F*fabs(-2.5e-2F*y_M + 2.5e-2F*abc_y_r + 1.025F))/h_y;
      }
    }
  }
  /* End section0 */
  gettimeofday(&end_section0, NULL);
  timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;
  return 0;
}
|
gru_utils.h | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <memory>
#include <vector>
#include "lite/backends/arm/math/quantize.h"
#include "lite/backends/arm/math/sgemm.h"
namespace paddle {
namespace lite {
namespace arm {
namespace math {
// Bundle of all buffers a GRU step operates on; pointers are non-owning.
// The int8 weight pointers are only used by the quantized code paths.
template <typename T>
struct GRUMetaValue {
  T* gate_weight;          // W_{uh}h_{t-1} and W_{rh}h_{t-1}
  T* state_weight;         // W_{ch}
  T* gate_value;           // update_gate u_{t}, reset_gate r_{t} and cell_gate
                           // \hat{h_{t}}
  T* reset_output_value;   // r_{t}\odot h_{t-1}
  T* output_value;         // H_{t}
  T* prev_out_value;       // H_{t-1}
  int8_t* gate_weight_int8;   // int8_t W_{uh}h_{t-1} and W_{rh}h_{t-1}
  int8_t* state_weight_int8;  // int8_t W_{ch}
};
// Broadcast-add a per-column bias over a (batch x size) row-major matrix:
// dout[i][j] = din[i][j] + bias[j].  Rows are processed in parallel; the
// float specialization vectorizes each row with NEON, 8 floats per step.
template <typename Dtype>
inline void gru_add_with_bias(
    const Dtype* din, const Dtype* bias, Dtype* dout, int batch, int size);
template <>
inline void gru_add_with_bias(
    const float* din, const float* bias, float* dout, int batch, int size) {
#pragma omp parallel for
  for (int i = 0; i < batch; ++i) {
    int j = 0;
    auto din_batch = din + i * size;
    auto dout_batch = dout + i * size;
    // Software-pipelined loads: vb0/vin0 for the NEXT iteration are issued at
    // the bottom of the loop body.
    // NOTE(review): these initial vld1q_f32 calls read 4 floats even when
    // size < 4 — confirm callers never pass such sizes or pad the buffers.
    float32x4_t vb0 = vld1q_f32(bias);
    float32x4_t vin0 = vld1q_f32(din_batch);
    float32x4_t vout0;
    float32x4_t vout1;
    float32x4_t vin1;
    float32x4_t vb1;
    for (; j < size - 7; j += 8) {
      vin1 = vld1q_f32(din_batch + j + 4);
      vb1 = vld1q_f32(bias + j + 4);
      vout0 = vaddq_f32(vb0, vin0);
      vout1 = vaddq_f32(vb1, vin1);
      // NOTE(review): when size is an exact multiple of 8 these prefetch
      // loads read 4 floats past the end of bias/din_batch on the last
      // iteration — confirm the tensors carry padding.
      vb0 = vld1q_f32(bias + j + 8);
      vin0 = vld1q_f32(din_batch + j + 8);
      vst1q_f32(dout_batch + j, vout0);
      vst1q_f32(dout_batch + j + 4, vout1);
    }
    // Scalar tail for the final (size % 8) elements.
    for (; j < size; ++j) {
      dout_batch[j] = din_batch[j] + bias[j];
    }
  }
}
// GRU reset phase for one batch: applies activation Act in place to the
// update gate u_t and reset gate r_t, and computes
// reset_hidden_prev = r_t * h_{t-1} (elementwise).  When hidden_prev is
// null, h_{t-1} is treated as all zeros.  Each of the four row pointers is
// advanced by its own stride after every batch row, so the buffers may have
// padding between rows.
template <lite_api::ActivationType Act>
static void gru_unit_reset_act_impl(float* updata_gate,
                                    int stride_update,
                                    float* reset_gate,
                                    int stride_reset,
                                    const float* hidden_prev,
                                    int stride_hidden_prev,
                                    float* reset_hidden_prev,
                                    int stride_reset_hidden_prev,
                                    int frame_size,
                                    int batch_size) {
#pragma omp parallel for
  for (int b = 0; b < batch_size; ++b) {
    // vpre*/prev stay zero throughout when hidden_prev is null.
    float32x4_t vpre0 = vdupq_n_f32(0.f);
    float32x4_t vpre1 = vdupq_n_f32(0.f);
    float prev = 0.f;
    int i = 0;
    // NEON main loop: 8 lanes per step (two 4-wide vectors).
    for (; i < frame_size - 7; i += 8) {
      float32x4_t vu0 = vld1q_f32(updata_gate + i);
      float32x4_t vu1 = vld1q_f32(updata_gate + i + 4);
      float32x4_t vr0 = vld1q_f32(reset_gate + i);
      float32x4_t vr1 = vld1q_f32(reset_gate + i + 4);
      float32x4_t vau0 = lite::arm::math::vactive_f32<Act>(vu0);
      float32x4_t vau1 = lite::arm::math::vactive_f32<Act>(vu1);
      if (hidden_prev) {
        vpre0 = vld1q_f32(hidden_prev + i);
        vpre1 = vld1q_f32(hidden_prev + i + 4);
      }
      float32x4_t var0 = lite::arm::math::vactive_f32<Act>(vr0);
      float32x4_t var1 = lite::arm::math::vactive_f32<Act>(vr1);
      vst1q_f32(updata_gate + i, vau0);
      vst1q_f32(updata_gate + i + 4, vau1);
      // r_t * h_{t-1}
      float32x4_t vres0 = vmulq_f32(vpre0, var0);
      float32x4_t vres1 = vmulq_f32(vpre1, var1);
      vst1q_f32(reset_gate + i, var0);
      vst1q_f32(reset_gate + i + 4, var1);
      vst1q_f32(reset_hidden_prev + i, vres0);
      vst1q_f32(reset_hidden_prev + i + 4, vres1);
    }
    // Scalar tail for the final (frame_size % 8) elements.
    for (; i < frame_size; ++i) {
      updata_gate[i] = lite::arm::math::active_f32<Act>(updata_gate[i]);
      reset_gate[i] = lite::arm::math::active_f32<Act>(reset_gate[i]);
      if (hidden_prev) {
        prev = hidden_prev[i];
      }
      reset_hidden_prev[i] = reset_gate[i] * prev;
    }
    // Advance every row pointer to the next batch row.
    updata_gate += stride_update;
    reset_gate += stride_reset;
    if (hidden_prev) {
      hidden_prev += stride_hidden_prev;
    }
    reset_hidden_prev += stride_reset_hidden_prev;
  }
}
// Elementwise output stage of a GRU step, vectorized with NEON.
//
// For each batch row:
//   origin_mode:  h = u * h_prev + (1 - u) * act(c)
//   otherwise:    h = (1 - u) * h_prev + u * act(c)
// where u is the (already activated) update gate, c the cell candidate,
// and act() the templated activation. cell_state is overwritten in place
// with act(c). hidden_prev may be null (first step), in which case
// h_prev is treated as 0.
//
// FIX: the original advanced the shared pointers (updata_gate += ...) at
// the end of each iteration while the loop is annotated with
// "#pragma omp parallel for"; that is a data race and gives every thread
// wrong offsets. Base pointers are now derived from b so the iterations
// are independent.
template <lite_api::ActivationType Act>
static void gru_unit_out_act_impl(bool origin_mode,
                                  float* updata_gate,
                                  int stride_update,
                                  float* cell_state,
                                  int stride_cell_state,
                                  const float* hidden_prev,
                                  int stride_hidden_prev,
                                  float* hidden,
                                  int stride_hidden,
                                  int frame_size,
                                  int batch_size) {
#pragma omp parallel for
  for (int b = 0; b < batch_size; ++b) {
    // Per-iteration base pointers (no shared mutable state).
    float* ug = updata_gate + b * stride_update;
    float* cs = cell_state + b * stride_cell_state;
    const float* hp =
        hidden_prev ? hidden_prev + b * stride_hidden_prev : nullptr;
    float* h = hidden + b * stride_hidden;
    float32x4_t vpre0 = vdupq_n_f32(0.f);
    float32x4_t vpre1 = vdupq_n_f32(0.f);
    float prev = 0.f;
    int i = 0;
    if (origin_mode) {
      // h = u * h_prev + (1 - u) * act(c)
      for (; i < frame_size - 7; i += 8) {
        float32x4_t vc0 = vld1q_f32(cs + i);
        float32x4_t vc1 = vld1q_f32(cs + i + 4);
        float32x4_t vu0 = vld1q_f32(ug + i);
        float32x4_t vu1 = vld1q_f32(ug + i + 4);
        float32x4_t vac0 = lite::arm::math::vactive_f32<Act>(vc0);
        float32x4_t vac1 = lite::arm::math::vactive_f32<Act>(vc1);
        if (hp) {
          vpre0 = vld1q_f32(hp + i);
          vpre1 = vld1q_f32(hp + i + 4);
        }
        float32x4_t vh0 = vmlsq_f32(vac0, vu0, vac0);  // act(c) - u*act(c)
        float32x4_t vh1 = vmlsq_f32(vac1, vu1, vac1);
        vst1q_f32(cs + i, vac0);
        vst1q_f32(cs + i + 4, vac1);
        vh0 = vmlaq_f32(vh0, vu0, vpre0);  // += u * h_prev
        vh1 = vmlaq_f32(vh1, vu1, vpre1);
        vst1q_f32(h + i, vh0);
        vst1q_f32(h + i + 4, vh1);
      }
      // Scalar tail.
      for (; i < frame_size; ++i) {
        if (hp) {
          prev = hp[i];
        }
        cs[i] = lite::arm::math::active_f32<Act>(cs[i]);
        h[i] = cs[i] * (1.f - ug[i]) + ug[i] * prev;
      }
    } else {
      // h = (1 - u) * h_prev + u * act(c)
      for (; i < frame_size - 7; i += 8) {
        float32x4_t vc0 = vld1q_f32(cs + i);
        float32x4_t vc1 = vld1q_f32(cs + i + 4);
        float32x4_t vu0 = vld1q_f32(ug + i);
        float32x4_t vu1 = vld1q_f32(ug + i + 4);
        float32x4_t vac0 = lite::arm::math::vactive_f32<Act>(vc0);
        float32x4_t vac1 = lite::arm::math::vactive_f32<Act>(vc1);
        if (hp) {
          vpre0 = vld1q_f32(hp + i);
          vpre1 = vld1q_f32(hp + i + 4);
        }
        float32x4_t vh0 = vmlsq_f32(vpre0, vpre0, vu0);  // h_prev - h_prev*u
        float32x4_t vh1 = vmlsq_f32(vpre1, vpre1, vu1);
        vst1q_f32(cs + i, vac0);
        vst1q_f32(cs + i + 4, vac1);
        vh0 = vmlaq_f32(vh0, vu0, vac0);  // += u * act(c)
        vh1 = vmlaq_f32(vh1, vu1, vac1);
        vst1q_f32(h + i, vh0);
        vst1q_f32(h + i + 4, vh1);
      }
      // Scalar tail.
      for (; i < frame_size; ++i) {
        cs[i] = lite::arm::math::active_f32<Act>(cs[i]);
        if (hp) {
          prev = hp[i];
        }
        h[i] = prev * (1.f - ug[i]) + ug[i] * cs[i];
      }
    }
  }
}
// Activates the update/reset gates and forms r_t (.) h_{t-1}.
// The gate buffer is laid out per batch row as [update | reset | cell],
// hence the row stride of 3 * frame_size for both gate pointers.
inline void gru_unit_reset_act(lite_api::ActivationType act_type,
                               GRUMetaValue<float> value,
                               int frame_size,
                               int batch_size) {
  auto updata_gate = value.gate_value;
  auto reset_gate = value.gate_value + frame_size;
  auto hidden_prev = value.prev_out_value;
  auto reset_hidden_prev = value.reset_output_value;
  int stride_update = 3 * frame_size;
  int stride_reset = 3 * frame_size;
  int stride_hidden_prev = frame_size;
  int stride_reset_hidden_prev = frame_size;

// Map the runtime activation enum onto the compile-time template argument.
// (The kIndentity spelling follows the lite_api enum definition.)
#define GRU_RESET_ACT_DISPATCH(kAct)                         \
  case lite_api::ActivationType::kAct:                       \
    gru_unit_reset_act_impl<lite_api::ActivationType::kAct>( \
        updata_gate,                                         \
        stride_update,                                       \
        reset_gate,                                          \
        stride_reset,                                        \
        hidden_prev,                                         \
        stride_hidden_prev,                                  \
        reset_hidden_prev,                                   \
        stride_reset_hidden_prev,                            \
        frame_size,                                          \
        batch_size);                                         \
    break;

  switch (act_type) {
    GRU_RESET_ACT_DISPATCH(kIndentity)
    GRU_RESET_ACT_DISPATCH(kTanh)
    GRU_RESET_ACT_DISPATCH(kSigmoid)
    GRU_RESET_ACT_DISPATCH(kRelu)
    default:
      break;
  }
#undef GRU_RESET_ACT_DISPATCH
}
// Activates the cell candidate and blends it with h_{t-1} into the final
// hidden output. The candidate occupies the third block of the per-row
// gate layout [update | reset | cell] (row stride 3 * frame_size).
inline void gru_unit_out_act(lite_api::ActivationType act_type,
                             bool origin_mode,
                             GRUMetaValue<float> value,
                             int frame_size,
                             int batch_size) {
  auto updata_gate = value.gate_value;
  auto cell_state = value.gate_value + 2 * frame_size;
  auto hidden_prev = value.prev_out_value;
  auto hidden = value.output_value;
  int stride_update = 3 * frame_size;
  int stride_cell_state = 3 * frame_size;
  int stride_hidden_prev = frame_size;
  int stride_hidden = frame_size;

// Map the runtime activation enum onto the compile-time template argument.
#define GRU_OUT_ACT_DISPATCH(kAct)                         \
  case lite_api::ActivationType::kAct:                     \
    gru_unit_out_act_impl<lite_api::ActivationType::kAct>( \
        origin_mode,                                       \
        updata_gate,                                       \
        stride_update,                                     \
        cell_state,                                        \
        stride_cell_state,                                 \
        hidden_prev,                                       \
        stride_hidden_prev,                                \
        hidden,                                            \
        stride_hidden,                                     \
        frame_size,                                        \
        batch_size);                                       \
    break;

  switch (act_type) {
    GRU_OUT_ACT_DISPATCH(kIndentity)
    GRU_OUT_ACT_DISPATCH(kTanh)
    GRU_OUT_ACT_DISPATCH(kSigmoid)
    GRU_OUT_ACT_DISPATCH(kRelu)
    default:
      break;
  }
#undef GRU_OUT_ACT_DISPATCH
}
// GRU cell functor: chains the two gate GEMMs with the elementwise
// activation helpers above (gru_unit_reset_act / gru_unit_out_act).
//
// Per batch row, value.gate_value is laid out as
//   [ update u | reset r | cell candidate c ]   (row stride 3 * frame_size),
// as established by the pointer arithmetic in the activation helpers.
template <typename T>
struct GRUUnitFunctor {
  // Float path. value.prev_out_value (h_{t-1}) may be null on the first
  // step, in which case both GEMMs are skipped.
  static void compute(GRUMetaValue<T> value,
                      int frame_size,
                      int batch_size,
                      const lite_api::ActivationType active_node,
                      const lite_api::ActivationType active_gate,
                      bool origin_mode,
                      ARMContext* ctx) {
    operators::ActivationParam act_param;
    act_param.has_active = false;  // no activation fused into the GEMMs
    // Calculate W_{uh}h_{t-1} and W_{rh}h_{t-1}
    // Get u_{t} and r_{t} before applying activation
    if (value.prev_out_value) {
      // gate[:, 0:2F] += h_{t-1} (B x F) * gate_weight (F x 2F);
      // ldc is 3F because the destination rows also hold the cell block.
      sgemm(false,
            false,
            batch_size,
            frame_size * 2,
            frame_size,
            1.f,
            value.prev_out_value,
            frame_size,
            value.gate_weight,
            frame_size * 2,
            1.f,
            value.gate_value,
            frame_size * 3,
            nullptr,
            false,
            act_param,
            ctx);
    }
    // Get u_{t} and r_{t} after applying activation
    // Get r_{t}\odot h_{t-1}, save it to value.reset_output_value
    gru_unit_reset_act(active_gate, value, frame_size, batch_size);
    // Get W_{ch}(r_{t}\odot h_{t-1}), and it adds to cell_gate \hat{h_{t}}
    if (value.prev_out_value) {
      // gate[:, 2F:3F] += (r (.) h_{t-1}) (B x F) * state_weight (F x F)
      sgemm(false,
            false,
            batch_size,
            frame_size,
            frame_size,
            1.f,
            value.reset_output_value,
            frame_size,
            value.state_weight,
            frame_size,
            1.f,
            value.gate_value + frame_size * 2,
            frame_size * 3,
            nullptr,
            false,
            act_param,
            ctx);
    }
    // Apply activation to cell_gate \hat{h_{t}} and get final h_{t}
    gru_unit_out_act(active_node, origin_mode, value, frame_size, batch_size);
  }

  // Int8 path: quantizes the dynamic activations (h_{t-1} and
  // r (.) h_{t-1}) on the fly, runs both GEMMs in int8 with per-row
  // dequantization scales (weight_scale[0] * activation scale), and keeps
  // the elementwise activation stages in float.
  static void quant_compute(GRUMetaValue<T> value,
                            int frame_size,
                            int batch_size,
                            const lite_api::ActivationType active_node,
                            const lite_api::ActivationType active_gate,
                            bool origin_mode,
                            std::vector<float> weight_scale,
                            int bit_length,
                            ARMContext* ctx) {
    operators::ActivationParam act_param;
    act_param.has_active = false;
    // Calculate W_{uh}h_{t-1} and W_{rh}h_{t-1}
    // Get u_{t} and r_{t} before applying activation
    if (value.prev_out_value) {
      // quantize h_{t-1}
      int prev_out_size = batch_size * frame_size;
      float prev_out_threshold =
          lite::arm::math::FindAbsMax(value.prev_out_value, prev_out_size);
      float prev_out_scale =
          lite::arm::math::GetScale(prev_out_threshold, bit_length);
      std::unique_ptr<int8_t[]> prev_out_value_int8(new int8_t[prev_out_size]);
      lite::arm::math::QuantizeTensor(value.prev_out_value,
                                      prev_out_value_int8.get(),
                                      prev_out_size,
                                      prev_out_scale);
      // update scale
      std::vector<float> scales(batch_size, weight_scale[0]);
      for (auto&& x : scales) {
        x *= prev_out_scale;
      }
      // gemm_s8
      std::unique_ptr<float[]> out_data(new float[batch_size * frame_size * 2]);
      lite::arm::math::gemm_s8(false,
                               false,
                               batch_size,
                               frame_size * 2,
                               frame_size,
                               prev_out_value_int8.get(),
                               value.gate_weight_int8,
                               out_data.get(),
                               nullptr,
                               false,
                               scales.data(),
                               act_param,
                               ctx);
      // Accumulate the dense (B x 2F) result into the u/r halves of the
      // gate buffer (destination row stride 3F, source row stride 2F).
      for (int i = 0; i < batch_size; i++) {
        float* dest = value.gate_value + i * frame_size * 3;
        float* src = out_data.get() + i * frame_size * 2;
        for (int j = 0; j < frame_size * 2; j++) {
          dest[j] += src[j];
        }
      }
    }
    // Get u_{t} and r_{t} after applying activation
    // Get r_{t}\odot h_{t-1}, save it to value.reset_output_value
    gru_unit_reset_act(active_gate, value, frame_size, batch_size);
    // Get W_{ch}(r_{t}\odot h_{t-1}), and it adds to cell_gate \hat{h_{t}}
    if (value.prev_out_value) {
      // quantize r_{t}\odot h_{t-1}
      int reset_out_size = batch_size * frame_size;
      float reset_out_threshold =
          lite::arm::math::FindAbsMax(value.reset_output_value, reset_out_size);
      float reset_out_scale =
          lite::arm::math::GetScale(reset_out_threshold, bit_length);
      std::unique_ptr<int8_t[]> reset_out_value_int8(
          new int8_t[reset_out_size]);
      lite::arm::math::QuantizeTensor(value.reset_output_value,
                                      reset_out_value_int8.get(),
                                      reset_out_size,
                                      reset_out_scale);
      std::vector<float> scales(batch_size, weight_scale[0]);
      for (auto&& x : scales) {
        x *= reset_out_scale;
      }
      std::unique_ptr<float[]> out_data(new float[batch_size * frame_size]);
      lite::arm::math::gemm_s8(false,
                               false,
                               batch_size,
                               frame_size,
                               frame_size,
                               reset_out_value_int8.get(),
                               value.state_weight_int8,
                               out_data.get(),
                               nullptr,
                               false,
                               scales.data(),
                               act_param,
                               ctx);
      // Accumulate into the cell-candidate third of the gate buffer.
      for (int i = 0; i < batch_size; i++) {
        float* dest = value.gate_value + frame_size * 2 + i * frame_size * 3;
        float* src = out_data.get() + i * frame_size;
        for (int j = 0; j < frame_size; j++) {
          dest[j] += src[j];
        }
      }
    }
    // Apply activation to cell_gate \hat{h_{t}} and get final h_{t}
    gru_unit_out_act(active_node, origin_mode, value, frame_size, batch_size);
  }
};
} // namespace math
} // namespace arm
} // namespace lite
} // namespace paddle
|
core_dgeqrt.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zgeqrt.c, normal z -> d, Fri Sep 28 17:38:20 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
#include <omp.h>
/***************************************************************************//**
*
* @ingroup core_geqrt
*
* Computes a QR factorization of an m-by-n tile A:
* The factorization has the form
* \f[
* A = Q \times R
* \f]
* The tile Q is represented as a product of elementary reflectors
* \f[
* Q = H(1) H(2) ... H(k),
* \f]
* where \f$ k = min(m,n) \f$.
*
* Each \f$ H(i) \f$ has the form
* \f[
* H(i) = I - \tau \times v \times v^T
* \f]
* where \f$ tau \f$ is a scalar, and \f$ v \f$ is a vector with
* v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
* and \f$ tau \f$ in tau(i).
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the tile A. m >= 0.
*
* @param[in] n
* The number of columns of the tile A. n >= 0.
*
* @param[in] ib
* The inner-blocking size. ib >= 0.
*
* @param[in,out] A
* On entry, the m-by-n tile A.
* On exit, the elements on and above the diagonal of the array
* contain the min(m,n)-by-n upper trapezoidal tile R (R is
* upper triangular if m >= n); the elements below the diagonal,
* with the array tau, represent the orthogonal tile Q as a
* product of elementary reflectors (see Further Details).
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
* @param[out] T
* The ib-by-n triangular factor T of the block reflector.
* T is upper triangular by block (economic storage);
* The rest of the array is not referenced.
*
* @param[in] ldt
* The leading dimension of the array T. ldt >= ib.
*
* @param tau
* Auxiliary workspace array of length n.
*
 * @param work
 *          Auxiliary workspace array of length at least ib*n.
 *          (There is no separate lwork argument; the caller must size
 *          work accordingly.)
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
******************************************************************************/
__attribute__((weak))
int plasma_core_dgeqrt(int m, int n, int ib,
                       double *A, int lda,
                       double *T, int ldt,
                       double *tau,
                       double *work)
{
    // Argument validation: return -i when the i-th argument is illegal.
    if (m < 0) { plasma_coreblas_error("illegal value of m"); return -1; }
    if (n < 0) { plasma_coreblas_error("illegal value of n"); return -2; }
    if (ib < 0 || (ib == 0 && m > 0 && n > 0)) {
        plasma_coreblas_error("illegal value of ib");
        return -3;
    }
    if (A == NULL) { plasma_coreblas_error("NULL A"); return -4; }
    if (lda < imax(1, m) && m > 0) {
        plasma_coreblas_error("illegal value of lda");
        return -5;
    }
    if (T == NULL) { plasma_coreblas_error("NULL T"); return -6; }
    if (ldt < imax(1, ib) && ib > 0) {
        plasma_coreblas_error("illegal value of ldt");
        return -7;
    }
    if (tau == NULL) { plasma_coreblas_error("NULL tau"); return -8; }
    if (work == NULL) { plasma_coreblas_error("NULL work"); return -9; }

    // Quick return for an empty factorization.
    if (m == 0 || n == 0 || ib == 0)
        return PlasmaSuccess;

    // Blocked QR: factor panels of width at most ib and update the
    // trailing submatrix with the accumulated block reflector.
    int minmn = imin(m, n);
    for (int j = 0; j < minmn; j += ib) {
        int jb = imin(ib, minmn-j);

        // Unblocked QR factorization of the current panel.
        LAPACKE_dgeqr2_work(LAPACK_COL_MAJOR,
                            m-j, jb,
                            &A[lda*j+j], lda,
                            &tau[j], work);

        // Form the triangular factor T of the panel's reflectors.
        LAPACKE_dlarft_work(LAPACK_COL_MAJOR,
                            lapack_const(PlasmaForward),
                            lapack_const(PlasmaColumnwise),
                            m-j, jb,
                            &A[lda*j+j], lda,
                            &tau[j],
                            &T[ldt*j], ldt);

        // Apply H^T from the left to the trailing columns, if any.
        if (n > j+jb) {
            LAPACKE_dlarfb_work(LAPACK_COL_MAJOR,
                                lapack_const(PlasmaLeft),
                                lapack_const(PlasmaTrans),
                                lapack_const(PlasmaForward),
                                lapack_const(PlasmaColumnwise),
                                m-j, n-j-jb, jb,
                                &A[lda*j+j], lda,
                                &T[ldt*j], ldt,
                                &A[lda*(j+jb)+j], lda,
                                work, n-j-jb);
        }
    }
    return PlasmaSuccess;
}
/******************************************************************************/
// OpenMP task wrapper around plasma_core_dgeqrt().
// The task reads/writes tile A and writes the triangular factor T;
// the depend clauses express exactly that to the runtime scheduler.
void plasma_core_omp_dgeqrt(int m, int n, int ib,
                            double *A, int lda,
                            double *T, int ldt,
                            plasma_workspace_t work,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(inout:A[0:lda*n]) \
                     depend(out:T[0:ib*n])
    {
        // Skip the kernel if an earlier task already failed the sequence.
        if (sequence->status == PlasmaSuccess) {
            // Prepare workspaces.
            // Each thread owns a scratch area; tau occupies the first n
            // entries and the remainder is passed as the kernel's work
            // array (tau+n) — see the tau/work sizes in the kernel docs.
            int tid = omp_get_thread_num();
            double *tau = ((double*)work.spaces[tid]);
            // Call the kernel.
            int info = plasma_core_dgeqrt(m, n, ib,
                                          A, lda,
                                          T, ldt,
                                          tau,
                                          tau+n);
            // Mark the whole sequence failed on any kernel error.
            if (info != PlasmaSuccess) {
                plasma_error("core_dgeqrt() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
|
main.c | int J;
/* Fills A[k] = k + (N + k) for k in [0, N). The file-scope variable J is
 * firstprivate on entry to the worksharing loop and lastprivate on exit,
 * so after the call J holds the value written by the final iteration. */
void foo(int N, double *restrict A) {
#pragma omp parallel default(shared)
  {
#pragma omp for lastprivate(J) firstprivate(J)
    for (int k = 0; k < N; ++k) {
      J = N + k;
      A[k] = k + J;
    }
  }
}
|
omp_example.c | #include <omp.h>
#include <stdio.h>
/* Demonstrates a worksharing loop: b[i] becomes the average of two
 * consecutive a-values; b[0] is intentionally never written or printed. */
int main()
{
    int n = 10;
    int i;
    float a[n];
    float b[n];

    /* Initialize the input ramp. */
    for (i = 0; i < n; i++) {
        a[i] = i;
    }

#pragma omp parallel for
    /* i is private by default */
    for (i = 1; i < n; i++) {
        b[i] = (a[i] + a[i-1]) / 2.0;
    }

    /* Print the averaged values (indices 1..n-1 only). */
    for (i = 1; i < n; i++) {
        printf("%f ", b[i]);
    }
    printf("\n");

    return 0;
}
|
median_filter_new.c | /* This file was taken from modified dcraw published by Paul Lee
on January 23, 2009, taking dcraw ver.8.90/rev.1.417
as basis.
http://sites.google.com/site/demosaicalgorithms/modified-dcraw
As modified dcraw source code was published, the release under
GPL Version 2 or later option could be applied, so this file
is taken under this premise.
*/
/*
Copyright (C) 2009 Paul Lee
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/*
differential median filter
*/
#define PIX_SORT(a,b) { if ((a)>(b)) {temp=(a);(a)=(b);(b)=temp;} }

/*
   3x3 differential median filter: median-filters the chroma differences
   R-G and B-G in the mf buffer, then rebuilds R, B and finally G from
   the filtered differences. Repeated med_passes times.
*/
void CLASS median_filter_new()
{
  int (*mf)[3], (*pc)[3], p[9], indx, row, col, c, d, temp, i, v0, pass;
  int p11, p12, p13, p31, p32, p33;

  /* Neighbor offsets (in pixels) for the rows above and below. */
  p11 = -width-1;
  p12 = p11+1;
  p13 = p12+1;
  p31 = width-1;
  p32 = p31+1;
  p33 = p32+1;

  /* Allocate buffer for 3x3 median filter */
  mf = (int (*)[3]) calloc(width*height, sizeof *mf);

  for (pass=1; pass <= med_passes; pass++) {
#ifdef DCRAW_VERBOSE
    if (verbose)
      fprintf (stderr,_("3x3 differential median filter pass %d...\n"), pass);
#endif
    for (c=0; c < 3; c+=2) {
      /* Compute median(R-G) and median(B-G) */
      for (indx=0; indx < height*width; indx++)
        mf[indx][c] = image[indx][c] - image[indx][1];

      /* Apply 3x3 median filter */
#ifdef LIBRAW_USE_OPENMP
      /* FIX: "temp" is used by PIX_SORT inside the loop body and must be
         private as well; leaving it shared is a data race that corrupts
         the sorting network. */
#pragma omp parallel for private(row,col,pc,p,temp)
#endif
      for (row=1; row < height-1; row++)
        for (col=1; col < width-1; col++) {
          pc = mf + row*width+col;
          /* Assign 3x3 differential color values */
          p[0] = pc[p11][c]; p[1] = pc[p12][c]; p[2] = pc[p13][c];
          p[3] = pc[ -1][c]; p[4] = pc[  0][c]; p[5] = pc[  1][c];
          p[6] = pc[p31][c]; p[7] = pc[p32][c]; p[8] = pc[p33][c];
          /* Sort for median of 9 values */
          PIX_SORT(p[1],p[2]); PIX_SORT(p[4], p[5]); PIX_SORT(p[7],p[8]);
          PIX_SORT(p[0],p[1]); PIX_SORT(p[3], p[4]); PIX_SORT(p[6],p[7]);
          PIX_SORT(p[1],p[2]); PIX_SORT(p[4], p[5]); PIX_SORT(p[7],p[8]);
          PIX_SORT(p[0],p[3]); PIX_SORT(p[5], p[8]); PIX_SORT(p[4],p[7]);
          PIX_SORT(p[3],p[6]); PIX_SORT(p[1], p[4]); PIX_SORT(p[2],p[5]);
          PIX_SORT(p[4],p[7]); PIX_SORT(p[4], p[2]); PIX_SORT(p[6],p[4]);
          PIX_SORT(p[4],p[2]);
          /* Stash the median in the (otherwise unused) green slot. */
          pc[0][1] = p[4];
        }

      /* Copy medians back into channel c in a second sweep, so the
         filter above always reads unfiltered neighbor values. */
      for (row=1; row < height-1; row++)
        for (col=1; col < width-1; col++) {
          pc = mf + row*width+col;
          pc[0][c] = pc[0][1]; }
    }

    /* red/blue at GREEN pixel locations */
    for (row=1; row < height-1; row++)
      for (col=1+(FC(row,2) & 1), c=FC(row,col+1); col < width-1; col+=2) {
        indx = row*width+col;
        for (i=0; i < 2; c=2-c, i++) {
          v0 = image[indx][1]+mf[indx][c];
          image[indx][c] = CLIP(v0);
        }
      }

    /* red/blue at BLUE/RED pixel locations */
    for (row=2; row < height-2; row++)
      for (col=2+(FC(row,2) & 1), c=2-FC(row,col); col < width-2; col+=2) {
        indx = row*width+col;
        v0 = image[indx][1]+mf[indx][c];
        image[indx][c] = CLIP(v0);
      }

    /* green at RED/BLUE location */
    for (row=1; row < height-1; row++)
      for (col=1+(FC(row,1) & 1), c=FC(row,col); col < width-3; col+=2) {
        indx = row*width+col;
        d = 2 - c;
        v0 = (image[indx][c]-mf[indx][c]+image[indx][d]-mf[indx][d]+1) >> 1;
        image[indx][1] = CLIP(v0);
      }
  }

  /* Free buffer */
  free(mf);
}
#undef PIX_SORT
|
sort.c | /**
* @file sort.c
* @author timoteus <mail@timoteus.se>
*/
#include "sort.h"
#include "print.h"
/* Use default values if not manually overridden. */
int rock_radix_bits = ROCK_USE_DEFAULT;   /* bits consumed per radix pass */
int rock_num_threads = ROCK_USE_DEFAULT;  /* explicit OpenMP thread count */
/*
 * Per-thread body of the parallel LSD radix sort, executed inside the
 * parallel region opened by indx_sort(). Each thread owns a contiguous
 * chunk of indx (indx_offset/size) and its own slice of the shared bins
 * array (bins_offset). Dimensions are processed from dims[num_dims-1]
 * down to dims[0], rock_radix_bits bits per pass (clamped at a
 * dimension's bit-field boundary). Each pass ping-pongs between indx and
 * indx_alt (and perm/perm_alt); *num_passes counts the passes so the
 * caller can tell which buffer holds the result.
 */
static inline void
indx_sort_thread(rock_desc_t *desc,
                 rock_uint_t num_dims,
                 rock_uint_t *dims,
                 rock_perm_t *perm,
                 rock_perm_t *perm_alt,
                 rock_indx_t *indx,
                 rock_indx_t *indx_alt,
                 bool *swapped,
                 rock_uint_t size,
                 rock_uint_t num_bins,
                 rock_uint_t *bins,
                 rock_uint_t bins_offset,
                 rock_uint_t indx_offset,
                 int *num_passes)
{
    int num_threads = omp_get_num_threads();
    int dim_num = num_dims - 1;
    int dim = dims[dim_num];
    int offset = 0;
    int dim_offset = 0;
    int dim_bits = 0;
    int num_bits = 0;
    if (swapped != NULL) {
        *swapped = false;
    }
    bool first_pass = true;
    int total_bits = 0;
    int processed_bits = 0;
    /* Total number of key bits to consume across all requested dims. */
    for (rock_uint_t i = 0; i < num_dims; i++) {
        total_bits += desc->bit_width[dims[i]];
    }
    while (processed_bits < total_bits) {
        (*num_passes)++;
        /* Update extraction information. */
        /* Advance to the next dimension when the current one is done. */
        if (first_pass || offset == dim_offset + dim_bits) {
            dim = dims[dim_num];
            dim_offset = desc->bit_offset[dim];
            dim_bits = desc->bit_width[dim];
            num_bits = rock_radix_bits;
            offset = dim_offset;
            dim_num--;
        }
        /* Clamp the digit so it never crosses the dim's bit field. */
        if (offset + rock_radix_bits > dim_offset + dim_bits) {
            num_bits = dim_offset + dim_bits - offset;
        }
        /* Ping-pong the in/out buffers between passes (local pointer
           swap only; each thread does the same swap). */
        if (!first_pass) {
            rock_indx_swap(&indx, &indx_alt);
            if (perm != NULL) {
                rock_perm_swap(&perm, &perm_alt);
            }
        }
        /* Mask selecting the num_bits-wide digit at bit `offset`. */
        rock_uint_t mask = (~(rock_uint_t)(~(rock_uint_t)
            0 << num_bits) << offset);
#pragma omp barrier
        /* Phase 1: Histogram. */
        /* Count digit occurrences in this thread's chunk; on the first
           pass also seed the identity permutation. */
        for (rock_uint_t i = 0; i < size; i++) {
            if (perm != NULL && first_pass) {
                perm->v[indx_offset+i] = indx_offset+i;
            }
            rock_uint_t val = (indx->v[indx_offset+i] & mask) >> offset;
            bins[bins_offset+val]++;
        }
#pragma omp barrier
#pragma omp master
        {
            /* Phase 2: Prefix sum. */
            /* Exclusive scan over (digit, thread) so every thread gets
               disjoint output slots in digit order.
               NOTE(review): k (unsigned) is compared against the signed
               num_threads — fine for the positive counts used here. */
            rock_uint_t total = 0;
            for (rock_uint_t i = 0; i < num_bins; i++) {
                for (rock_uint_t k = 0; k < num_threads; k++) {
                    rock_uint_t old = bins[(k*num_bins) + i];
                    bins[(k*num_bins) + i] = total;
                    total += old;
                }
            }
        }
#pragma omp barrier
        /* Phase 3: Movement. */
        /* Scatter elements (and their permutation entries) to the slots
           reserved by the prefix sum; stable within a digit. */
        for (rock_uint_t i = 0; i < size; i++) {
            rock_uint_t ele = indx->v[indx_offset+i];
            int val = (ele & mask) >> offset;
            int pos = bins[bins_offset+val]++;
            indx_alt->v[pos] = ele;
            if (perm != NULL) {
                perm_alt->v[pos] = perm->v[indx_offset+i];
            }
        }
        offset += num_bits;
        processed_bits += num_bits;
        if (first_pass) {
            first_pass = false;
        }
#pragma omp barrier
        /* Master clears the histogram for the next pass; the barrier at
           the top of the loop keeps other threads from racing ahead. */
#pragma omp master
        memset(bins, 0, num_threads * num_bins * sizeof(rock_uint_t));
    }
}
/*
 * Sets up buffers, opens the parallel region, and runs the per-thread
 * radix sort. Scratch buffers (indx_alt/perm_alt) are allocated here
 * when the caller did not pass them; in that case an odd number of
 * passes means the result sits in the scratch buffer and is copied
 * back before freeing. When the caller DID pass scratch buffers,
 * *swapped tells it which buffer holds the sorted data.
 */
static inline void
indx_sort(rock_desc_t *desc,
          rock_uint_t num_dims,
          rock_uint_t *dims,
          rock_perm_t *perm,
          rock_perm_t *perm_alt,
          rock_indx_t *indx,
          rock_indx_t *indx_alt,
          bool *swapped)
{
    /* Buffer setup. */
    /* One histogram bin per digit value; saturate if the radix width
       exceeds what fits in rock_uint_t. */
    rock_uint_t num_bins =
        (rock_radix_bits > ROCK_MAX_SHIFT) ? ROCK_UINT_MAX :
        (rock_uint_t) 1 << rock_radix_bits;
    rock_uint_t *bins = NULL;
    bool indx_alt_passed = true;
    bool perm_alt_passed = true;
    if (indx_alt == NULL) {
        indx_alt_passed = false;
        indx_alt = rock_indx_init(indx->len);
    }
    if (perm != NULL && perm_alt == NULL) {
        perm_alt_passed = false;
        perm_alt = rock_perm_init(perm->len);
    }
    if (swapped != NULL) {
        *swapped = false;
    }
#pragma omp parallel shared(indx, indx_alt, perm, perm_alt, bins)
    {
        /* Parallel setup. */
        int id, num_threads;
        int num_passes = 0;
        id = omp_get_thread_num();
        num_threads = omp_get_num_threads();
        /* Master allocates one num_bins-slice per thread; the barrier
           publishes the pointer before anyone uses it. */
#pragma omp master
        bins = calloc(num_threads * num_bins, sizeof(rock_uint_t));
#pragma omp barrier
        /* Static chunking: the last thread absorbs the remainder. */
        rock_uint_t size, chunk, indx_offset, bins_offset;
        chunk = indx->len / num_threads;
        indx_offset = id * chunk;
        bins_offset = id * num_bins;
        if (id == num_threads - 1) {
            size = indx->len - indx_offset;
        } else {
            size = chunk;
        }
        /* Sort. */
        indx_sort_thread(desc, num_dims, dims, perm, perm_alt,
                         indx, indx_alt, swapped, size, num_bins, bins,
                         bins_offset, indx_offset, &num_passes);
#pragma omp master
        {
            /* Rearrange buffers and cleanup. */
            bool odd_passes = num_passes % 2 != 0;
            if (odd_passes && swapped != NULL) {
                *swapped = true;
            }
            /* Locally-allocated scratch: copy result back if it ended up
               in the scratch buffer, then free it. */
            if (!indx_alt_passed) {
                if (odd_passes) {
                    indx->len = indx_alt->len;
                    memcpy(indx->v, indx_alt->v,
                           indx_alt->len * sizeof(rock_uint_t));
                }
                rock_indx_free(indx_alt);
            }
            if (perm != NULL && !perm_alt_passed) {
                if (odd_passes) {
                    perm->len = perm_alt->len;
                    memcpy(perm->v, perm_alt->v,
                           perm_alt->len * sizeof(rock_uint_t));
                }
                rock_perm_free(perm_alt);
            }
            free(bins);
        }
    }
}
/*
 * Sort indx (and, when perm is non-NULL, the matching permutation) over
 * the given dimensions. Convenience front end for rock_indx_sort_alt()
 * that lets the library allocate all scratch buffers internally.
 */
void
rock_indx_sort(rock_desc_t *desc,
               rock_uint_t num_dims,
               rock_uint_t *dims,
               rock_perm_t *perm,
               rock_indx_t *indx)
{
    rock_indx_sort_alt(desc, num_dims, dims, perm, NULL, indx, NULL, NULL);
}
/*
 * Full-control entry point: callers may supply their own scratch buffers
 * (indx_alt/perm_alt) and receive, via *swapped, which buffer holds the
 * sorted result.
 */
void
rock_indx_sort_alt(rock_desc_t *desc,
                   rock_uint_t num_dims,
                   rock_uint_t *dims,
                   rock_perm_t *perm,
                   rock_perm_t *perm_alt,
                   rock_indx_t *indx,
                   rock_indx_t *indx_alt,
                   bool *swapped)
{
    /* Fall back to the compiled-in radix width when unset. */
    if (rock_radix_bits == ROCK_USE_DEFAULT) {
        rock_radix_bits = ROCK_DEFAULT_RADIX_BITS;
    }

    /* Thread-count policy: an explicit override always wins; otherwise
       small inputs are sorted single-threaded. */
    if (rock_num_threads != ROCK_USE_DEFAULT) {
        omp_set_num_threads(rock_num_threads);
    } else if (indx->len <= ROCK_PARALLEL_THRESHOLD) {
        omp_set_num_threads(1);
    }

    indx_sort(desc, num_dims, dims, perm, perm_alt, indx, indx_alt, swapped);
}
|
IJMatrix_parcsr.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
/******************************************************************************
*
* IJMatrix_ParCSR interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "_hypre_parcsr_mv.h"
#include "../HYPRE.h"
/******************************************************************************
*
* hypre_IJMatrixCreateParCSR
*
*****************************************************************************/
/*
 * Builds the underlying ParCSRMatrix object from the IJ matrix's row and
 * column partitionings, shifting the partitionings so the stored starts
 * are zero-based. When row and column partitionings are the same array,
 * col_starts simply aliases row_starts.
 */
HYPRE_Int
hypre_IJMatrixCreateParCSR(hypre_IJMatrix *matrix)
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   hypre_ParCSRMatrix *par_matrix;
   HYPRE_BigInt *row_starts;
   HYPRE_BigInt *col_starts;
   HYPRE_Int num_procs;
   HYPRE_Int i;
   hypre_MPI_Comm_size(comm,&num_procs);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Assumed-partition mode: only this rank's [start, end) pair is kept,
      shifted by the global first row/col when nonzero. */
   row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   if (hypre_IJMatrixGlobalFirstRow(matrix))
   {
      for (i=0; i < 2; i++)
      {
         row_starts[i] = row_partitioning[i]- hypre_IJMatrixGlobalFirstRow(matrix);
      }
   }
   else
   {
      for (i=0; i < 2; i++)
      {
         row_starts[i] = row_partitioning[i];
      }
   }
   if (row_partitioning != col_partitioning)
   {
      col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
      if (hypre_IJMatrixGlobalFirstCol(matrix))
      {
         for (i=0; i < 2; i++)
         {
            col_starts[i] = col_partitioning[i]-hypre_IJMatrixGlobalFirstCol(matrix);
         }
      }
      else
      {
         for (i=0; i < 2; i++)
         {
            col_starts[i] = col_partitioning[i];
         }
      }
   }
   else
   {
      /* Square layout: share the same starts array. */
      col_starts = row_starts;
   }
   /* Global sizes come from the IJ matrix itself in this mode. */
   par_matrix = hypre_ParCSRMatrixCreate(comm, hypre_IJMatrixGlobalNumRows(matrix),
                                         hypre_IJMatrixGlobalNumCols(matrix),
                                         row_starts, col_starts, 0, 0, 0);
#else
   /* Global-partition mode: keep the full num_procs+1 array, shifted so
      entry 0 is zero. */
   row_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
   if (row_partitioning[0])
   {
      for (i=0; i < num_procs+1; i++)
      {
         row_starts[i] = row_partitioning[i]-row_partitioning[0];
      }
   }
   else
   {
      for (i=0; i < num_procs+1; i++)
      {
         row_starts[i] = row_partitioning[i];
      }
   }
   if (row_partitioning != col_partitioning)
   {
      col_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
      if (col_partitioning[0])
      {
         for (i=0; i < num_procs+1; i++)
         {
            col_starts[i] = col_partitioning[i]-col_partitioning[0];
         }
      }
      else
      {
         for (i=0; i < num_procs+1; i++)
         {
            col_starts[i] = col_partitioning[i];
         }
      }
   }
   else
   {
      col_starts = row_starts;
   }
   /* Global sizes are the last partition entries in this mode. */
   par_matrix = hypre_ParCSRMatrixCreate(comm,row_starts[num_procs],
                                         col_starts[num_procs],
                                         row_starts, col_starts, 0, 0, 0);
#endif
   hypre_IJMatrixObject(matrix) = par_matrix;
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetRowSizesParCSR
*
*****************************************************************************/
/*
 * Records the caller's per-row size hints (expected nonzeros per local
 * row) in the auxiliary matrix, creating the auxiliary matrix and/or
 * the row_space array when they do not exist yet.
 */
HYPRE_Int
hypre_IJMatrixSetRowSizesParCSR(hypre_IJMatrix *matrix,
                                const HYPRE_Int *sizes)
{
   HYPRE_Int local_num_rows, local_num_cols;
   HYPRE_Int i, my_id;
   HYPRE_Int *row_space;
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   hypre_AuxParCSRMatrix *aux_matrix;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_MPI_Comm_rank(comm,&my_id);
   /* Local extents from the partitioning (2-entry range in assumed-
      partition mode, per-rank slice otherwise). */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
   local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);
#else
   local_num_rows = (HYPRE_Int)(row_partitioning[my_id+1]-row_partitioning[my_id]);
   local_num_cols = (HYPRE_Int)(col_partitioning[my_id+1]-col_partitioning[my_id]);
#endif
   aux_matrix = (hypre_AuxParCSRMatrix *)hypre_IJMatrixTranslator(matrix);
   row_space = NULL;
   if (aux_matrix)
   {
      /* Reuse an existing row_space array if one was already set. */
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
   }
   if (!row_space)
   {
      row_space = hypre_CTAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
   }
   /* Copy the caller's sizes; sizes must have local_num_rows entries. */
   for (i = 0; i < local_num_rows; i++)
   {
      row_space[i] = sizes[i];
   }
   if (!aux_matrix)
   {
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows,
                                  local_num_cols, row_space);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixRowSpace(aux_matrix) = row_space;
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetDiagOffdSizesParCSR
* sets diag_i inside the diag part of the ParCSRMatrix
* and offd_i inside the offd part,
* requires exact row sizes for diag and offd
*
*****************************************************************************/
/*
 * Turns exact per-row diag/offd counts into the CSR row-pointer arrays
 * (prefix sums) of the ParCSRMatrix's diag and offd parts, creating the
 * ParCSR and auxiliary matrices as needed. Because the counts are exact,
 * the auxiliary row-space bookkeeping is disabled (need_aux = 0).
 */
HYPRE_Int
hypre_IJMatrixSetDiagOffdSizesParCSR(hypre_IJMatrix *matrix,
                                     const HYPRE_Int *diag_sizes,
                                     const HYPRE_Int *offdiag_sizes)
{
   HYPRE_Int local_num_rows;
   HYPRE_Int i;
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *)hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *)hypre_IJMatrixTranslator(matrix);
   hypre_CSRMatrix *diag;
   hypre_CSRMatrix *offd;
   HYPRE_Int *diag_i;
   HYPRE_Int *offd_i;
   if (!par_matrix)
   {
      hypre_IJMatrixCreateParCSR(matrix);
      par_matrix = (hypre_ParCSRMatrix *)hypre_IJMatrixObject(matrix);
   }
   diag = hypre_ParCSRMatrixDiag(par_matrix);
   diag_i = hypre_CSRMatrixI(diag);
   local_num_rows = hypre_CSRMatrixNumRows(diag);
   if (!diag_i)
   {
      diag_i = hypre_CTAlloc(HYPRE_Int, local_num_rows+1, HYPRE_MEMORY_SHARED);
   }
   /* Exclusive prefix sum over the diag counts (diag_i[0] stays 0 from
      the zero-initializing allocation above or the prior contents). */
   for (i = 0; i < local_num_rows; i++)
   {
      diag_i[i+1] = diag_i[i] + diag_sizes[i];
   }
   hypre_CSRMatrixI(diag) = diag_i;
   hypre_CSRMatrixNumNonzeros(diag) = diag_i[local_num_rows];
   offd = hypre_ParCSRMatrixOffd(par_matrix);
   offd_i = hypre_CSRMatrixI(offd);
   if (!offd_i)
   {
      offd_i = hypre_CTAlloc(HYPRE_Int, local_num_rows+1, HYPRE_MEMORY_SHARED);
   }
   /* Same prefix sum for the off-diagonal part. */
   for (i = 0; i < local_num_rows; i++)
   {
      offd_i[i+1] = offd_i[i] + offdiag_sizes[i];
   }
   hypre_CSRMatrixI(offd) = offd_i;
   hypre_CSRMatrixNumNonzeros(offd) = offd_i[local_num_rows];
   if (!aux_matrix)
   {
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows,
                                  hypre_CSRMatrixNumCols(diag), NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   /* Exact sizes supplied — no auxiliary row-growth machinery needed. */
   hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetMaxOffProcElmtsParCSR
*
*****************************************************************************/
/*
 * Stores the expected maximum number of matrix entries set by this rank
 * for rows owned by other ranks, so communication buffers can be sized;
 * creates the auxiliary matrix first if it does not exist.
 */
HYPRE_Int
hypre_IJMatrixSetMaxOffProcElmtsParCSR(hypre_IJMatrix *matrix,
                                       HYPRE_Int max_off_proc_elmts)
{
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_Int local_num_rows, local_num_cols, my_id;
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_MPI_Comm_rank(comm,&my_id);
   aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   if (!aux_matrix)
   {
      /* Local extents from the partitioning (2-entry range in assumed-
         partition mode, per-rank slice otherwise). */
#ifdef HYPRE_NO_GLOBAL_PARTITION
      local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
      local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);
#else
      local_num_rows = (HYPRE_Int)(row_partitioning[my_id+1]-row_partitioning[my_id]);
      local_num_cols = (HYPRE_Int)(col_partitioning[my_id+1]-col_partitioning[my_id]);
#endif
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows,
                                  local_num_cols, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixInitializeParCSR
*
* initializes AuxParCSRMatrix and ParCSRMatrix as necessary
*
*****************************************************************************/
/*
 * Initializes the AuxParCSRMatrix and ParCSRMatrix as necessary.
 * Before assembly: creates/initializes both objects and, when exact row
 * sizes were given (need_aux == 0), seeds the per-row insertion cursors
 * from the CSR row pointers. After assembly: only recreates the
 * auxiliary matrix, since assembly destroys it.
 */
HYPRE_Int
hypre_IJMatrixInitializeParCSR(hypre_IJMatrix *matrix)
{
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   HYPRE_Int local_num_rows;
   if (hypre_IJMatrixAssembleFlag(matrix) == 0)
   {
      if (!par_matrix)
      {
         hypre_IJMatrixCreateParCSR(matrix);
         par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
      }
      local_num_rows =
         hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(par_matrix));
      if (!aux_matrix)
      {
         hypre_AuxParCSRMatrixCreate(
            &aux_matrix, local_num_rows,
            hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(par_matrix)), NULL);
         hypre_IJMatrixTranslator(matrix) = aux_matrix;
      }
      hypre_ParCSRMatrixInitialize(par_matrix);
      hypre_AuxParCSRMatrixInitialize(aux_matrix);
      if (! hypre_AuxParCSRMatrixNeedAux(aux_matrix))
      {
         /* Exact-size path: point each row's insertion index at the
            start of its slot in diag/offd. */
         HYPRE_Int i, *indx_diag, *indx_offd, *diag_i, *offd_i;
         diag_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(par_matrix));
         offd_i = hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(par_matrix));
         indx_diag = hypre_AuxParCSRMatrixIndxDiag(aux_matrix);
         indx_offd = hypre_AuxParCSRMatrixIndxOffd(aux_matrix);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i=0; i < local_num_rows; i++)
         {
            indx_diag[i] = diag_i[i];
            indx_offd[i] = offd_i[i];
         }
      }
   }
   else /* AB 4/06 - the assemble routine destroys the aux matrix - so we need
           to recreate if initialize is called again*/
   {
      if (!aux_matrix)
      {
         local_num_rows =
            hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(par_matrix));
         hypre_AuxParCSRMatrixCreate(
            &aux_matrix, local_num_rows,
            hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(par_matrix)), NULL);
         hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
         hypre_IJMatrixTranslator(matrix) = aux_matrix;
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixGetRowCountsParCSR
*
* gets the number of columns for rows specified by the user
*
*****************************************************************************/
/* Return in ncols[i] the number of stored entries (diag + offd) for each
   requested global row rows[i].  Rows not owned by this process get
   ncols[i] = 0 (with an optional warning). */
HYPRE_Int hypre_IJMatrixGetRowCountsParCSR( hypre_IJMatrix *matrix,
                                            HYPRE_Int       nrows,
                                            HYPRE_BigInt   *rows,
                                            HYPRE_Int      *ncols)
{
   HYPRE_BigInt row_index;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
   HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int i, my_id, pstart, index;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);

   hypre_MPI_Comm_rank(comm,&my_id);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   pstart = 0;
#else
   pstart = my_id;
#endif

   /* BUG FIX: 'index' added to the private clause.  It was previously
      shared among all threads, so concurrent iterations raced on the
      local row number and could read back another thread's value. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i, row_index, index) HYPRE_SMP_SCHEDULE
#endif
   for (i=0; i < nrows; i++)
   {
      row_index = rows[i];
      if (row_index >= row_partitioning[pstart] &&
          row_index < row_partitioning[pstart+1])
      {
         /* compute local row number */
         index = (HYPRE_Int)(row_index - row_partitioning[pstart]);
         ncols[i] = diag_i[index+1]-diag_i[index]+offd_i[index+1]-offd_i[index];
      }
      else
      {
         ncols[i] = 0;
         if (print_level)
         {
            hypre_printf ("Warning! Row %b is not on Proc. %d!\n",
                          row_index, my_id);
         }
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixGetValuesParCSR
*
* gets values of an IJMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixGetValuesParCSR( hypre_IJMatrix *matrix,
                               HYPRE_Int       nrows,
                               HYPRE_Int      *ncols,
                               HYPRE_BigInt   *rows,
                               HYPRE_BigInt   *cols,
                               HYPRE_Complex  *values)
{
   /* Retrieve values from an assembled IJMatrix.
      Two modes, selected by the sign of nrows:
        nrows < 0 : whole-row extraction — for each of |nrows| rows, both
                    cols[] and values[] are OUTPUTS filled with every stored
                    entry of the row; ncols[] may be adjusted (with a warning)
                    if row lengths differ from what the caller supplied.
        nrows >= 0: per-entry lookup — cols[] is an INPUT listing requested
                    column indices; values[] receives the stored value at each
                    (row, col), or 0.0 if the entry is not present. */
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   HYPRE_Int assemble_flag = hypre_IJMatrixAssembleFlag(matrix);
   hypre_CSRMatrix *diag;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   hypre_CSRMatrix *offd;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_BigInt *col_map_offd;
   HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(par_matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
#ifndef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
#endif
   HYPRE_Int i, j, n, ii, indx, pstart;
   HYPRE_Int num_procs, my_id;
   HYPRE_BigInt col_0, col_n, row, col_indx, first;
   HYPRE_Int row_local, row_size;
   HYPRE_Int warning = 0;
   HYPRE_Int *counter;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   if (assemble_flag == 0)
   {
      /* values can only be read back after assembly */
      hypre_error_in_arg(1);
      if (print_level)
      {
         hypre_printf("Error! Matrix not assembled yet! HYPRE_IJMatrixGetValues\n");
      }
   }
   /* local column ownership range [col_0, col_n] and the global first
      column offset used to shift incoming column indices */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   col_0 = col_starts[0];
   col_n = col_starts[1]-1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   pstart = 0;
#else
   col_0 = col_starts[my_id];
   col_n = col_starts[my_id+1]-1;
   first = col_partitioning[0];
   pstart = my_id;
#endif
   diag = hypre_ParCSRMatrixDiag(par_matrix);
   diag_i = hypre_CSRMatrixI(diag);
   diag_j = hypre_CSRMatrixJ(diag);
   diag_data = hypre_CSRMatrixData(diag);
   offd = hypre_ParCSRMatrixOffd(par_matrix);
   offd_i = hypre_CSRMatrixI(offd);
   if (num_procs > 1)
   {
      /* offd_j/offd_data/col_map_offd stay unset for num_procs == 1;
         presumably offd then holds no entries so the offd row loops below
         iterate zero times — verify against assembly */
      offd_j = hypre_CSRMatrixJ(offd);
      offd_data = hypre_CSRMatrixData(offd);
      col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
   }
   if (nrows < 0)
   {
      /* whole-row mode: counter[] holds prefix sums of the caller-supplied
         ncols[], i.e. the expected output offset of each row */
      nrows = -nrows;
      counter = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
      counter[0] = 0;
      for (i=0; i < nrows; i++)
      {
         counter[i+1] = counter[i]+ncols[i];
      }
      indx = 0;
      for (i=0; i < nrows; i++)
      {
         row = rows[i];
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            row_size = diag_i[row_local+1] - diag_i[row_local] +
               offd_i[row_local+1] - offd_i[row_local];
            /* NOTE(review): on overflow of the caller's buffer this reports
               an error but does not return — the copy below still runs;
               confirm intended */
            if (counter[i]+row_size > counter[nrows])
            {
               hypre_error_in_arg(1);
               if (print_level)
               {
                  hypre_printf ("Error! Not enough memory! HYPRE_IJMatrixGetValues\n");
               }
            }
            if (ncols[i] < row_size)
            {
               warning = 1;
            }
            /* copy diag entries (local j shifted back to global by col_0),
               then offd entries (mapped through col_map_offd) */
            for (j = diag_i[row_local]; j < diag_i[row_local+1]; j++)
            {
               cols[indx] = (HYPRE_BigInt)diag_j[j] + col_0;
               values[indx++] = diag_data[j];
            }
            for (j = offd_i[row_local]; j < offd_i[row_local+1]; j++)
            {
               cols[indx] = col_map_offd[offd_j[j]];
               values[indx++] = offd_data[j];
            }
            counter[i+1] = indx;
         }
         else
         {
            if (print_level)
            {
               hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id);
            }
         }
      }
      if (warning)
      {
         /* some rows were longer/shorter than requested: report the actual
            per-row counts back to the caller */
         for (i=0; i < nrows; i++)
         {
            ncols[i] = counter[i+1] - counter[i];
         }
         if (print_level)
         {
            hypre_printf ("Warning! ncols has been changed!\n");
         }
      }
      hypre_TFree(counter, HYPRE_MEMORY_HOST);
   }
   else
   {
      /* per-entry mode: look each requested (row, col) up in diag or offd;
         missing entries yield 0.0 */
      indx = 0;
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols[ii];
         if (n == 0) /* empty row */
         {
            continue;
         }
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            /* compute local row number */
            for (i=0; i < n; i++)
            {
               col_indx = cols[indx] - first;
               values[indx] = 0.0;
               if (col_indx < col_0 || col_indx > col_n)
               /* search in offd */
               {
                  for (j=offd_i[row_local]; j < offd_i[row_local+1]; j++)
                  {
                     if (col_map_offd[offd_j[j]] == col_indx)
                     {
                        values[indx] = offd_data[j];
                        break;
                     }
                  }
               }
               else /* search in diag */
               {
                  col_indx = col_indx - col_0;
                  for (j=diag_i[row_local]; j < diag_i[row_local+1]; j++)
                  {
                     if (diag_j[j] == (HYPRE_Int)col_indx)
                     {
                        values[indx] = diag_data[j];
                        break;
                     }
                  }
               }
               indx++;
            }
         }
         else
         {
            if (print_level)
            {
               hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id);
            }
         }
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetValuesParCSR
*
* sets values in an IJMatrix before assembly,
*
*****************************************************************************/
/* Set (overwrite) entries of an IJMatrix.  Rows must be owned by the calling
   process.  If the matrix is already assembled, only existing entries may be
   set; otherwise values are stored in the aux matrix (need_aux) or written
   directly into the pre-sized ParCSR arrays.
   rows[ii]/ncols[ii] describe each row; row_indexes[ii] gives the offset of
   that row's data in cols[]/values[]. */
HYPRE_Int
hypre_IJMatrixSetValuesParCSR( hypre_IJMatrix       *matrix,
                               HYPRE_Int             nrows,
                               HYPRE_Int            *ncols,
                               const HYPRE_BigInt   *rows,
                               const HYPRE_Int      *row_indexes,
                               const HYPRE_BigInt   *cols,
                               const HYPRE_Complex  *values )
{
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_Int row_local;
   HYPRE_BigInt col_0, col_n, row;
   HYPRE_Int i, ii, j, n, not_found;
   HYPRE_BigInt **aux_j;
   HYPRE_BigInt *local_j;
   HYPRE_BigInt *tmp_j;
   HYPRE_Complex **aux_data;
   HYPRE_Complex *local_data;
   HYPRE_Complex *tmp_data;
   HYPRE_Int diag_space, offd_space;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int tmp_indx, indx;
   HYPRE_Int space, size, old_size;
   HYPRE_Int cnt, cnt_diag, cnt_offd;
   HYPRE_Int pos_diag, pos_offd;
   HYPRE_Int len_diag, len_offd;
   HYPRE_Int offd_indx, diag_indx;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_BigInt first;
   HYPRE_Int pstart;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   /* local column ownership range and global first-column offset */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1]-1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   pstart = 0;
#else
   col_0 = col_partitioning[my_id];
   col_n = col_partitioning[my_id+1]-1;
   first = col_partitioning[0];
   pstart = my_id;
#endif

   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      if (print_level)
      {
         hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n");
      }
   }

   if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
   {
      /* assembled case: the sparsity pattern is fixed — only values of
         already-existing entries may be overwritten */
      HYPRE_BigInt *col_map_offd;
      HYPRE_Int num_cols_offd;
      HYPRE_Int j_offd;
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols[ii];
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];
         /* processor owns the row */
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            /* compute local row number */
            diag = hypre_ParCSRMatrixDiag(par_matrix);
            diag_i = hypre_CSRMatrixI(diag);
            diag_j = hypre_CSRMatrixJ(diag);
            diag_data = hypre_CSRMatrixData(diag);
            offd = hypre_ParCSRMatrixOffd(par_matrix);
            offd_i = hypre_CSRMatrixI(offd);
            num_cols_offd = hypre_CSRMatrixNumCols(offd);
            if (num_cols_offd)
            {
               col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
               offd_j = hypre_CSRMatrixJ(offd);
               offd_data = hypre_CSRMatrixData(offd);
            }
            size = diag_i[row_local+1] - diag_i[row_local] +
               offd_i[row_local+1] - offd_i[row_local];
            if (n > size) /* >>>>> Should we change this and allow this?
                             This could be same column index, i.e. only last
                             value is set, previous ones overwritten. <<<< */
            {
               hypre_error(HYPRE_ERROR_GENERIC);
               if (print_level)
               {
                  hypre_printf (" row %b too long! \n", row);
               }
               return hypre_error_flag;
            }
            pos_diag = diag_i[row_local];
            pos_offd = offd_i[row_local];
            len_diag = diag_i[row_local+1];
            len_offd = offd_i[row_local+1];
            not_found = 1;
            for (i=0; i < n; i++)
            {
               if (cols[indx] < col_0 || cols[indx] > col_n)
               /* insert into offd */
               {
                  /* offd column indices are sorted: binary-search the map */
                  j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
                                                 num_cols_offd);
                  if (j_offd == -1)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  for (j=pos_offd; j < len_offd; j++)
                  {
                     if (offd_j[j] == j_offd)
                     {
                        offd_data[j] = values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  not_found = 1;
               }
               /* diagonal element */
               else if (cols[indx] == row)
               {
                  /* diagonal entry is expected at the first diag position */
                  if (diag_j[pos_diag] != row_local)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  diag_data[pos_diag] = values[indx];
               }
               else /* insert into diag */
               {
                  for (j=pos_diag; j < len_diag; j++)
                  {
                     if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
                     {
                        diag_data[j] = values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
               }
               indx++;
            }
         }
      }
   }
   else
   {
      /* not assembled: store into the aux matrix (need_aux) or directly
         into the pre-allocated ParCSR arrays */
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols[ii];
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];
         /* processor owns the row */
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            /* compute local row number */
            if (need_aux)
            {
               /* aux storage: overwrite matching columns; new columns go
                  into remaining row space, overflow into tmp arrays that
                  trigger a realloc afterwards */
               aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
               aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
               local_j = aux_j[row_local];
               local_data = aux_data[row_local];
               space = row_space[row_local];
               old_size = row_length[row_local];
               size = space - old_size;
               if (size < n)
               {
                  size = n - size;
                  tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                  tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
               }
               else
               {
                  tmp_j = NULL;
               }
               tmp_indx = 0;
               not_found = 1;
               size = old_size;
               for (i=0; i < n; i++)
               {
                  for (j=0; j < old_size; j++)
                  {
                     if (local_j[j] == cols[indx])
                     {
                        local_data[j] = values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     if (size < space)
                     {
                        local_j[size] = cols[indx];
                        local_data[size++] = values[indx];
                     }
                     else
                     {
                        tmp_j[tmp_indx] = cols[indx];
                        tmp_data[tmp_indx++] = values[indx];
                     }
                  }
                  not_found = 1;
                  indx++;
               }
               row_length[row_local] = size+tmp_indx;
               if (tmp_indx)
               {
                  /* grow the row to hold the overflow entries */
                  aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
                                                    size+tmp_indx, HYPRE_MEMORY_HOST);
                  aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
                                                       HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
                  row_space[row_local] = size+tmp_indx;
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
               }
               cnt = size;
               for (i=0; i < tmp_indx; i++)
               {
                  local_j[cnt] = tmp_j[i];
                  local_data[cnt++] = tmp_data[i];
               }
               if (tmp_j)
               {
                  hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                  hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
               }
            }
            else /* insert immediately into data in ParCSRMatrix structure */
            {
               HYPRE_BigInt *big_offd_j;
               HYPRE_Int col_j;
               offd_indx =hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
               diag_indx =hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
               diag = hypre_ParCSRMatrixDiag(par_matrix);
               diag_i = hypre_CSRMatrixI(diag);
               diag_j = hypre_CSRMatrixJ(diag);
               diag_data = hypre_CSRMatrixData(diag);
               offd = hypre_ParCSRMatrixOffd(par_matrix);
               offd_i = hypre_CSRMatrixI(offd);
               if (num_procs > 1)
               {
                  /* offd columns are kept as global indices (big_offd_j)
                     until assembly maps them to local ones */
                  big_offd_j = hypre_CSRMatrixBigJ(offd);
                  offd_data = hypre_CSRMatrixData(offd);
                  if (!big_offd_j)
                  {
                     big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)], HYPRE_MEMORY_HOST);
                     hypre_CSRMatrixBigJ(offd) = big_offd_j;
                  }
               }
               cnt_diag = diag_indx;
               cnt_offd = offd_indx;
               diag_space = diag_i[row_local+1];
               offd_space = offd_i[row_local+1];
               not_found = 1;
               for (i=0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                  /* insert into offd */
                  {
                     for (j=offd_i[row_local]; j < offd_indx; j++)
                     {
                        if (big_offd_j[j] == cols[indx])
                        {
                           offd_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_offd < offd_space)
                        {
                           big_offd_j[cnt_offd] = cols[indx];
                           offd_data[cnt_offd++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements!\n",
                                           row);
                           }
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  else /* insert into diag */
                  {
                     /* BUG FIX: compute col_j before the search loop.  It was
                        previously assigned only inside the loop, so when the
                        loop body never ran (first insertion into this row,
                        diag_indx == diag_i[row_local]) the insert below used
                        an uninitialized value as the column index. */
                     col_j = (HYPRE_Int)(cols[indx]-col_0);
                     for (j=diag_i[row_local]; j < diag_indx; j++)
                     {
                        if (diag_j[j] == col_j)
                        {
                           diag_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_diag < diag_space)
                        {
                           diag_j[cnt_diag] = col_j;
                           diag_data[cnt_diag++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements !\n",
                                           row);
                           }
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  indx++;
               }
               /* remember fill positions for subsequent Set/Add calls */
               hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
               hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
            }
         }
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetConstantValuesParCSR
*
* sets all values in an already assembled IJMatrix to a constant value.
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetConstantValuesParCSR( hypre_IJMatrix *matrix,
                                       HYPRE_Complex   value )
{
   /* Overwrite every stored entry (diag and offd parts) of an already
      assembled IJMatrix with the given constant. */
   hypre_ParCSRMatrix *par_matrix =
      (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );

   if (!hypre_IJMatrixAssembleFlag(matrix))
   {
      /* pattern must be fixed before constant fill makes sense */
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                        "Matrix not assembled! Required to set constant values!");
      return hypre_error_flag;
   }

   {
      hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
      hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
      HYPRE_Int num_rows = hypre_CSRMatrixNumRows(diag);
      HYPRE_Int nnz_diag = hypre_CSRMatrixI(diag)[num_rows];
      HYPRE_Int nnz_offd = hypre_CSRMatrixI(offd)[num_rows];
      HYPRE_Complex *data_diag = hypre_CSRMatrixData(diag);
      HYPRE_Complex *data_offd = hypre_CSRMatrixData(offd);
      HYPRE_Int k;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
      for (k = 0; k < nnz_diag; k++)
      {
         data_diag[k] = value;
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
      for (k = 0; k < nnz_offd; k++)
      {
         data_offd[k] = value;
      }
   }

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAddToValuesParCSR
*
* adds row values to an IJMatrix
*
*****************************************************************************/
/* Add (accumulate) values into an IJMatrix.  Unlike SetValues, rows owned by
   other processes are accepted: their entries are buffered in the aux
   matrix's off-proc arrays and communicated during assembly.
   rows[ii]/ncols[ii] describe each row; row_indexes[ii] gives the offset of
   that row's data in cols[]/values[]. */
HYPRE_Int
hypre_IJMatrixAddToValuesParCSR( hypre_IJMatrix       *matrix,
                                 HYPRE_Int             nrows,
                                 HYPRE_Int            *ncols,
                                 const HYPRE_BigInt   *rows,
                                 const HYPRE_Int      *row_indexes,
                                 const HYPRE_BigInt   *cols,
                                 const HYPRE_Complex  *values )
{
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_Int row_local;
   HYPRE_BigInt row;
   HYPRE_BigInt col_0, col_n;
   HYPRE_Int i, ii, j, n, not_found;
   HYPRE_BigInt **aux_j;
   HYPRE_BigInt *local_j;
   HYPRE_BigInt *tmp_j;
   HYPRE_Complex **aux_data;
   HYPRE_Complex *local_data;
   HYPRE_Complex *tmp_data;
   HYPRE_Int diag_space, offd_space;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int tmp_indx, indx;
   HYPRE_Int space, size, old_size;
   HYPRE_Int cnt, cnt_diag, cnt_offd;
   HYPRE_Int pos_diag, pos_offd;
   HYPRE_Int len_diag, len_offd;
   HYPRE_Int offd_indx, diag_indx;
   HYPRE_BigInt first;
   HYPRE_Int pstart;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_Int current_num_elmts;
   HYPRE_Int max_off_proc_elmts;
   HYPRE_Int off_proc_i_indx;
   HYPRE_BigInt *off_proc_i;
   HYPRE_BigInt *off_proc_j;
   HYPRE_Complex *off_proc_data;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   /* local column ownership range and global first-column offset */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1]-1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   pstart = 0;
#else
   col_0 = col_partitioning[my_id];
   col_n = col_partitioning[my_id+1]-1;
   first = col_partitioning[0];
   pstart = my_id;
#endif

   if (hypre_IJMatrixAssembleFlag(matrix))
   {
      /* assembled case: only existing entries may be accumulated into;
         off-process rows are buffered for the next assembly */
      HYPRE_Int num_cols_offd;
      HYPRE_BigInt *col_map_offd;
      HYPRE_Int j_offd;
      /* AB - 4/06 - need to get this object*/
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols[ii];
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            /* compute local row number */
            diag = hypre_ParCSRMatrixDiag(par_matrix);
            diag_i = hypre_CSRMatrixI(diag);
            diag_j = hypre_CSRMatrixJ(diag);
            diag_data = hypre_CSRMatrixData(diag);
            offd = hypre_ParCSRMatrixOffd(par_matrix);
            offd_i = hypre_CSRMatrixI(offd);
            num_cols_offd = hypre_CSRMatrixNumCols(offd);
            if (num_cols_offd)
            {
               col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
               offd_j = hypre_CSRMatrixJ(offd);
               offd_data = hypre_CSRMatrixData(offd);
            }
            size = diag_i[row_local+1] - diag_i[row_local] +
               offd_i[row_local+1] - offd_i[row_local];
            if (n > size) /* >>>>> Should we change this and allow this?
                             This could be same column index, i.e. only last
                             value is set, previous ones overwritten. <<<< */
            {
               hypre_error(HYPRE_ERROR_GENERIC);
               if (print_level)
               {
                  hypre_printf (" row %b too long! \n", row);
               }
               return hypre_error_flag;
            }
            pos_diag = diag_i[row_local];
            pos_offd = offd_i[row_local];
            len_diag = diag_i[row_local+1];
            len_offd = offd_i[row_local+1];
            not_found = 1;
            for (i=0; i < n; i++)
            {
               if (cols[indx] < col_0 || cols[indx] > col_n)
               /* insert into offd */
               {
                  /* offd column indices are sorted: binary-search the map */
                  j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
                                                 num_cols_offd);
                  if (j_offd == -1)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  for (j=pos_offd; j < len_offd; j++)
                  {
                     if (offd_j[j] == j_offd)
                     {
                        offd_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  not_found = 1;
               }
               /* diagonal element */
               else if (cols[indx] == row)
               {
                  /* diagonal entry is expected at the first diag position */
                  if (diag_j[pos_diag] != row_local)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  diag_data[pos_diag] += values[indx];
               }
               else /* insert into diag */
               {
                  for (j=pos_diag; j < len_diag; j++)
                  {
                     if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
                     {
                        diag_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
               }
               indx++;
            }
         }
         /* not my row */
         else
         {
            /* buffer the contribution in the off-proc arrays; create a
               minimal aux matrix first if none exists */
            if (!aux_matrix)
            {
               size = (HYPRE_Int)(row_partitioning[pstart+1]-row_partitioning[pstart]);
               hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
               hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
               hypre_IJMatrixTranslator(matrix) = aux_matrix;
            }
            current_num_elmts
               = hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix);
            max_off_proc_elmts
               = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            if (!max_off_proc_elmts)
            {
               /* first off-proc contribution: allocate the buffers */
               max_off_proc_elmts = hypre_max(n,1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               /* grow the buffers to fit this contribution */
               max_off_proc_elmts += 3*n;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }
            /* AB - 4/6 - the row should be negative to indicate an add */
            /* UMY - 12/28/09 - now positive since we eliminated the feature of
               setting on other processors */
            /* off_proc_i stores (row, count) pairs for each buffered row */
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i=0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix)
               = current_num_elmts;
         }
      }
   }
   /* not assembled */
   else
   {
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols[ii];
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            /* compute local row number */
            if (need_aux)
            {
               /* aux storage: accumulate into matching columns; new columns
                  go into remaining row space, overflow into tmp arrays that
                  trigger a realloc afterwards */
               aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
               aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
               local_j = aux_j[row_local];
               local_data = aux_data[row_local];
               space = row_space[row_local];
               old_size = row_length[row_local];
               size = space - old_size;
               if (size < n)
               {
                  size = n - size;
                  tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                  tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
               }
               else
               {
                  tmp_j = NULL;
               }
               tmp_indx = 0;
               not_found = 1;
               size = old_size;
               for (i=0; i < n; i++)
               {
                  for (j=0; j < old_size; j++)
                  {
                     if (local_j[j] == cols[indx])
                     {
                        local_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     if (size < space)
                     {
                        local_j[size] = cols[indx];
                        local_data[size++] = values[indx];
                     }
                     else
                     {
                        tmp_j[tmp_indx] = cols[indx];
                        tmp_data[tmp_indx++] = values[indx];
                     }
                  }
                  not_found = 1;
                  indx++;
               }
               row_length[row_local] = size+tmp_indx;
               if (tmp_indx)
               {
                  /* grow the row to hold the overflow entries */
                  aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
                                                    size+tmp_indx, HYPRE_MEMORY_HOST);
                  aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
                                                       HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
                  row_space[row_local] = size+tmp_indx;
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
               }
               cnt = size;
               for (i=0; i < tmp_indx; i++)
               {
                  local_j[cnt] = tmp_j[i];
                  local_data[cnt++] = tmp_data[i];
               }
               if (tmp_j)
               {
                  hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                  hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
               }
            }
            else /* insert immediately into data in ParCSRMatrix structure */
            {
               HYPRE_BigInt *big_offd_j;
               offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
               diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
               diag = hypre_ParCSRMatrixDiag(par_matrix);
               diag_i = hypre_CSRMatrixI(diag);
               diag_j = hypre_CSRMatrixJ(diag);
               diag_data = hypre_CSRMatrixData(diag);
               offd = hypre_ParCSRMatrixOffd(par_matrix);
               offd_i = hypre_CSRMatrixI(offd);
               if (num_procs > 1)
               {
                  /* offd columns are kept as global indices (big_offd_j)
                     until assembly maps them to local ones */
                  big_offd_j = hypre_CSRMatrixBigJ(offd);
                  offd_data = hypre_CSRMatrixData(offd);
                  if (!big_offd_j)
                  {
                     big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)], HYPRE_MEMORY_HOST);
                     hypre_CSRMatrixBigJ(offd) = big_offd_j;
                  }
               }
               cnt_diag = diag_indx;
               cnt_offd = offd_indx;
               diag_space = diag_i[row_local+1];
               offd_space = offd_i[row_local+1];
               not_found = 1;
               for (i=0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                  /* insert into offd */
                  {
                     for (j=offd_i[row_local]; j < offd_indx; j++)
                     {
                        if (big_offd_j[j] == cols[indx])
                        {
                           offd_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_offd < offd_space)
                        {
                           big_offd_j[cnt_offd] = cols[indx];
                           offd_data[cnt_offd++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements!\n",
                                           row);
                           }
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  else /* insert into diag */
                  {
                     /* BUG FIX: initialize col_j at declaration, before the
                        search loop.  It was previously assigned only inside
                        the loop, so when the loop body never ran (first
                        insertion into this row, diag_indx == diag_i[row_local])
                        the insert below used an uninitialized column index. */
                     HYPRE_Int col_j = (HYPRE_Int)( cols[indx] - col_0);
                     for (j=diag_i[row_local]; j < diag_indx; j++)
                     {
                        if (diag_j[j] == col_j)
                        {
                           diag_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_diag < diag_space)
                        {
                           diag_j[cnt_diag] = col_j;
                           diag_data[cnt_diag++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements !\n",
                                           row);
                           }
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  indx++;
               }
               /* remember fill positions for subsequent Set/Add calls */
               hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
               hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
            }
         }
         /* not my row */
         else
         {
            /* buffer the contribution in the off-proc arrays for assembly */
            current_num_elmts
               = hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix);
            max_off_proc_elmts
               = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            if (!max_off_proc_elmts)
            {
               /* first off-proc contribution: allocate the buffers */
               max_off_proc_elmts = hypre_max(n,1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) =
                  max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               /* grow the buffers to fit this contribution */
               max_off_proc_elmts += 3*n;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix)
                  = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }
            /* off_proc_i stores (row, count) pairs for each buffered row */
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i=0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix)
               = current_num_elmts;
         }
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixDestroyParCSR
*
* frees an IJMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixDestroyParCSR(hypre_IJMatrix *matrix)
{
   /* Release both the ParCSR object and its auxiliary translator. */
   hypre_ParCSRMatrix    *par_csr = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux     = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

   hypre_ParCSRMatrixDestroy(par_csr);
   hypre_AuxParCSRMatrixDestroy(aux);

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAssembleOffProcValsParCSR
*
* This is for handling set and get values calls to off-proc. entries -
* it is called from matrix assemble. There is an alternate version for
* when the assumed partition is being used.
*
*****************************************************************************/
#ifndef HYPRE_NO_GLOBAL_PARTITION
/* Global-partition version: ships locally-cached (row, n, cols, values)
 * entries that belong to other processors to their owners, which add them
 * in via hypre_IJMatrixAddToValuesParCSR.
 *
 * off_proc_i holds (row, nelements) pairs (off_proc_i_indx entries used);
 * off_proc_j holds the concatenated column indices and off_proc_data the
 * matching values.  Row ownership is found by binary search of the full
 * row partitioning array (hypre_FindProc).
 * max_off_proc_elmts / current_num_elmts are unused here.
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_IJMatrixAssembleOffProcValsParCSR( hypre_IJMatrix *matrix,
HYPRE_Int off_proc_i_indx,
HYPRE_Int max_off_proc_elmts,
HYPRE_Int current_num_elmts,
HYPRE_BigInt *off_proc_i,
HYPRE_BigInt *off_proc_j,
HYPRE_Complex *off_proc_data )
{
MPI_Comm comm = hypre_IJMatrixComm(matrix);
hypre_MPI_Request *requests = NULL;
hypre_MPI_Status *status = NULL;
HYPRE_Int i, ii, j, j2, jj, n, row_index = 0;
HYPRE_BigInt row;
HYPRE_Int iii, iid, indx, ip;
HYPRE_Int proc_id, num_procs, my_id;
HYPRE_Int num_sends, num_sends3;
HYPRE_Int num_recvs;
HYPRE_Int num_requests;
HYPRE_Int vec_start, vec_len;
HYPRE_Int *send_procs;
HYPRE_Int *chunks;
HYPRE_BigInt *send_i;
HYPRE_Int *send_map_starts;
HYPRE_Int *dbl_send_map_starts;
HYPRE_Int *recv_procs;
HYPRE_Int *recv_chunks;
HYPRE_BigInt *recv_i;
HYPRE_Int *recv_vec_starts;
HYPRE_Int *dbl_recv_vec_starts;
HYPRE_Int *info;
HYPRE_Int *int_buffer;
HYPRE_Int *proc_id_mem;
HYPRE_BigInt *partitioning;
HYPRE_Int *displs;
HYPRE_Int *recv_buf;
HYPRE_Complex *send_data;
HYPRE_Complex *recv_data;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
partitioning = hypre_IJMatrixRowPartitioning(matrix);
info = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);
chunks = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);
proc_id_mem = hypre_CTAlloc(HYPRE_Int, off_proc_i_indx/2, HYPRE_MEMORY_HOST);
/* Pass 1: for every (row, n) pair, find the owning proc; count elements
 * (info) and row chunks (chunks) per destination and cache the owner
 * (proc_id_mem) so the packing pass need not search again. */
j=0;
for (i=0; i < off_proc_i_indx; i++)
{
row = off_proc_i[i++];
//if (row < 0) row = -row-1;
n = (HYPRE_Int)off_proc_i[i];
proc_id = hypre_FindProc(partitioning,row,num_procs);
proc_id_mem[j++] = proc_id;
info[proc_id] += n;
chunks[proc_id]++;
}
/* determine send_procs and amount of data to be sent */
num_sends = 0;
for (i=0; i < num_procs; i++)
{
if (info[i])
{
num_sends++;
}
}
send_procs = hypre_CTAlloc(HYPRE_Int, num_sends, HYPRE_MEMORY_HOST);
send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
dbl_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
num_sends3 = 3*num_sends;
int_buffer = hypre_CTAlloc(HYPRE_Int, 3*num_sends, HYPRE_MEMORY_HOST);
j = 0;
j2 = 0;
send_map_starts[0] = 0;
dbl_send_map_starts[0] = 0;
/* send_map_starts: offsets into send_i (2 header entries per chunk plus the
 * column indices); dbl_send_map_starts: offsets into send_data (values only).
 * int_buffer holds (dest proc, #chunks, #elements) triples for the allgather. */
for (i=0; i < num_procs; i++)
{
if (info[i])
{
send_procs[j++] = i;
send_map_starts[j] = send_map_starts[j-1]+2*chunks[i]+info[i];
dbl_send_map_starts[j] = dbl_send_map_starts[j-1]+info[i];
int_buffer[j2++] = i;
int_buffer[j2++] = chunks[i];
int_buffer[j2++] = info[i];
}
}
hypre_TFree(chunks, HYPRE_MEMORY_HOST);
/* Everyone publishes its (dest, #chunks, #elements) triples so each proc can
 * learn who will be sending to it and how much. */
hypre_MPI_Allgather(&num_sends3,1,HYPRE_MPI_INT,info,1,HYPRE_MPI_INT,comm);
displs = hypre_CTAlloc(HYPRE_Int, num_procs+1, HYPRE_MEMORY_HOST);
displs[0] = 0;
for (i=1; i < num_procs+1; i++)
{
displs[i] = displs[i-1]+info[i-1];
}
recv_buf = hypre_CTAlloc(HYPRE_Int, displs[num_procs], HYPRE_MEMORY_HOST);
hypre_MPI_Allgatherv(int_buffer,num_sends3,HYPRE_MPI_INT,recv_buf,info,displs,
HYPRE_MPI_INT,comm);
hypre_TFree(int_buffer, HYPRE_MEMORY_HOST);
hypre_TFree(info, HYPRE_MEMORY_HOST);
/* determine recv procs and amount of data to be received */
num_recvs = 0;
for (j=0; j < displs[num_procs]; j+=3)
{
if (recv_buf[j] == my_id)
{
num_recvs++;
}
}
recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST);
recv_chunks = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST);
recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
dbl_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
j2 = 0;
recv_vec_starts[0] = 0;
dbl_recv_vec_starts[0] = 0;
/* Scan the allgathered triples; displs[i] marks where proc i's triples
 * begin, so the sender of each matching triple is i. */
for (i=0; i < num_procs; i++)
{
for (j=displs[i]; j < displs[i+1]; j+=3)
{
if (recv_buf[j] == my_id)
{
recv_procs[j2] = i;
recv_chunks[j2++] = recv_buf[j+1];
recv_vec_starts[j2] = recv_vec_starts[j2-1]+2*recv_buf[j+1]
+recv_buf[j+2];
dbl_recv_vec_starts[j2] = dbl_recv_vec_starts[j2-1]+recv_buf[j+2];
}
if (j2 == num_recvs)
{
break;
}
}
}
hypre_TFree(recv_buf, HYPRE_MEMORY_HOST);
hypre_TFree(displs, HYPRE_MEMORY_HOST);
/* set up data to be sent to send procs */
/* send_i contains for each send proc : row no., no. of elmts and column
indices, send_data contains corresponding values */
send_i = hypre_CTAlloc(HYPRE_BigInt, send_map_starts[num_sends], HYPRE_MEMORY_HOST);
send_data = hypre_CTAlloc(HYPRE_Complex, dbl_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
recv_i = hypre_CTAlloc(HYPRE_BigInt, recv_vec_starts[num_recvs], HYPRE_MEMORY_HOST);
recv_data = hypre_CTAlloc(HYPRE_Complex, dbl_recv_vec_starts[num_recvs], HYPRE_MEMORY_HOST);
j=0;
jj=0;
/* Pass 2 (packing): send_map_starts / dbl_send_map_starts double as running
 * write cursors per destination here; they are shifted back to real start
 * offsets right after this loop. */
for (i=0; i < off_proc_i_indx; i++)
{
row = off_proc_i[i++];
n = (HYPRE_Int)off_proc_i[i];
proc_id = proc_id_mem[i/2];
indx = hypre_BinarySearch(send_procs,proc_id,num_sends);
iii = send_map_starts[indx];
iid = dbl_send_map_starts[indx];
send_i[iii++] = row;
send_i[iii++] = (HYPRE_BigInt) n;
for (ii = 0; ii < n; ii++)
{
send_i[iii++] = off_proc_j[jj];
send_data[iid++] = off_proc_data[jj++];
}
send_map_starts[indx] = iii;
dbl_send_map_starts[indx] = iid;
}
hypre_TFree(proc_id_mem, HYPRE_MEMORY_HOST);
/* undo the cursor advance: shift entries right by one and restore start 0 */
for (i=num_sends; i > 0; i--)
{
send_map_starts[i] = send_map_starts[i-1];
dbl_send_map_starts[i] = dbl_send_map_starts[i-1];
}
send_map_starts[0] = 0;
dbl_send_map_starts[0] = 0;
num_requests = num_recvs+num_sends;
if (num_requests)
{
requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);
}
/* exchange 1: integer data (row numbers, counts, column indices) */
j=0;
for (i=0; i < num_recvs; i++)
{
vec_start = recv_vec_starts[i];
vec_len = recv_vec_starts[i+1] - vec_start;
ip = recv_procs[i];
hypre_MPI_Irecv(&recv_i[vec_start], vec_len, HYPRE_MPI_BIG_INT, ip, 0, comm,
&requests[j++]);
}
for (i=0; i < num_sends; i++)
{
vec_start = send_map_starts[i];
vec_len = send_map_starts[i+1] - vec_start;
ip = send_procs[i];
hypre_MPI_Isend(&send_i[vec_start], vec_len, HYPRE_MPI_BIG_INT, ip, 0, comm,
&requests[j++]);
}
if (num_requests)
{
hypre_MPI_Waitall(num_requests, requests, status);
}
/* exchange 2: the matrix values */
j=0;
for (i=0; i < num_recvs; i++)
{
vec_start = dbl_recv_vec_starts[i];
vec_len = dbl_recv_vec_starts[i+1] - vec_start;
ip = recv_procs[i];
hypre_MPI_Irecv(&recv_data[vec_start], vec_len, HYPRE_MPI_COMPLEX,
ip, 0, comm, &requests[j++]);
}
for (i=0; i < num_sends; i++)
{
vec_start = dbl_send_map_starts[i];
vec_len = dbl_send_map_starts[i+1] - vec_start;
ip = send_procs[i];
hypre_MPI_Isend(&send_data[vec_start], vec_len, HYPRE_MPI_COMPLEX,
ip, 0, comm, &requests[j++]);
}
if (num_requests)
{
hypre_MPI_Waitall(num_requests, requests, status);
hypre_TFree(requests, HYPRE_MEMORY_HOST);
hypre_TFree(status, HYPRE_MEMORY_HOST);
}
hypre_TFree(send_i, HYPRE_MEMORY_HOST);
hypre_TFree(send_data, HYPRE_MEMORY_HOST);
hypre_TFree(send_procs, HYPRE_MEMORY_HOST);
hypre_TFree(send_map_starts, HYPRE_MEMORY_HOST);
hypre_TFree(dbl_send_map_starts, HYPRE_MEMORY_HOST);
hypre_TFree(recv_procs, HYPRE_MEMORY_HOST);
hypre_TFree(recv_vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(dbl_recv_vec_starts, HYPRE_MEMORY_HOST);
/* unpack: each received chunk is (row, n, n column indices) in recv_i with
 * its n values in recv_data; add every chunk into the local matrix */
j = 0;
j2 = 0;
for (i=0; i < num_recvs; i++)
{
for (ii=0; ii < recv_chunks[i]; ii++)
{
row = recv_i[j];
HYPRE_Int rcvi = (HYPRE_Int) recv_i[j+1];
hypre_IJMatrixAddToValuesParCSR(matrix,1,&rcvi,&row,&row_index,
&recv_i[j+2],&recv_data[j2]);
j2 += recv_i[j+1];
j += recv_i[j+1]+2;
}
}
hypre_TFree(recv_chunks, HYPRE_MEMORY_HOST);
hypre_TFree(recv_i, HYPRE_MEMORY_HOST);
hypre_TFree(recv_data, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
#else
/* assumed partition version */
/* Assumed-partition version: same contract as the global-partition variant
 * above, but row ownership is discovered through the assumed-partition
 * scheme (two hypre_DataExchangeList rounds: one to resolve the actual
 * owners of row ranges, one to ship the packed row data).
 * Integer and complex payloads are packed together into slots of
 * max(sizeof(HYPRE_BigInt), sizeof(HYPRE_Complex)) bytes.
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_IJMatrixAssembleOffProcValsParCSR( hypre_IJMatrix *matrix,
HYPRE_Int off_proc_i_indx,
HYPRE_Int max_off_proc_elmts,
HYPRE_Int current_num_elmts,
HYPRE_BigInt *off_proc_i,
HYPRE_BigInt *off_proc_j,
HYPRE_Complex *off_proc_data )
{
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int i, j, k, in_i;
HYPRE_Int myid;
HYPRE_Int proc_id, last_proc, prev_id, tmp_id;
HYPRE_Int max_response_size;
HYPRE_BigInt global_num_cols;
HYPRE_BigInt global_first_col;
HYPRE_BigInt global_first_row;
HYPRE_Int ex_num_contacts = 0, num_rows = 0;
HYPRE_BigInt range_start, range_end;
HYPRE_Int num_elements;
HYPRE_Int storage;
HYPRE_Int indx;
HYPRE_BigInt row;
HYPRE_Int num_ranges, row_index = 0;
HYPRE_Int num_recvs;
HYPRE_BigInt upper_bound;
HYPRE_Int counter;
HYPRE_Int num_real_procs;
HYPRE_Int /*current_proc,*/ original_proc_indx;
HYPRE_BigInt *row_list=NULL;
HYPRE_Int *row_list_num_elements=NULL;
HYPRE_Int *a_proc_id=NULL, *orig_order=NULL;
HYPRE_Int *real_proc_id = NULL, *us_real_proc_id = NULL;
HYPRE_Int *ex_contact_procs = NULL, *ex_contact_vec_starts = NULL;
HYPRE_BigInt *ex_contact_buf = NULL;
HYPRE_Int *recv_starts=NULL;
HYPRE_BigInt *response_buf = NULL;
HYPRE_Int *response_buf_starts=NULL;
HYPRE_Int *num_rows_per_proc = NULL, *num_elements_total = NULL;
HYPRE_Int *argsort_contact_procs = NULL;
HYPRE_Int obj_size_bytes, complex_size;
HYPRE_BigInt big_int_size;
HYPRE_Int tmp_int;
HYPRE_BigInt tmp_big_int;
HYPRE_BigInt *col_ptr;
HYPRE_BigInt *big_int_data = NULL;
HYPRE_Int big_int_data_size = 0, complex_data_size = 0;
void *void_contact_buf = NULL;
void *index_ptr;
void *recv_data_ptr;
HYPRE_Complex tmp_complex;
HYPRE_Complex *col_data_ptr;
HYPRE_Complex *complex_data = NULL;
hypre_DataExchangeResponse response_obj1, response_obj2;
hypre_ProcListElements send_proc_obj;
hypre_IJAssumedPart *apart;
hypre_MPI_Comm_rank(comm, &myid);
global_num_cols = hypre_IJMatrixGlobalNumCols(matrix);
global_first_col = hypre_IJMatrixGlobalFirstCol(matrix);
global_first_row = hypre_IJMatrixGlobalFirstRow(matrix);
/* off_proc_i stores (row, nelements) pairs */
num_rows = off_proc_i_indx/2;
/* verify that we have created the assumed partition */
if (hypre_IJMatrixAssumedPart(matrix) == NULL)
{
hypre_IJMatrixCreateAssumedPartition(matrix);
}
apart = (hypre_IJAssumedPart*) hypre_IJMatrixAssumedPart(matrix);
/*if (hypre_ParCSRMatrixAssumedPartition(par_matrix) == NULL)
{
hypre_ParCSRMatrixCreateAssumedPartition(par_matrix);
}
apart = hypre_ParCSRMatrixAssumedPartition(par_matrix);*/
row_list = hypre_CTAlloc(HYPRE_BigInt, num_rows, HYPRE_MEMORY_HOST);
row_list_num_elements = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
a_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
orig_order = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
/* get the assumed processor id for each row */
if (num_rows > 0 )
{
for (i=0; i < num_rows; i++)
{
row = off_proc_i[i*2];
//if (row < 0) row = -row - 1;
row_list[i] = row;
row_list_num_elements[i] = off_proc_i[i*2+1];
hypre_GetAssumedPartitionProcFromRow(comm, row, global_first_row,
global_num_cols, &proc_id);
a_proc_id[i] = proc_id;
orig_order[i] = i;
}
/* now we need to find the actual order of each row - sort on row -
this will result in proc ids sorted also...*/
hypre_BigQsortb2i(row_list, a_proc_id, orig_order, 0, num_rows -1);
/* calculate the number of contacts */
ex_num_contacts = 1;
last_proc = a_proc_id[0];
for (i=1; i < num_rows; i++)
{
if (a_proc_id[i] > last_proc)
{
ex_num_contacts++;
last_proc = a_proc_id[i];
}
}
}
/* now we will go through a create a contact list - need to contact assumed
processors and find out who the actual row owner is - we will contact with
a range (2 numbers) */
ex_contact_procs = hypre_CTAlloc(HYPRE_Int, ex_num_contacts, HYPRE_MEMORY_HOST);
ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, ex_num_contacts+1, HYPRE_MEMORY_HOST);
ex_contact_buf = hypre_CTAlloc(HYPRE_BigInt, ex_num_contacts*2, HYPRE_MEMORY_HOST);
counter = 0;
range_end = -1;
/* each contact carries [first row, last row] for one assumed proc */
for (i=0; i< num_rows; i++)
{
if (row_list[i] > range_end)
{
/* assumed proc */
proc_id = a_proc_id[i];
/* end of prev. range */
if (counter > 0)
{
ex_contact_buf[counter*2 - 1] = row_list[i-1];
}
/*start new range*/
ex_contact_procs[counter] = proc_id;
ex_contact_vec_starts[counter] = counter*2;
ex_contact_buf[counter*2] = row_list[i];
counter++;
hypre_GetAssumedPartitionRowRange(comm, proc_id, global_first_col, global_num_cols,
&range_start, &range_end);
}
}
/* finish the starts */
ex_contact_vec_starts[counter] = counter*2;
/* finish the last range */
if (counter > 0)
{
ex_contact_buf[counter*2 - 1] = row_list[num_rows - 1];
}
/* don't allocate space for responses */
/* create response object - can use same fill response as used in the commpkg
routine */
response_obj1.fill_response = hypre_RangeFillResponseIJDetermineRecvProcs;
response_obj1.data1 = apart; /* this is necessary so we can fill responses*/
response_obj1.data2 = NULL;
max_response_size = 6; /* 6 means we can fit 3 ranges*/
/* exchange 1: ask the assumed owners who actually owns each row range */
hypre_DataExchangeList(ex_num_contacts, ex_contact_procs,
ex_contact_buf, ex_contact_vec_starts, sizeof(HYPRE_BigInt),
sizeof(HYPRE_BigInt), &response_obj1, max_response_size, 1,
comm, (void**) &response_buf, &response_buf_starts);
/* now response_buf contains a proc_id followed by a range upper bound */
hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_buf, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(a_proc_id, HYPRE_MEMORY_HOST);
/*how many ranges were returned?*/
num_ranges = response_buf_starts[ex_num_contacts];
num_ranges = num_ranges/2;
prev_id = -1;
j = 0;
counter = 0;
num_real_procs = 0;
/* loop through ranges - create a list of actual processor ids*/
for (i=0; i<num_ranges; i++)
{
upper_bound = response_buf[i*2+1];
counter = 0;
tmp_id = response_buf[i*2];
/* loop through row_list entries - counting how many are in the range */
while (j < num_rows && row_list[j] <= upper_bound)
{
real_proc_id[j] = tmp_id;
j++;
counter++;
}
if (counter > 0 && tmp_id != prev_id)
{
num_real_procs++;
}
prev_id = tmp_id;
}
/* now we have the list of real processor ids (real_proc_id) - and the number
of distinct ones - so now we can set up data to be sent - we have
HYPRE_Int data and HYPRE_Complex data. that we will need to pack
together */
/* first find out how many rows and elements we need to send per proc - so we
can do storage */
ex_contact_procs = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
num_rows_per_proc = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
num_elements_total = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
counter = 0;
if (num_real_procs > 0 )
{
ex_contact_procs[0] = real_proc_id[0];
num_rows_per_proc[0] = 1;
num_elements_total[0] = row_list_num_elements[orig_order[0]];
/* loop through real procs - these are sorted (row_list is sorted also)*/
for (i=1; i < num_rows; i++)
{
if (real_proc_id[i] == ex_contact_procs[counter]) /* same processor */
{
num_rows_per_proc[counter] += 1; /*another row */
num_elements_total[counter] += row_list_num_elements[orig_order[i]];
}
else /* new processor */
{
counter++;
ex_contact_procs[counter] = real_proc_id[i];
num_rows_per_proc[counter] = 1;
num_elements_total[counter] = row_list_num_elements[orig_order[i]];
}
}
}
/* to pack together, we need to use the largest obj. size of
(HYPRE_Int) and (HYPRE_Complex) - if these are much different, then we are
wasting some storage, but I do not think that it will be a
large amount since this function should not be used on really
large amounts of data anyway*/
big_int_size = sizeof(HYPRE_BigInt);
complex_size = sizeof(HYPRE_Complex);
obj_size_bytes = hypre_max(big_int_size, complex_size);
/* set up data to be sent to send procs */
/* for each proc, ex_contact_buf contains #rows, row #,
no. elements, col indices, col data, row #, no. elements, col
indices, col data, etc. */
/* first calculate total storage and make vec_starts arrays */
storage = 0;
ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, num_real_procs + 1, HYPRE_MEMORY_HOST);
/* starts are stored as -(offset)-1: the sign marks "nothing written yet for
this proc" so the pack loop knows when to emit the #rows header */
ex_contact_vec_starts[0] = -1;
for (i=0; i < num_real_procs; i++)
{
storage += 1 + 2 * num_rows_per_proc[i] + 2* num_elements_total[i];
ex_contact_vec_starts[i+1] = -storage-1; /* need negative for next loop */
}
hypre_TFree(num_elements_total, HYPRE_MEMORY_HOST);
/*void_contact_buf = hypre_MAlloc(storage*obj_size_bytes);*/
void_contact_buf = hypre_CTAlloc(char, storage*obj_size_bytes, HYPRE_MEMORY_HOST);
index_ptr = void_contact_buf; /* step through with this index */
/* for each proc: #rows, row #, no. elements,
col indices, col data, row #, no. elements, col indices, col data, etc. */
/* un-sort real_proc_id - we want to access data arrays in order, so
cheaper to do this*/
us_real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
for (i=0; i < num_rows; i++)
{
us_real_proc_id[orig_order[i]] = real_proc_id[i];
}
hypre_TFree(real_proc_id, HYPRE_MEMORY_HOST);
counter = 0; /* index into data arrays */
prev_id = -1;
/* packing pass: walk the rows in their original order, appending each row's
header, column indices and values into the owner's region of the buffer */
for (i=0; i < num_rows; i++)
{
proc_id = us_real_proc_id[i];
/* can't use row list[i] - you lose the negative signs that differentiate
add/set values */
row = off_proc_i[i*2];
num_elements = row_list_num_elements[i];
/* find position of this processor */
indx = hypre_BinarySearch(ex_contact_procs, proc_id, num_real_procs);
in_i = ex_contact_vec_starts[indx];
index_ptr = (void *) ((char *) void_contact_buf + in_i*obj_size_bytes);
/* first time for this processor - add the number of rows to the buffer */
if (in_i < 0)
{
in_i = -in_i - 1;
/* re-calc. index_ptr since in_i was negative */
index_ptr = (void *) ((char *) void_contact_buf + in_i*obj_size_bytes);
tmp_int = num_rows_per_proc[indx];
hypre_TMemcpy( index_ptr, &tmp_int, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
}
/* add row # */
hypre_TMemcpy( index_ptr, &row, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
/* add number of elements */
hypre_TMemcpy( index_ptr, &num_elements, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
/* now add col indices */
for (j=0; j< num_elements; j++)
{
tmp_big_int = off_proc_j[counter+j]; /* col number */
hypre_TMemcpy( index_ptr, &tmp_big_int, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i ++;
}
/* now add data */
for (j=0; j< num_elements; j++)
{
tmp_complex = off_proc_data[counter++]; /* value */
hypre_TMemcpy( index_ptr, &tmp_complex, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
}
/* increment the indexes to keep track of where we are - we
* adjust below to be actual starts*/
ex_contact_vec_starts[indx] = in_i;
}
/* some clean up */
hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);
hypre_TFree(us_real_proc_id, HYPRE_MEMORY_HOST);
hypre_TFree(orig_order, HYPRE_MEMORY_HOST);
hypre_TFree(row_list, HYPRE_MEMORY_HOST);
hypre_TFree(row_list_num_elements, HYPRE_MEMORY_HOST);
hypre_TFree(num_rows_per_proc, HYPRE_MEMORY_HOST);
/* turn the end cursors back into real start offsets (shift right, start 0) */
for (i=num_real_procs; i > 0; i--)
{
ex_contact_vec_starts[i] = ex_contact_vec_starts[i-1];
}
ex_contact_vec_starts[0] = 0;
/* now send the data */
/***********************************/
/* first get the integer info in send_proc_obj */
/* the response we expect is just a confirmation*/
response_buf = NULL;
response_buf_starts = NULL;
/*build the response object*/
/* use the send_proc_obj for the info kept from contacts */
/*estimate initial storage allocation */
send_proc_obj.length = 0;
send_proc_obj.storage_length = num_real_procs + 5;
send_proc_obj.id =
hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts =
hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts[0] = 0;
send_proc_obj.element_storage_length = storage + 20;
send_proc_obj.v_elements =
hypre_TAlloc(char, obj_size_bytes*send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST);
response_obj2.fill_response = hypre_FillResponseIJOffProcVals;
response_obj2.data1 = NULL;
response_obj2.data2 = &send_proc_obj;
max_response_size = 0;
/* exchange 2: ship the packed row data to the actual owners; arriving
messages accumulate in send_proc_obj via hypre_FillResponseIJOffProcVals */
hypre_DataExchangeList(num_real_procs, ex_contact_procs,
void_contact_buf, ex_contact_vec_starts, obj_size_bytes,
0, &response_obj2, max_response_size, 2,
comm, (void **) &response_buf, &response_buf_starts);
hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
hypre_TFree(void_contact_buf, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);
/* Now we can unpack the send_proc_objects and call set
and add to values functions. We unpack messages in a
deterministic order, using processor rank */
num_recvs = send_proc_obj.length;
argsort_contact_procs = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST);
for(i=0; i < num_recvs; i++)
{
argsort_contact_procs[i] = i;
}
/* This sorts the id array, but the original indices are stored in
* argsort_contact_procs */
hypre_qsort2i( send_proc_obj.id, argsort_contact_procs, 0, num_recvs-1 );
/* alias */
recv_data_ptr = send_proc_obj.v_elements;
recv_starts = send_proc_obj.vec_starts;
for (i=0; i < num_recvs; i++)
{
/* Find the current processor in order, and reset recv_data_ptr to that processor's message */
original_proc_indx = argsort_contact_procs[i];
/*current_proc = send_proc_obj.id[i];*/
indx = recv_starts[original_proc_indx];
recv_data_ptr = (void *) ((char *) send_proc_obj.v_elements + indx*obj_size_bytes);
/* get the number of rows for this recv */
hypre_TMemcpy( &num_rows, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
for (j=0; j < num_rows; j++) /* for each row: unpack info */
{
/* row # */
hypre_TMemcpy( &row, recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
/* num elements for this row */
hypre_TMemcpy( &num_elements, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
/* col indices */ /* Need to check this again !!!! >>>>>>> */
/* if a BigInt fills a whole slot we can read the indices in place;
otherwise they must be copied out of the padded slots one by one */
if (big_int_size == obj_size_bytes)
{
col_ptr = (HYPRE_BigInt *) recv_data_ptr;
recv_data_ptr =
(void *) ((char *)recv_data_ptr + num_elements*obj_size_bytes);
}
else /* copy data */
{
if (big_int_data_size < num_elements)
{
big_int_data = hypre_TReAlloc(big_int_data, HYPRE_BigInt, num_elements + 10, HYPRE_MEMORY_HOST);
}
for (k=0; k< num_elements; k++)
{
hypre_TMemcpy( &big_int_data[k], recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
}
col_ptr = big_int_data;
}
/* col data */
if (complex_size == obj_size_bytes)
{
col_data_ptr = (HYPRE_Complex *) recv_data_ptr;
recv_data_ptr =
(void *) ((char *)recv_data_ptr + num_elements*obj_size_bytes);
}
else /* copy data */
{
if (complex_data_size < num_elements)
{
complex_data =
hypre_TReAlloc(complex_data, HYPRE_Complex, num_elements + 10, HYPRE_MEMORY_HOST);
}
for (k=0; k< num_elements; k++)
{
hypre_TMemcpy( &complex_data[k], recv_data_ptr, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
}
col_data_ptr = complex_data;
}
hypre_IJMatrixAddToValuesParCSR(matrix,1,&num_elements,&row,&row_index,
col_ptr,col_data_ptr);
indx += (num_elements*2);
}
}
hypre_TFree(send_proc_obj.v_elements, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
hypre_TFree(argsort_contact_procs, HYPRE_MEMORY_HOST);
if (big_int_data)
{
hypre_TFree(big_int_data, HYPRE_MEMORY_HOST);
}
if (complex_data)
{
hypre_TFree(complex_data, HYPRE_MEMORY_HOST);
}
return hypre_error_flag;
}
#endif
/*--------------------------------------------------------------------
* hypre_FillResponseIJOffProcVals
* Fill response function for the previous function (2nd data exchange)
*--------------------------------------------------------------------*/
/* Fill-response callback for the second data exchange above: appends the
 * raw contact payload (contact_size slots of mixed BigInt/Complex data)
 * from contact_proc into the hypre_ProcListElements kept in
 * response_obj->data2, growing its arrays as needed.  Sends back no
 * response payload (confirmation only).  Returns hypre_error_flag. */
HYPRE_Int
hypre_FillResponseIJOffProcVals(void *p_recv_contact_buf,
                                HYPRE_Int contact_size,
                                HYPRE_Int contact_proc,
                                void *ro,
                                MPI_Comm comm,
                                void **p_send_response_buf,
                                HYPRE_Int *response_message_size )
{
   HYPRE_Int rank;
   HYPRE_Int slot, start, needed;
   HYPRE_Int unit_size;
   void *dest;
   hypre_DataExchangeResponse *resp = (hypre_DataExchangeResponse*) ro;
   hypre_ProcListElements *obj = (hypre_ProcListElements*) resp->data2;

   /* each element occupies a slot big enough for either a BigInt or a Complex */
   unit_size = hypre_max(sizeof(HYPRE_BigInt), sizeof(HYPRE_Complex));
   hypre_MPI_Comm_rank(comm, &rank);

   /* grow the per-contact bookkeeping (vec_starts, id) when it is full */
   if (obj->length == obj->storage_length)
   {
      obj->storage_length += 20; /* room for 20 more contacts */
      obj->vec_starts = hypre_TReAlloc(obj->vec_starts, HYPRE_Int,
                                       obj->storage_length + 1, HYPRE_MEMORY_HOST);
      if (obj->id != NULL)
      {
         obj->id = hypre_TReAlloc(obj->id, HYPRE_Int,
                                  obj->storage_length + 1, HYPRE_MEMORY_HOST);
      }
   }

   /* record which proc this contact came from */
   slot = obj->length;
   start = obj->vec_starts[slot]; /* current number of stored elements */
   if (obj->id != NULL)
   {
      obj->id[slot] = contact_proc;
   }

   /* make sure the element buffer can hold this contact's payload */
   if (obj->element_storage_length < start + contact_size)
   {
      needed = start + hypre_max(contact_size, 100);
      obj->v_elements = hypre_TReAlloc((char*)obj->v_elements, char,
                                       needed*unit_size, HYPRE_MEMORY_HOST);
      obj->element_storage_length = needed;
   }

   /* append the raw contact data and advance the bookkeeping */
   dest = (void *) ((char *) obj->v_elements + start*unit_size);
   hypre_TMemcpy(dest, p_recv_contact_buf, char, unit_size*contact_size,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   obj->vec_starts[slot + 1] = start + contact_size;
   obj->length++;

   /* no message to return - confirmation only */
   *response_message_size = 0;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------*/
/* Binary search over a partitioning array: returns the index p such that
 * list[p] <= value < list[p+1], or -1 when value lies outside
 * [list[0], list[list_length]).  NOTE(review): reads list[list_length],
 * so the array is assumed to have list_length+1 entries (as the row
 * partitioning passed by the caller does) - confirm for new call sites. */
HYPRE_Int hypre_FindProc(HYPRE_BigInt *list, HYPRE_BigInt value, HYPRE_Int list_length)
{
   HYPRE_Int lo = 0;
   HYPRE_Int hi = list_length;

   /* out of range -> no owning interval */
   if (value < list[lo] || value >= list[hi])
   {
      return -1;
   }

   /* invariant: list[lo] <= value < list[hi] */
   while (hi - lo > 1)
   {
      HYPRE_Int mid = (lo + hi) / 2;
      if (value >= list[mid])
      {
         lo = mid;
      }
      else
      {
         hi = mid;
      }
   }
   return lo;
}
/******************************************************************************
*
* hypre_IJMatrixAssembleParCSR
*
* assembles IJMatrix from AuxParCSRMatrix auxiliary structure
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixAssembleParCSR(hypre_IJMatrix *matrix)
{
MPI_Comm comm = hypre_IJMatrixComm(matrix);
hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix);
hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
HYPRE_Int *diag_j;
HYPRE_Int *offd_j = NULL;
HYPRE_Complex *diag_data;
HYPRE_Complex *offd_data = NULL;
HYPRE_Int i, j, j0;
HYPRE_Int num_cols_offd;
HYPRE_Int *diag_pos;
HYPRE_BigInt *col_map_offd;
HYPRE_Int *row_length;
HYPRE_BigInt **aux_j;
HYPRE_Complex **aux_data;
HYPRE_Int my_id, num_procs;
HYPRE_Int num_rows;
HYPRE_Int i_diag, i_offd;
HYPRE_BigInt col_0, col_n;
HYPRE_Int nnz_offd;
HYPRE_BigInt *big_offd_j;
HYPRE_BigInt *tmp_j;
HYPRE_Complex temp;
#ifdef HYPRE_NO_GLOBAL_PARTITION
HYPRE_BigInt base = hypre_IJMatrixGlobalFirstCol(matrix);
#else
HYPRE_BigInt base = col_partitioning[0];
#endif
HYPRE_Int off_proc_i_indx;
HYPRE_Int max_off_proc_elmts;
HYPRE_Int current_num_elmts;
HYPRE_BigInt *off_proc_i;
HYPRE_BigInt *off_proc_j;
HYPRE_Complex *off_proc_data;
HYPRE_Int offd_proc_elmts;
//HYPRE_Int new_off_proc_i_indx;
//HYPRE_Int cancel_indx;
//HYPRE_Int col_indx;
//HYPRE_Int current_indx;
//HYPRE_Int current_i;
//HYPRE_Int row_len;
HYPRE_Int max_num_threads;
HYPRE_Int aux_flag, aux_flag_global;
max_num_threads = hypre_NumThreads();
/* first find out if anyone has an aux_matrix, and create one if you don't
* have one, but other procs do */
aux_flag = 0;
aux_flag_global = 0;
if(aux_matrix)
{
aux_flag = 1;
}
hypre_MPI_Allreduce(&aux_flag, &aux_flag_global, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm);
if(aux_flag_global && (!aux_flag))
{
hypre_MPI_Comm_rank(comm, &my_id);
num_rows = (HYPRE_Int)(row_partitioning[my_id+1] - row_partitioning[my_id]);
hypre_AuxParCSRMatrixCreate(&aux_matrix, num_rows, num_rows, NULL);
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
if (aux_matrix)
{
/* first delete all cancelled elements */
/*cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
if (cancel_indx)
{
current_num_elmts=hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix);
off_proc_i=hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j=hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data=hypre_AuxParCSRMatrixOffProcData(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
col_indx = 0;
current_i = 0;
current_indx = 0;
new_off_proc_i_indx = off_proc_i_indx;
for (i=0; i < off_proc_i_indx; i= i+2)
{
row_len = off_proc_i[i+1];
for (j=0; j < off_proc_i[i+1]; j++)
{
if (off_proc_j[col_indx] == -1)
{
col_indx++;
row_len--;
current_num_elmts--;
}
else
{
off_proc_j[current_indx] = off_proc_j[col_indx];
off_proc_data[current_indx++] = off_proc_data[col_indx++];
}
}
if (row_len)
{
off_proc_i[current_i] = off_proc_i[i];
off_proc_i[current_i+1] = row_len;
current_i += 2;
}
else
{
new_off_proc_i_indx -= 2;
}
}
hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = new_off_proc_i_indx;
hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix) = current_num_elmts;
}*/
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
hypre_MPI_Allreduce(&off_proc_i_indx, &offd_proc_elmts, 1, HYPRE_MPI_INT,
hypre_MPI_SUM, comm);
if (offd_proc_elmts)
{
max_off_proc_elmts=hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
current_num_elmts=hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix);
off_proc_i=hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j=hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data=hypre_AuxParCSRMatrixOffProcData(aux_matrix);
hypre_IJMatrixAssembleOffProcValsParCSR(
matrix,off_proc_i_indx, max_off_proc_elmts, current_num_elmts,
off_proc_i, off_proc_j, off_proc_data);
}
}
if (hypre_IJMatrixAssembleFlag(matrix) == 0)
{
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
col_0 = col_partitioning[0];
col_n = col_partitioning[1]-1;
#else
num_rows = (HYPRE_Int)(row_partitioning[my_id+1] - row_partitioning[my_id]);
col_0 = col_partitioning[my_id];
col_n = col_partitioning[my_id+1]-1;
#endif
/* move data into ParCSRMatrix if not there already */
if (hypre_AuxParCSRMatrixNeedAux(aux_matrix))
{
HYPRE_Int *diag_array, *offd_array;
diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
diag_pos = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
i_diag = 0;
i_offd = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i, j, i_diag, i_offd)
#endif
{
HYPRE_BigInt *local_j;
HYPRE_Complex *local_data;
HYPRE_Int rest, size, ns, ne;
HYPRE_Int num_threads, my_thread_num;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
size = num_rows/num_threads;
rest = num_rows - size*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(size + 1);
ne = (my_thread_num+1)*(size + 1);
}
else
{
ns = my_thread_num*size + rest;
ne = (my_thread_num+1)*size + rest;
}
i_diag = 0;
i_offd = 0;
for (i=ns; i < ne; i++)
{
local_j = aux_j[i];
local_data = aux_data[i];
diag_pos[i] = -1;
for (j=0; j < row_length[i]; j++)
{
if (local_j[j] < col_0 || local_j[j] > col_n)
{
i_offd++;
}
else
{
i_diag++;
if ((HYPRE_Int)(local_j[j]-col_0) == i)
{
diag_pos[i] = j;
}
}
}
}
diag_array[my_thread_num] = i_diag;
offd_array[my_thread_num] = i_offd;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
i_diag = 0;
i_offd = 0;
for (i = 0; i < num_threads; i++)
{
i_diag += diag_array[i];
i_offd += offd_array[i];
diag_array[i] = i_diag;
offd_array[i] = i_offd;
}
diag_i[num_rows] = i_diag;
offd_i[num_rows] = i_offd;
if (hypre_CSRMatrixJ(diag))
{
hypre_TFree(hypre_CSRMatrixJ(diag), HYPRE_MEMORY_HOST);
}
if (hypre_CSRMatrixData(diag))
{
hypre_TFree(hypre_CSRMatrixData(diag), HYPRE_MEMORY_HOST);
}
if (hypre_CSRMatrixJ(offd))
{
hypre_TFree(hypre_CSRMatrixJ(offd), HYPRE_MEMORY_HOST);
}
if (hypre_CSRMatrixData(offd))
{
hypre_TFree(hypre_CSRMatrixData(offd), HYPRE_MEMORY_HOST);
}
diag_j = hypre_CTAlloc(HYPRE_Int, i_diag, HYPRE_MEMORY_SHARED);
diag_data = hypre_CTAlloc(HYPRE_Complex, i_diag, HYPRE_MEMORY_SHARED);
if (i_offd > 0)
{
offd_j = hypre_CTAlloc(HYPRE_Int, i_offd, HYPRE_MEMORY_SHARED);
offd_data = hypre_CTAlloc(HYPRE_Complex, i_offd, HYPRE_MEMORY_SHARED);
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, i_offd, HYPRE_MEMORY_SHARED);
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num)
{
i_diag = diag_array[my_thread_num-1];
i_offd = offd_array[my_thread_num-1];
}
else
{
i_diag = 0;
i_offd = 0;
}
for (i=ns; i < ne; i++)
{
diag_i[i] = i_diag;
offd_i[i] = i_offd;
local_j = aux_j[i];
local_data = aux_data[i];
if (diag_pos[i] > -1)
{
diag_j[i_diag] = (HYPRE_Int)(local_j[diag_pos[i]] - col_0);
diag_data[i_diag++] = local_data[diag_pos[i]];
}
for (j=0; j < row_length[i]; j++)
{
if (local_j[j] < col_0 || local_j[j] > col_n)
{
big_offd_j[i_offd] = local_j[j];
offd_data[i_offd++] = local_data[j];
}
else if (j != diag_pos[i])
{
diag_j[i_diag] = (HYPRE_Int)(local_j[j] - col_0);
diag_data[i_diag++] = local_data[j];
}
}
}
} /* end parallel region */
hypre_TFree(diag_array, HYPRE_MEMORY_HOST);
hypre_TFree(offd_array, HYPRE_MEMORY_HOST);
hypre_CSRMatrixJ(diag) = diag_j;
hypre_CSRMatrixData(diag) = diag_data;
hypre_CSRMatrixNumNonzeros(diag) = diag_i[num_rows];
if (offd_i[num_rows] > 0)
{
hypre_CSRMatrixJ(offd) = offd_j;
hypre_CSRMatrixBigJ(offd) = big_offd_j;
hypre_CSRMatrixData(offd) = offd_data;
}
hypre_CSRMatrixNumNonzeros(offd) = offd_i[num_rows];
hypre_TFree(diag_pos, HYPRE_MEMORY_HOST);
}
else
{
/* move diagonal element into first space */
big_offd_j = hypre_CSRMatrixBigJ(offd);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private (i,j,j0,temp)
#endif
for (i=0; i < num_rows; i++)
{
j0 = diag_i[i];
for (j=j0; j < diag_i[i+1]; j++)
{
if (diag_j[j] == i)
{
temp = diag_data[j0];
diag_data[j0] = diag_data[j];
diag_data[j] = temp;
diag_j[j] = diag_j[j0];
diag_j[j0] = i;
break;
}
}
}
offd_j = hypre_CSRMatrixJ(offd);
if (!offd_j && offd_i[num_rows])
{
offd_j = hypre_CTAlloc(HYPRE_Int, offd_i[num_rows], HYPRE_MEMORY_SHARED);
hypre_CSRMatrixJ(offd) = offd_j;
}
}
/* generate the nonzero rows inside offd and diag by calling */
hypre_CSRMatrixSetRownnz(diag);
hypre_CSRMatrixSetRownnz(offd);
/* generate col_map_offd */
nnz_offd = offd_i[num_rows];
if (nnz_offd)
{
tmp_j = hypre_CTAlloc(HYPRE_BigInt, nnz_offd, HYPRE_MEMORY_HOST);
for (i=0; i < nnz_offd; i++)
{
tmp_j[i] = big_offd_j[i];
}
hypre_BigQsort0(tmp_j,0,nnz_offd-1);
num_cols_offd = 1;
for (i=0; i < nnz_offd-1; i++)
{
if (tmp_j[i+1] > tmp_j[i])
{
tmp_j[num_cols_offd++] = tmp_j[i+1];
}
}
col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_offd; i++)
{
col_map_offd[i] = tmp_j[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i)
#endif
for (i=0; i < nnz_offd; i++)
{
offd_j[i]=hypre_BigBinarySearch(col_map_offd,big_offd_j[i],num_cols_offd);
}
if (base)
{
for (i=0; i < num_cols_offd; i++)
{
col_map_offd[i] -= base;
}
}
hypre_ParCSRMatrixColMapOffd(par_matrix) = col_map_offd;
hypre_CSRMatrixNumCols(offd) = num_cols_offd;
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
}
hypre_IJMatrixAssembleFlag(matrix) = 1;
}
hypre_AuxParCSRMatrixDestroy(aux_matrix);
hypre_IJMatrixTranslator(matrix) = NULL;
return hypre_error_flag;
}
/*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
/******************************************************************************
*
* IJMatrix_ParCSR interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "../HYPRE.h"
/******************************************************************************
*
* hypre_IJMatrixSetValuesOMPParCSR
*
* sets values in an IJMatrix before assembly,
* use of this routine requires that the values in rows are different from each
* other, i.e rows[i] != rows[j] for i != j
* to ensure accurate threading
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetValuesOMPParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_Int *row_indexes,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
/* Overwrites (does not add to) matrix entries, threaded with OpenMP.
 * The input rows are split into contiguous chunks, one per thread, so the
 * caller must pass each row at most once (rows[i] != rows[j] for i != j)
 * to keep threads from racing on the same row.  Errors inside the parallel
 * region are recorded via hypre_error() plus an atomic bump of the shared
 * counter error_flag; the affected thread breaks out of its row instead of
 * returning early. */
hypre_ParCSRMatrix *par_matrix;
hypre_CSRMatrix *diag, *offd;
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_BigInt *row_partitioning;
HYPRE_BigInt *col_partitioning;
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int num_procs, my_id;
HYPRE_BigInt col_0, col_n, first;
//HYPRE_Int cancel_indx;
HYPRE_BigInt **aux_j;
HYPRE_Complex **aux_data;
HYPRE_Int *row_length, *row_space;
HYPRE_Int need_aux;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *diag_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_BigInt *big_offd_j;
HYPRE_Complex *offd_data;
HYPRE_Int pstart;
/*HYPRE_Int current_num_elmts;*/
/*HYPRE_Int max_off_proc_elmts;*/
//HYPRE_Int off_proc_i_indx;
//HYPRE_BigInt *off_proc_i;
//HYPRE_BigInt *off_proc_j;
//HYPRE_Int *offproc_cnt;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
//HYPRE_Int max_num_threads;
HYPRE_Int error_flag = 0;
/*HYPRE_Complex *off_proc_data;*/
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
//max_num_threads = hypre_NumThreads();
par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
col_partitioning = hypre_IJMatrixColPartitioning(matrix);
//offproc_cnt = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
/* [col_0, col_n] = global column range owned by this process; 'first' is the
 * global first column; pstart indexes this process's slot in
 * row_partitioning (0 under no-global-partition, my_id otherwise). */
#ifdef HYPRE_NO_GLOBAL_PARTITION
col_0 = col_partitioning[0];
col_n = col_partitioning[1]-1;
first = hypre_IJMatrixGlobalFirstCol(matrix);
pstart = 0;
#else
col_0 = col_partitioning[my_id];
col_n = col_partitioning[my_id+1]-1;
first = col_partitioning[0];
pstart = my_id;
#endif
/* Reject a negative row count up front, before entering any parallel code. */
if (nrows < 0)
{
hypre_error_in_arg(2);
if (print_level)
{
hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n");
}
return hypre_error_flag;
}
/* Case 1: matrix already assembled -- a "set" may only overwrite values of
 * structural nonzeros that already exist in diag/offd; it cannot create new
 * entries. */
if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
{
HYPRE_BigInt *col_map_offd;
HYPRE_Int num_cols_offd;
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
num_cols_offd = hypre_CSRMatrixNumCols(offd);
if (num_cols_offd)
{
col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
offd_j = hypre_CSRMatrixJ(offd);
offd_data = hypre_CSRMatrixData(offd);
}
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
/*if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
}*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int j_offd;
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_Int pos_diag, pos_offd;
HYPRE_Int len_diag, len_offd;
//HYPRE_Int row_len;
HYPRE_Int row_local;
HYPRE_Int i, j, ii, n;
HYPRE_BigInt row;
HYPRE_Int not_found, size, indx;
/* Static row partition: rows [ns, ne) belong to this thread; the first
 * 'rest' threads each take one extra row so all nrows rows are covered. */
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols[ii];
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
/* processor owns the row */
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
/* compute local row number */
/* An assembled matrix cannot grow: the input row may not contain more
 * entries than the structural row (diag + offd). */
size = diag_i[row_local+1] - diag_i[row_local]
+ offd_i[row_local+1] - offd_i[row_local];
if (n > size)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" row %b too long! \n", row);
}
break;
/*return hypre_error_flag; */
}
pos_diag = diag_i[row_local];
pos_offd = offd_i[row_local];
len_diag = diag_i[row_local+1];
len_offd = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
/* Map the global column to its local offd index; -1 means the entry
 * does not exist in the assembled structure. */
j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
num_cols_offd);
if (j_offd == -1)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag; */
}
for (j=pos_offd; j < len_offd; j++)
{
if (offd_j[j] == j_offd)
{
offd_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
not_found = 1;
}
/* diagonal element */
else if (cols[indx] == row)
{
/* The diagonal entry is expected at the first position of the diag
 * row (the assemble step moves it there). */
if (diag_j[pos_diag] != row_local)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag; */
}
diag_data[pos_diag] = values[indx];
}
else /* insert into diag */
{
for (j=pos_diag; j < len_diag; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
}
indx++;
}
}
/* processor does not own the row */
/* Off-processor rows are currently ignored by the Set path; the legacy
 * "cancel previous occurrences" code below is kept for reference only. */
//else /*search for previous occurrences and cancel them */
/*{
if (aux_matrix)
{
col_indx = 0;
for (i=0; i < off_proc_i_indx; i=i+2)
{
row_len = off_proc_i[i+1];
if (off_proc_i[i] == row)
{
for (j=0; j < n; j++)
{
cnt1 = col_indx;
for (k=0; k < row_len; k++)
{
if (off_proc_j[cnt1] == cols[j])
{
off_proc_j[cnt1++] = -1;
offproc_cnt[my_thread_num]++; */
/*cancel_indx++;*/
/* if no repetition allowed */
/* off_proc_j[col_indx] = -1;
col_indx -= k;
break; */
/*}
else
{
cnt1++;
}
}
}
col_indx += row_len;
}
else
{
col_indx += row_len;
}
}*/
/*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/
//}
//}
}
} /*end parallel region */
}
/* Case 2: matrix not assembled -- insert either into the auxiliary row-wise
 * storage (need_aux) or directly into the preallocated ParCSR arrays. */
else /* matrix not assembled */
{
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
/*if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
}*/
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
if (need_aux)
{
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
}
else
{
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
if (num_procs > 1)
{
offd_data = hypre_CSRMatrixData(offd);
big_offd_j = hypre_CSRMatrixBigJ(offd);
if (!big_offd_j)
{
/* NOTE(review): allocated with HYPRE_MEMORY_HOST while the assemble
 * path allocates big_offd_j with HYPRE_MEMORY_SHARED -- confirm the
 * intended memory space. */
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)], HYPRE_MEMORY_HOST);
hypre_CSRMatrixBigJ(offd) = big_offd_j;
}
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_BigInt *tmp_j = NULL;
HYPRE_BigInt *local_j = NULL;
HYPRE_Complex *tmp_data = NULL;
HYPRE_Complex *local_data = NULL;
HYPRE_Int tmp_indx;
//HYPRE_Int row_len;
HYPRE_Int row_local;
HYPRE_Int i, j, ii, n;
HYPRE_BigInt row;
HYPRE_Int not_found, size, indx;
HYPRE_Int old_size, space, cnt;
/* Same static row partition as in the assembled branch. */
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols[ii];
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
/* processor owns the row */
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
/* compute local row number */
if (need_aux)
{
local_j = aux_j[row_local];
local_data = aux_data[row_local];
space = row_space[row_local];
old_size = row_length[row_local];
/* Entries that do not fit into the remaining aux-row space are staged
 * in tmp_j/tmp_data; the row is reallocated once afterwards. */
size = space - old_size;
if (size < n)
{
size = n - size;
tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
}
tmp_indx = 0;
not_found = 1;
size = old_size;
for (i=0; i < n; i++)
{
/* Overwrite an existing column if present ... */
for (j=0; j < old_size; j++)
{
if (local_j[j] == cols[indx])
{
local_data[j] = values[indx];
not_found = 0;
break;
}
}
/* ... otherwise append, spilling to the temporaries when full. */
if (not_found)
{
if (size < space)
{
local_j[size] = cols[indx];
local_data[size++] = values[indx];
}
else
{
tmp_j[tmp_indx] = cols[indx];
tmp_data[tmp_indx++] = values[indx];
}
}
not_found = 1;
indx++;
}
row_length[row_local] = size+tmp_indx;
/* Grow the aux row and copy the spilled entries in. */
if (tmp_indx)
{
aux_j[row_local] = hypre_TReAlloc(aux_j[row_local],HYPRE_BigInt,
size+tmp_indx, HYPRE_MEMORY_HOST);
aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
row_space[row_local] = size+tmp_indx;
local_j = aux_j[row_local];
local_data = aux_data[row_local];
}
cnt = size;
for (i=0; i < tmp_indx; i++)
{
local_j[cnt] = tmp_j[i];
local_data[cnt++] = tmp_data[i];
}
if (tmp_j)
{
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
}
else /* insert immediately into data in ParCSRMatrix structure */
{
HYPRE_Int offd_indx, diag_indx;
HYPRE_Int offd_space, diag_space;
HYPRE_Int cnt_diag, cnt_offd;
/* IndxDiag/IndxOffd track how far each CSR row has been filled so far. */
offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
cnt_diag = diag_indx;
cnt_offd = offd_indx;
diag_space = diag_i[row_local+1];
offd_space = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
for (j=offd_i[row_local]; j < offd_indx; j++)
{
if (big_offd_j[j] == cols[indx])
{
offd_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_offd < offd_space)
{
big_offd_j[cnt_offd] = cols[indx];
offd_data[cnt_offd++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements!\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
else /* insert into diag */
{
for (j=diag_i[row_local]; j < diag_indx; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_diag < diag_space)
{
diag_j[cnt_diag] = (HYPRE_Int)(cols[indx]-col_0);
diag_data[cnt_diag++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements !\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
indx++;
}
/* Remember the fill positions for subsequent Set/AddTo calls. */
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
}
}
/* processor does not own the row */
/*else
{
if (aux_matrix)
{
col_indx = 0;
for (i=0; i < off_proc_i_indx; i=i+2)
{
row_len = off_proc_i[i+1];
if (off_proc_i[i] == row)
{
for (j=0; j < n; j++)
{
cnt1 = col_indx;
for (k=0; k < row_len; k++)
{
if (off_proc_j[cnt1] == cols[j])
{
off_proc_j[cnt1++] = -1; */
/*cancel_indx++;*/
//offproc_cnt[my_thread_num]++;
/* if no repetition allowed */
/* off_proc_j[col_indx] = -1;
col_indx -= k;
break; */
/* }
else
{
cnt1++;
}
}
}
col_indx += row_len;
}
else
{
col_indx += row_len;
}
}*/
/*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/
/*}
}*/
}
} /* end parallel region */
}
/*if (error_flag)
{
return hypre_error_flag;
}
if (aux_matrix)
{
for (i1=0; i1 < max_num_threads; i1++)
{
cancel_indx += offproc_cnt[i1];
}
hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;
}*/
//hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAddToValuesOMPParCSR
*
* adds row values to an IJMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixAddToValuesOMPParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_Int *row_indexes,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
/* Adds values to matrix entries, threaded with OpenMP.  Input rows are split
 * into contiguous chunks, one per thread; callers must not repeat a row so
 * threads never race on the same row.  Rows owned by other processes are
 * recorded per thread as (ii, indx) pairs in offproc_cnt[thread] and flushed
 * serially into the aux matrix's off-proc lists after the parallel region.
 * Layout of each offproc_cnt[t] array: [0] = allocated length,
 * [1] = next free slot, pairs start at index 2.
 * Fixes vs. previous revision:
 *   - after hypre_TReAlloc of a thread's scratch array the (possibly moved)
 *     pointer is stored back into offproc_cnt[my_thread_num]; the old code
 *     left a dangling pointer that the serial flush read and freed
 *     (use-after-free / double free),
 *   - the early-error return path now frees the scratch arrays instead of
 *     leaking them. */
hypre_ParCSRMatrix *par_matrix;
hypre_CSRMatrix *diag, *offd;
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_BigInt *row_partitioning;
HYPRE_BigInt *col_partitioning;
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int num_procs, my_id;
HYPRE_BigInt col_0, col_n, first;
HYPRE_BigInt **aux_j;
HYPRE_Complex **aux_data;
HYPRE_Int *row_length, *row_space;
HYPRE_Int need_aux;
HYPRE_Int pstart;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *diag_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_BigInt *big_offd_j;
HYPRE_Complex *offd_data;
HYPRE_Int current_num_elmts;
HYPRE_Int max_off_proc_elmts;
HYPRE_Int off_proc_i_indx;
HYPRE_BigInt *off_proc_i;
HYPRE_BigInt *off_proc_j;
HYPRE_Complex *off_proc_data;
HYPRE_Int **offproc_cnt;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
HYPRE_Int max_num_threads;
HYPRE_Int error_flag = 0;
HYPRE_Int i1;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
max_num_threads = hypre_NumThreads();
par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject( matrix );
row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
col_partitioning = hypre_IJMatrixColPartitioning(matrix);
/* One deferred-work list per thread; allocated lazily inside the parallel
 * region the first time a thread meets an off-processor row. */
offproc_cnt = hypre_CTAlloc(HYPRE_Int *, max_num_threads, HYPRE_MEMORY_HOST);
for (i1=0; i1 < max_num_threads; i1++)
offproc_cnt[i1] = NULL;
/* [col_0, col_n] = global column range owned by this process; 'first' is the
 * global first column; pstart indexes this process's slot in
 * row_partitioning. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
col_0 = col_partitioning[0];
col_n = col_partitioning[1]-1;
first = hypre_IJMatrixGlobalFirstCol(matrix);
pstart = 0;
#else
col_0 = col_partitioning[my_id];
col_n = col_partitioning[my_id+1]-1;
first = col_partitioning[0];
pstart = my_id;
#endif
/* Case 1: matrix already assembled -- adds may only touch structural
 * nonzeros that already exist in diag/offd. */
if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled */
{
HYPRE_Int num_cols_offd;
HYPRE_BigInt *col_map_offd;
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
num_cols_offd = hypre_CSRMatrixNumCols(offd);
if (num_cols_offd)
{
col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
offd_j = hypre_CSRMatrixJ(offd);
offd_data = hypre_CSRMatrixData(offd);
}
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int j_offd;
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_Int pos_diag, pos_offd;
HYPRE_Int len_diag, len_offd;
HYPRE_Int row_local;
HYPRE_Int i, j, ii, n;
HYPRE_BigInt row;
HYPRE_Int not_found, size, indx;
HYPRE_Int *my_offproc_cnt = NULL;
/* Static row partition: rows [ns, ne) belong to this thread; the first
 * 'rest' threads each take one extra row. */
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols[ii];
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
/* compute local row number */
/* The input row may not be longer than the existing structural row. */
size = diag_i[row_local+1] - diag_i[row_local]
+ offd_i[row_local+1] - offd_i[row_local];
if (n > size)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" row %b too long! \n", row);
}
break;
/*return hypre_error_flag; */
}
pos_diag = diag_i[row_local];
pos_offd = offd_i[row_local];
len_diag = diag_i[row_local+1];
len_offd = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
/* Map the global column to its local offd index; -1 means the entry
 * does not exist in the assembled structure. */
j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
num_cols_offd);
if (j_offd == -1)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
for (j=pos_offd; j < len_offd; j++)
{
if (offd_j[j] == j_offd)
{
offd_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
not_found = 1;
}
/* diagonal element */
else if (cols[indx] == row)
{
/* The diagonal entry is expected at the first position of the diag
 * row (the assemble step moves it there). */
if (diag_j[pos_diag] != row_local)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
diag_data[pos_diag] += values[indx];
}
else /* insert into diag */
{
for (j=pos_diag; j < len_diag; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
}
indx++;
}
}
/* not my row */
/* need to find solution for threaded version!!!! */
/* could save row number and process later .... */
else
{
/* Defer: record (ii, indx) in this thread's scratch list for the serial
 * flush after the parallel region. */
if (!my_offproc_cnt)
{
my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST);
offproc_cnt[my_thread_num] = my_offproc_cnt;
my_offproc_cnt[0] = 200;
my_offproc_cnt[1] = 2;
}
i = my_offproc_cnt[1];
if (i+2 < my_offproc_cnt[0])
{
my_offproc_cnt[i] = ii;
my_offproc_cnt[i+1] = indx;
my_offproc_cnt[1] += 2;
}
else
{
size = my_offproc_cnt[0];
my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size+200, HYPRE_MEMORY_HOST);
/* BUG FIX: hypre_TReAlloc may move the block; refresh the shared table
 * entry so the serial flush does not read/free a dangling pointer. */
offproc_cnt[my_thread_num] = my_offproc_cnt;
my_offproc_cnt[0] += 200;
my_offproc_cnt[i] = ii;
my_offproc_cnt[i+1] = indx;
my_offproc_cnt[1] += 2;
}
}
}
} /* end parallel region */
}
/* not assembled */
else
{
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
}
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
if (need_aux)
{
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
}
else
{
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
if (num_procs > 1)
{
big_offd_j = hypre_CSRMatrixBigJ(offd);
offd_data = hypre_CSRMatrixData(offd);
if (!big_offd_j)
{
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)], HYPRE_MEMORY_HOST);
hypre_CSRMatrixBigJ(offd) = big_offd_j;
}
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_BigInt *tmp_j = NULL;
HYPRE_BigInt *local_j = NULL;
HYPRE_Complex *tmp_data = NULL;
HYPRE_Complex *local_data = NULL;
HYPRE_Int tmp_indx;
HYPRE_Int row_local;
HYPRE_BigInt row;
HYPRE_Int i, j, ii, n;
HYPRE_Int not_found, size, indx;
HYPRE_Int old_size, space, cnt;
HYPRE_Int *my_offproc_cnt = NULL;
/* Same static row partition as in the assembled branch. */
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols[ii];
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
/* compute local row number */
if (need_aux)
{
local_j = aux_j[row_local];
local_data = aux_data[row_local];
space = row_space[row_local];
old_size = row_length[row_local];
/* Entries that do not fit into the remaining aux-row space are staged
 * in tmp_j/tmp_data; the row is reallocated once afterwards. */
size = space - old_size;
if (size < n)
{
size = n - size;
tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
}
tmp_indx = 0;
not_found = 1;
size = old_size;
for (i=0; i < n; i++)
{
/* Accumulate into an existing column if present ... */
for (j=0; j < old_size; j++)
{
if (local_j[j] == cols[indx])
{
local_data[j] += values[indx];
not_found = 0;
break;
}
}
/* ... otherwise append, spilling to the temporaries when full. */
if (not_found)
{
if (size < space)
{
local_j[size] = cols[indx];
local_data[size++] = values[indx];
}
else
{
tmp_j[tmp_indx] = cols[indx];
tmp_data[tmp_indx++] = values[indx];
}
}
not_found = 1;
indx++;
}
row_length[row_local] = size+tmp_indx;
/* Grow the aux row and copy the spilled entries in. */
if (tmp_indx)
{
aux_j[row_local] = hypre_TReAlloc(aux_j[row_local],HYPRE_BigInt,
size+tmp_indx, HYPRE_MEMORY_HOST);
aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
row_space[row_local] = size+tmp_indx;
local_j = aux_j[row_local];
local_data = aux_data[row_local];
}
cnt = size;
for (i=0; i < tmp_indx; i++)
{
local_j[cnt] = tmp_j[i];
local_data[cnt++] = tmp_data[i];
}
if (tmp_j)
{
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
}
else /* insert immediately into data in ParCSRMatrix structure */
{
HYPRE_Int offd_indx, diag_indx;
HYPRE_Int offd_space, diag_space;
HYPRE_Int cnt_diag, cnt_offd;
/* IndxDiag/IndxOffd track how far each CSR row has been filled so far. */
offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
cnt_diag = diag_indx;
cnt_offd = offd_indx;
diag_space = diag_i[row_local+1];
offd_space = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
for (j=offd_i[row_local]; j < offd_indx; j++)
{
if (big_offd_j[j] == cols[indx])
{
offd_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_offd < offd_space)
{
big_offd_j[cnt_offd] = cols[indx];
offd_data[cnt_offd++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements!\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
else /* insert into diag */
{
for (j=diag_i[row_local]; j < diag_indx; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_diag < diag_space)
{
diag_j[cnt_diag] = (HYPRE_Int)(cols[indx]-col_0);
diag_data[cnt_diag++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements !\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
indx++;
}
/* Remember the fill positions for subsequent Set/AddTo calls. */
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
}
}
/* not my row */
else
{
/* Defer: record (ii, indx) in this thread's scratch list for the serial
 * flush after the parallel region. */
if (!my_offproc_cnt)
{
my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST);
offproc_cnt[my_thread_num] = my_offproc_cnt;
my_offproc_cnt[0] = 200;
my_offproc_cnt[1] = 2;
}
i = my_offproc_cnt[1];
if (i+2 < my_offproc_cnt[0])
{
my_offproc_cnt[i] = ii;
my_offproc_cnt[i+1] = indx;
my_offproc_cnt[1] += 2;
}
else
{
size = my_offproc_cnt[0];
my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size+200, HYPRE_MEMORY_HOST);
/* BUG FIX: hypre_TReAlloc may move the block; refresh the shared table
 * entry so the serial flush does not read/free a dangling pointer. */
offproc_cnt[my_thread_num] = my_offproc_cnt;
my_offproc_cnt[0] += 200;
my_offproc_cnt[i] = ii;
my_offproc_cnt[i+1] = indx;
my_offproc_cnt[1] += 2;
}
}
}
} /*end parallel region */
}
if (error_flag)
{
/* BUG FIX: release the per-thread scratch arrays before bailing out;
 * the previous revision leaked them on this path. */
for (i1 = 0; i1 < max_num_threads; i1++)
{
hypre_TFree(offproc_cnt[i1], HYPRE_MEMORY_HOST);
}
hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/* Make sure an aux matrix exists to receive the deferred off-processor
 * entries (it is created without aux row storage when missing). */
if (!aux_matrix)
{
HYPRE_Int size = (HYPRE_Int)(row_partitioning[pstart+1]-row_partitioning[pstart]);
hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
/* Serial flush: append every deferred (row, cols, values) run to the aux
 * matrix's off-processor lists, growing them as needed. */
for (i1 = 0; i1 < max_num_threads; i1++)
{
if (offproc_cnt[i1])
{
HYPRE_Int *my_offproc_cnt = offproc_cnt[i1];
HYPRE_Int i, i2, ii, n, indx;
HYPRE_BigInt row;
for (i2 = 2; i2 < my_offproc_cnt[1]; i2+=2)
{
ii = my_offproc_cnt[i2];
row = rows[ii];
n = ncols[ii];
if (n == 0) /* empty row */
{
continue;
}
indx = my_offproc_cnt[i2+1];
current_num_elmts
= hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix);
max_off_proc_elmts
= hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
if (!max_off_proc_elmts)
{
/* First off-proc entry ever: create the lists. */
max_off_proc_elmts = hypre_max(n,1000);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) =
max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcData(aux_matrix)
= hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
}
else if (current_num_elmts + n > max_off_proc_elmts)
{
/* Grow the lists and write the new pointers back into the aux matrix. */
max_off_proc_elmts += 3*n;
off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix)
= max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
}
/* off_proc_i stores (row, n) pairs; off_proc_j/off_proc_data store the
 * n column/value entries. */
off_proc_i[off_proc_i_indx++] = row;
off_proc_i[off_proc_i_indx++] = n;
for (i=0; i < n; i++)
{
off_proc_j[current_num_elmts] = cols[indx];
off_proc_data[current_num_elmts++] = values[indx++];
}
hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix) = current_num_elmts;
}
hypre_TFree(offproc_cnt[i1], HYPRE_MEMORY_HOST);
}
}
hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
// -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef GRID_H_
#define GRID_H_
#include <assert.h>
#include <omp.h>
#include <algorithm>
#include <array>
#include <atomic>
#include <cmath>
#include <iostream>
#include <limits>
#include <vector>
#include "constant.h"
#include "fixed_size_vector.h"
#include "inline_vector.h"
#include "log.h"
#include "param.h"
#include "simulation_object_vector.h"
namespace bdm {
/// FIFO ring buffer holding exactly N objects of type T.
/// Once full, every further insertion overwrites the oldest element.
/// operator[] indexes relative to the oldest element (index 0 = oldest).
template <typename T, uint64_t N>
class CircularBuffer {
 public:
  CircularBuffer() {
    for (auto& element : data_) {
      element = T();
    }
  }

  /// Resets the write position and clears every stored element.
  void clear() {  // NOLINT
    write_pos_ = 0;
    for (auto& element : data_) {
      element.clear();
    }
  }

  /// Copies `data` into the slot of the oldest element and advances.
  void push_back(const T& data) {  // NOLINT
    data_[write_pos_] = data;
    Increment();
  }

  T& operator[](uint64_t idx) { return data_[(idx + write_pos_) % N]; }

  const T& operator[](uint64_t idx) const {
    return data_[(idx + write_pos_) % N];
  }

  /// Calling function `End` and afterwards `Increment` is equivalent to
  /// `push_back`, but avoids copying data
  T* End() { return &(data_[write_pos_]); }

  void Increment() { write_pos_ = (write_pos_ + 1) % N; }

 private:
  T data_[N];
  uint64_t write_pos_ = 0;
};
/// A class that represents Cartesian 3D grid
template <typename TSimulation = Simulation<>>
class Grid {
public:
  /// A single unit cube of the grid
  struct Box {
    /// start value of the linked list of simulation objects inside this box.
    /// Next element can be found at `successors_[start_]`
    std::atomic<SoHandle> start_;
    /// length of the linked list (i.e. number of simulation objects)
    std::atomic<uint16_t> length_;

    Box() : start_(SoHandle()), length_(0) {}
    /// Copy Constructor required for boxes_.resize()
    /// Since box values will be overwritten afterwards it forwards to the
    /// default ctor
    Box(const Box& other) : Box() {}
    /// Required for boxes_.resize
    /// Since box values will be overwritten afterwards, implementation is
    /// missing
    const Box& operator=(const Box& other) const { return *this; }

    bool IsEmpty() const { return length_ == 0; }

    /// @brief Adds a simulation object to this box
    ///
    /// Lock-free prepend to the per-box linked list: the new object becomes
    /// the list head, and the previous head (if any) is chained behind it
    /// via the grid-wide successor vector.
    ///
    /// @param[in] obj_id The object's identifier
    /// @param successors The successors
    ///
    /// @tparam TSuccessors Type of successors
    ///
    template <typename TSimulationObjectVector>
    void AddObject(SoHandle obj_id, TSimulationObjectVector* successors) {
      length_++;
      // Atomically swap in the new head; the returned old head becomes the
      // new object's successor.
      auto old_start = std::atomic_exchange(&start_, obj_id);
      if (old_start != SoHandle()) {
        (*successors)[obj_id] = old_start;
      }
    }

    /// An iterator that iterates over the cells in this box
    struct Iterator {
      Iterator(Grid* grid, const Box* box)
          : grid_(grid),
            current_value_(box->start_),
            countdown_(box->length_) {}

      bool IsAtEnd() { return countdown_ <= 0; }

      Iterator& operator++() {
        countdown_--;
        // Follow the successor chain only while elements remain; the final
        // decrement leaves current_value_ as-is.
        if (countdown_ > 0) {
          current_value_ = grid_->successors_[current_value_];
        }
        return *this;
      }

      const SoHandle& operator*() const { return current_value_; }

      /// Pointer to the neighbor grid; for accessing the successor_ list
      Grid<TSimulation>* grid_;
      /// The current simulation object to be considered
      SoHandle current_value_;
      /// The remaining number of simulation objects to consider
      int countdown_ = 0;
    };

    // NOTE(review): the unused template parameter appears to exist only to
    // defer instantiation until Grid is complete -- confirm before removing.
    template <typename TGrid = Grid<TSimulation>>
    Iterator begin() const {  // NOLINT
      return Iterator(TSimulation::GetActive()->GetGrid(), this);
    }
  };
  /// An iterator that iterates over the boxes in this grid
  struct NeighborIterator {
    // NOTE(review): assumes `neighbor_boxes` is non-empty; box 0 is the
    // query box itself (see GetMooreBoxes) -- confirm.
    explicit NeighborIterator(
        const FixedSizeVector<const Box*, 27>& neighbor_boxes)
        : neighbor_boxes_(neighbor_boxes),
          // start iterator from box 0
          box_iterator_(neighbor_boxes_[0]->begin()) {
      // if first box is empty
      if (neighbor_boxes_[0]->IsEmpty()) {
        ForwardToNonEmptyBox();
      }
    }

    bool IsAtEnd() const { return is_end_; }

    const SoHandle& operator*() const { return *box_iterator_; }

    /// Version where empty neighbor boxes are allowed
    NeighborIterator& operator++() {
      ++box_iterator_;
      // if iterator of current box has come to an end, continue with next box
      if (box_iterator_.IsAtEnd()) {
        return ForwardToNonEmptyBox();
      }
      return *this;
    }

   private:
    /// The 27 neighbor boxes that will be searched for simulation objects
    const FixedSizeVector<const Box*, 27>& neighbor_boxes_;
    /// The box that shall be considered to iterate over for finding simulation
    /// objects
    typename Box::Iterator box_iterator_;
    /// The id of the box to be considered (i.e. value between 0 - 26)
    uint16_t box_idx_ = 0;
    /// Flag to indicate that all the neighbor boxes have been searched through
    bool is_end_ = false;

    /// Forwards the iterator to the next non empty box and returns itself
    /// If there are no non empty boxes is_end_ is set to true
    NeighborIterator& ForwardToNonEmptyBox() {
      // increment box id until non empty box has been found
      while (++box_idx_ < neighbor_boxes_.size()) {
        // box is empty or uninitialized (padding box) -> continue
        if (neighbor_boxes_[box_idx_]->IsEmpty()) {
          continue;
        }
        // a non-empty box has been found
        box_iterator_ = neighbor_boxes_[box_idx_]->begin();
        return *this;
      }
      // all remaining boxes have been empty; reached end
      is_end_ = true;
      return *this;
    }
  };
  /// Enum that determines the degree of adjacency in search neighbor boxes
  // todo(ahmad): currently only kHigh is supported (hardcoded 26 several
  // places)
  enum Adjacency {
    kLow,    /**< The closest 8 neighboring boxes */
    kMedium, /**< The closest 18 neighboring boxes */
    kHigh    /**< The closest 26 neighboring boxes */
  };

  using ResourceManager_t = typename TSimulation::ResourceManager_t;

  Grid() {}

  // Grids are non-copyable.
  Grid(Grid const&) = delete;
  void operator=(Grid const&) = delete;
  /// @brief Initialize the grid with the given simulation objects
  /// @param[in] adjacency The adjacency (see #Adjacency)
  void Initialize(Adjacency adjacency = kHigh) {
    adjacency_ = adjacency;
    // Reset thresholds to "impossible" extremes so the first UpdateGrid
    // pass always registers the initial dimensions (see CheckGridGrowth).
    int32_t inf = std::numeric_limits<int32_t>::max();
    threshold_dimensions_ = {inf, -inf};
    UpdateGrid();
    initialized_ = true;
  }

  virtual ~Grid() {}

  /// Clears the grid
  /// Resets all members to their empty/initial state; invoked at the start
  /// of every UpdateGrid pass before the dimensions are recomputed.
  void ClearGrid() {
    boxes_.clear();
    box_length_ = 1;
    largest_object_size_ = 0;
    num_boxes_axis_ = {{0}};
    num_boxes_xy_ = 0;
    int32_t inf = std::numeric_limits<int32_t>::max();
    grid_dimensions_ = {inf, -inf, inf, -inf, inf, -inf};
    successors_.clear();
    has_grown_ = false;
  }
  /// Updates the grid, as simulation objects may have moved, added or deleted
  ///
  /// Pipeline: clear all state, recompute the bounding box of all objects,
  /// round it to integers, derive the box length and box counts, then
  /// reassign every simulation object to its box.
  void UpdateGrid() {
    ClearGrid();

    auto inf = Constant::kInfinity;
    std::array<double, 6> tmp_dim = {{inf, -inf, inf, -inf, inf, -inf}};
    CalculateGridDimensions(&tmp_dim);
    RoundOffGridDimensions(tmp_dim);

    // Box edge length is the (ceiled) largest object size -- presumably so
    // that neighbor searches only need to inspect adjacent boxes (see
    // GetMooreBoxes) -- TODO confirm.
    auto los = ceil(largest_object_size_);
    assert(los > 0 &&
           "The largest object size was found to be 0. Please check if your "
           "cells are correctly initialized.");
    box_length_ = los;

    for (int i = 0; i < 3; i++) {
      int dimension_length =
          grid_dimensions_[2 * i + 1] - grid_dimensions_[2 * i];
      int r = dimension_length % box_length_;
      // If the grid is not perfectly divisible along each dimension by the
      // resolution, extend the grid so that it is
      if (r != 0) {
        // std::abs for the case that box_length_ > dimension_length
        grid_dimensions_[2 * i + 1] += (box_length_ - r);
      } else {
        // Else extend the grid dimension with one row, because the outmost
        // object lies exactly on the border
        grid_dimensions_[2 * i + 1] += box_length_;
      }
    }

    // Pad the grid to avoid out of bounds check when search neighbors
    for (int i = 0; i < 3; i++) {
      grid_dimensions_[2 * i] -= box_length_;
      grid_dimensions_[2 * i + 1] += box_length_;
    }

    // Calculate how many boxes fit along each dimension
    for (int i = 0; i < 3; i++) {
      int dimension_length =
          grid_dimensions_[2 * i + 1] - grid_dimensions_[2 * i];
      assert((dimension_length % box_length_ == 0) &&
             "The grid dimensions are not a multiple of its box length");
      num_boxes_axis_[i] = dimension_length / box_length_;
    }

    num_boxes_xy_ = num_boxes_axis_[0] * num_boxes_axis_[1];
    auto total_num_boxes = num_boxes_xy_ * num_boxes_axis_[2];

    CheckGridGrowth();

    if (boxes_.size() != total_num_boxes) {
      boxes_.resize(total_num_boxes, Box());
    }

    successors_.Initialize();

    // Assign simulation objects to boxes
    auto* rm = TSimulation::GetActive()->GetResourceManager();
    rm->ApplyOnAllElementsParallel([this](auto&& sim_object, SoHandle id) {
      const auto& position = sim_object.GetPosition();
      auto idx = this->GetBoxIndex(position);
      auto box = this->GetBoxPointer(idx);
      // Box::AddObject is a lock-free prepend, so this lambda is safe to run
      // from multiple threads.
      box->AddObject(id, &successors_);
      sim_object.SetBoxIdx(idx);
    });
  }
void CheckGridGrowth() {
// Determine if the grid dimensions have changed (changed in the sense that
// the grid has grown outwards)
auto min_gd =
*std::min_element(grid_dimensions_.begin(), grid_dimensions_.end());
auto max_gd =
*std::max_element(grid_dimensions_.begin(), grid_dimensions_.end());
if (min_gd < threshold_dimensions_[0]) {
threshold_dimensions_[0] = min_gd;
has_grown_ = true;
}
if (max_gd > threshold_dimensions_[1]) {
Log::Info("Grid",
"Your simulation objects are getting near the edge of "
"the simulation space. Be aware of boundary conditions that "
"may come into play!");
threshold_dimensions_[1] = max_gd;
has_grown_ = true;
}
}
  /// Calculates what the grid dimensions need to be in order to contain all the
  /// simulation objects
  ///
  /// Each thread accumulates per-thread min/max coordinates and the largest
  /// diameter into heap-allocated scratch; the master thread then reduces all
  /// per-thread results into `ret_grid_dimensions` and `largest_object_size_`.
  void CalculateGridDimensions(std::array<double, 6>* ret_grid_dimensions) {
    auto* rm = TSimulation::GetActive()->GetResourceManager();

    const auto max_threads = omp_get_max_threads();

    std::vector<std::array<double, 6>*> all_grid_dimensions(max_threads,
                                                            nullptr);
    std::vector<double*> all_largest_object_size(max_threads, nullptr);

#pragma omp parallel
    {
      auto thread_id = omp_get_thread_num();
      auto* grid_dimensions = new std::array<double, 6>;
      // Per-thread extremes start at +/-infinity so any real coordinate
      // replaces them.
      *grid_dimensions = {{Constant::kInfinity, -Constant::kInfinity,
                           Constant::kInfinity, -Constant::kInfinity,
                           Constant::kInfinity, -Constant::kInfinity}};
      double* largest_object_size = new double;
      *largest_object_size = 0;
      all_grid_dimensions[thread_id] = grid_dimensions;
      all_largest_object_size[thread_id] = largest_object_size;

      rm->ApplyOnAllTypes([&](auto* sim_objects, uint16_t type_idx) {
#pragma omp for
        for (size_t i = 0; i < sim_objects->size(); i++) {
          const auto& position = (*sim_objects)[i].GetPosition();
          for (size_t j = 0; j < 3; j++) {
            if (position[j] < (*grid_dimensions)[2 * j]) {
              (*grid_dimensions)[2 * j] = position[j];
            }
            if (position[j] > (*grid_dimensions)[2 * j + 1]) {
              (*grid_dimensions)[2 * j + 1] = position[j];
            }
          }
          auto diameter = (*sim_objects)[i].GetDiameter();
          if (diameter > *largest_object_size) {
            *largest_object_size = diameter;
          }
        }
      });

      // NOTE(review): the reduction below relies on the implicit barrier at
      // the end of the preceding `omp for` worksharing loop(s); `omp master`
      // itself carries no barrier -- confirm.
#pragma omp master
      {
        for (int i = 0; i < max_threads; i++) {
          for (size_t j = 0; j < 3; j++) {
            if ((*all_grid_dimensions[i])[2 * j] <
                (*ret_grid_dimensions)[2 * j]) {
              (*ret_grid_dimensions)[2 * j] = (*all_grid_dimensions[i])[2 * j];
            }
            if ((*all_grid_dimensions[i])[2 * j + 1] >
                (*ret_grid_dimensions)[2 * j + 1]) {
              (*ret_grid_dimensions)[2 * j + 1] =
                  (*all_grid_dimensions[i])[2 * j + 1];
            }
          }
          if ((*all_largest_object_size[i]) > largest_object_size_) {
            largest_object_size_ = *(all_largest_object_size[i]);
          }
        }
      }
    }

    // Free the per-thread scratch (after the parallel region has ended).
    for (auto element : all_grid_dimensions) {
      delete element;
    }
    for (auto element : all_largest_object_size) {
      delete element;
    }
  }
void RoundOffGridDimensions(const std::array<double, 6>& grid_dimensions) {
assert(grid_dimensions_[0] > -9.999999999);
assert(grid_dimensions_[2] > -9.999999999);
assert(grid_dimensions_[4] > -9.999999999);
assert(grid_dimensions_[1] < 80);
assert(grid_dimensions_[3] < 80);
assert(grid_dimensions_[5] < 80);
grid_dimensions_[0] = floor(grid_dimensions[0]);
grid_dimensions_[2] = floor(grid_dimensions[2]);
grid_dimensions_[4] = floor(grid_dimensions[4]);
grid_dimensions_[1] = ceil(grid_dimensions[1]);
grid_dimensions_[3] = ceil(grid_dimensions[3]);
grid_dimensions_[5] = ceil(grid_dimensions[5]);
}
/// @brief Calculates the squared euclidian distance between two points
/// in 3D
///
/// @param[in] pos1 Position of the first point
/// @param[in] pos2 Position of the second point
///
/// @return The distance between the two points
///
inline double SquaredEuclideanDistance(
const std::array<double, 3>& pos1,
const std::array<double, 3>& pos2) const {
const double dx = pos2[0] - pos1[0];
const double dy = pos2[1] - pos1[1];
const double dz = pos2[2] - pos1[2];
return (dx * dx + dy * dy + dz * dz);
}
/// @brief Applies the given lambda to each neighbor
///
/// @param[in] lambda The operation as a lambda
/// @param query The query object
/// @param simulation_object_id
///
/// @tparam Lambda The type of the lambda operation
/// @tparam SO The type of the simulation object
///
template <typename Lambda, typename SO>
void ForEachNeighbor(const Lambda& lambda, const SO& query,
const SoHandle& simulation_object_id) const {
const auto& position = query.GetPosition();
auto idx = GetBoxIndex(position);
FixedSizeVector<const Box*, 27> neighbor_boxes;
GetMooreBoxes(&neighbor_boxes, idx);
NeighborIterator ni(neighbor_boxes);
while (!ni.IsAtEnd()) {
// Do something with neighbor object
if (*ni != simulation_object_id) {
lambda(*ni);
}
++ni;
}
}
  /// @brief Applies the given lambda to each neighbor or the specified
  /// simulation object
  ///
  /// @param[in] lambda The operation as a lambda
  /// @param query The query object
  /// @param simulation_object_id handle of the query object (excluded)
  /// @param[in] squared_radius The search radius squared
  ///
  /// @tparam Lambda The type of the lambda operation
  /// @tparam SO The type of the simulation object
  ///
  template <typename Lambda, typename SO>
  void ForEachNeighborWithinRadius(const Lambda& lambda, const SO& query,
                                   const SoHandle& simulation_object_id,
                                   double squared_radius) {
    const auto& position = query.GetPosition();
    // Uses the box index cached on the object (set in UpdateGrid) instead of
    // recomputing it from the position.
    auto idx = query.GetBoxIdx();
    FixedSizeVector<const Box*, 27> neighbor_boxes;
    GetMooreBoxes(&neighbor_boxes, idx);
    NeighborIterator ni(neighbor_boxes);
    auto* rm = TSimulation::GetActive()->GetResourceManager();
    while (!ni.IsAtEnd()) {
      // Do something with neighbor object
      SoHandle neighbor_handle = *ni;
      if (neighbor_handle != simulation_object_id) {
        rm->ApplyOnElement(neighbor_handle, [&](auto&& sim_object) {
          const auto& neighbor_position = sim_object.GetPosition();
          // The lambda is only invoked for neighbors inside the cutoff.
          if (this->SquaredEuclideanDistance(position, neighbor_position) <
              squared_radius) {
            lambda(sim_object, neighbor_handle);
          }
        });
      }
      ++ni;
    }
  }
/// This function calls the given lambda exactly once for every cell pair
/// if the distance is smaller than `(squared_radius) ^ 1/2`
///
/// // usage:
/// grid.ForEachNeighborPairWithinRadius([](auto&& lhs, SoHandle lhs_id,
/// auto&& rhs, SoHandle rhs_id) {
/// ...
/// }, squared_radius);
///
/// // using lhs_id and rhs_id to index into an array is thread-safe
/// SimulationObjectVector<std::array<double, 3>> total_force;
/// grid.ForEachNeighborPairWithinRadius([&](auto&& lhs, SoHandle lhs_id,
/// auto&& rhs, SoHandle rhs_id) {
/// auto force = ...;
/// total_force[lhs_id] += force;
/// total_force[rhs_id] -= force;
/// }, squared_radius);
///
/// // the following example leads to a race condition
///
/// int counter = 0;
/// grid.ForEachNeighborPairWithinRadius([&](auto&& lhs, SoHandle lhs_id,
/// auto&& rhs, SoHandle rhs_id) {
/// counter++;
/// }, squared_radius);
/// // which can be solved by using std::atomic<int> counter; instead
  template <typename TLambda>
  void ForEachNeighborPairWithinRadius(const TLambda& lambda,
                                       double squared_radius) const {
    uint32_t z_start = 0, y_start = 0;
    auto* rm = TSimulation::GetActive()->GetResourceManager();
    // use special iteration pattern to avoid race conditions between neighbors
    // main iteration will be done over rows of boxes. In order to avoid two
    // threads accessing the same box, one has to use a margin region of two
    // boxes in the y and z dimension.
    // The 9 phases enumerate all (z_start, y_start) offsets in {1,2,3}^2
    // (equivalent to z_start = i / 3 + 1, y_start = i % 3 + 1); within one
    // phase, rows handled in parallel are 3 boxes apart in y and z.
    for (uint16_t i = 0; i < 9; i++) {
      switch (i) {
        case 0:
          z_start = 1;
          y_start = 1;
          break;
        case 1:
          z_start = 1;
          y_start = 2;
          break;
        case 2:
          z_start = 1;
          y_start = 3;
          break;
        case 3:
          z_start = 2;
          y_start = 1;
          break;
        case 4:
          z_start = 2;
          y_start = 2;
          break;
        case 5:
          z_start = 2;
          y_start = 3;
          break;
        case 6:
          z_start = 3;
          y_start = 1;
          break;
        case 7:
          z_start = 3;
          y_start = 2;
          break;
        case 8:
          z_start = 3;
          y_start = 3;
          break;
      }
#pragma omp parallel
      {
        FixedSizeVector<size_t, 14> box_indices;
        CircularBuffer<InlineVector<SoHandle, 4>, 14> cached_so_handles;
        CircularBuffer<InlineVector<std::array<double, 3>, 4>, 14>
            cached_positions;
#pragma omp for collapse(2) schedule(dynamic, 1) firstprivate(z_start, y_start)
        for (uint32_t z = z_start; z < num_boxes_axis_[2] - 1; z += 3) {
          for (uint32_t y = y_start; y < num_boxes_axis_[1] - 1; y += 3) {
            // Row traversal starts at the first interior box (x == 1); the
            // outermost layer is padding (see UpdateGrid).
            auto current_box_idx =
                GetBoxIndex(std::array<uint32_t, 3>{1, y, z});
            box_indices.clear();
            cached_so_handles.clear();
            cached_positions.clear();

            // since we want to execute the lambda ONCE for each cell pair,
            // it is sufficient to process half of the boxes and omit the
            // opposite box
            // Order: C BW FNW NW BNW B FN N BN E BE FNE NE BNE
            GetHalfMooreBoxIndices(&box_indices, current_box_idx);

            CacheSoHandles(box_indices, &cached_so_handles);
            CachePositions(cached_so_handles, rm, &cached_positions);

            // first iteration peeled off
            ForEachNeighborPairWithCenterBox(lambda, rm, cached_so_handles,
                                             cached_positions, 0,
                                             squared_radius);

            for (uint32_t x = 2; x < num_boxes_axis_[0] - 1; x++) {
              // since every thread is iterating over a complete row, we can
              // save time by updating only new boxes
              ++box_indices;
              UpdateSoHandles(box_indices, &cached_so_handles);
              UpdateCachedPositions(cached_so_handles, rm, &cached_positions);
              // updating `cached_so_handles` and `cached_positions` will change
              // the
              // position of the center box from 0 to 4
              ForEachNeighborPairWithCenterBox(lambda, rm, cached_so_handles,
                                               cached_positions, 4,
                                               squared_radius);
            }
          }
        }
      }
    }
  }
/// @brief Return the box index in the one dimensional array of the box
/// that contains the position
///
/// @param[in] position The position of the object
///
/// @return The box index.
///
size_t GetBoxIndex(const std::array<double, 3>& position) const {
std::array<uint32_t, 3> box_coord;
box_coord[0] = (floor(position[0]) - grid_dimensions_[0]) / box_length_;
box_coord[1] = (floor(position[1]) - grid_dimensions_[2]) / box_length_;
box_coord[2] = (floor(position[2]) - grid_dimensions_[4]) / box_length_;
return GetBoxIndex(box_coord);
}
  /// Sets the min/max dimension values that need to be surpassed to trigger
  /// a grid-growth event (see CheckGridGrowth).
  void SetDimensionThresholds(int32_t min, int32_t max) {
    threshold_dimensions_[0] = min;
    threshold_dimensions_[1] = max;
  }

  /// Gets the size of the largest object in the grid
  double GetLargestObjectSize() const { return largest_object_size_; }

  /// Returns {x_min, x_max, y_min, y_max, z_min, z_max} of the padded grid.
  std::array<int32_t, 6>& GetDimensions() { return grid_dimensions_; }

  std::array<int32_t, 2>& GetDimensionThresholds() {
    return threshold_dimensions_;
  }

  /// Number of boxes along each axis (includes the padding layer).
  std::array<uint32_t, 3>& GetNumBoxes() { return num_boxes_axis_; }

  /// Edge length of a single box.
  uint32_t GetBoxLength() { return box_length_; }

  /// True if the last UpdateGrid pass grew beyond the stored thresholds.
  bool HasGrown() { return has_grown_; }

  /// Converts a flat box index back to (x, y, z) box coordinates.
  std::array<uint32_t, 3> GetBoxCoordinates(size_t box_idx) const {
    std::array<uint32_t, 3> box_coord;
    box_coord[2] = box_idx / num_boxes_xy_;
    auto remainder = box_idx % num_boxes_xy_;
    box_coord[1] = remainder / num_boxes_axis_[0];
    box_coord[0] = remainder % num_boxes_axis_[0];
    return box_coord;
  }

  bool IsInitialized() { return initialized_; }
  /// @brief Gets the successor list
  ///
  /// @param successors The successors
  ///
  /// @tparam TUint32 A uint32 type (could also be cl_uint)
  ///
  template <typename TUint32>
  void GetSuccessors(std::vector<TUint32>* successors) {
    // NOTE(review): only simulation object type 0 is exported here --
    // confirm whether multiple object types need to be handled.
    uint16_t type = 0;
    for (size_t i = 0; i < successors_.size(type); i++) {
      auto sh = SoHandle(type, i);
      (*successors)[i] = successors_[sh].GetElementIdx();
    }
  }
/// @brief Gets information about the grid boxes (i.e. which simulation
/// objects reside in each box, wich can be retrieved in
/// combination with the successor list
///
/// @param starts The gpu starts
/// @param lengths The gpu lengths
///
/// @tparam TUint32 A uint32 type (could also be cl_uint)
/// @tparam TUint16 A uint32 type (could also be cl_ushort)
///
template <typename TUint32, typename TUint16>
void GetBoxInfo(std::vector<TUint32>* starts, std::vector<TUint16>* lengths) {
starts->resize(boxes_.size());
lengths->resize(boxes_.size());
size_t i = 0;
for (auto& box : boxes_) {
(*starts)[i] = box.start_.load().GetElementIdx();
(*lengths)[i] = box.length_;
i++;
}
}
  /// @brief Gets the information about the grid
  ///
  /// @param box_length The grid's box length
  /// @param num_boxes_axis The number boxes along each axis of the grid
  /// @param grid_dimensions The grid's dimensions
  ///
  /// @tparam TUint32 A uint32 type (could also be cl_uint)
  /// @tparam TInt32 A int32 type (could be cl_int)
  ///
  template <typename TUint32, typename TInt32>
  void GetGridInfo(TUint32* box_length, std::array<TUint32, 3>* num_boxes_axis,
                   std::array<TInt32, 3>* grid_dimensions) {
    box_length[0] = box_length_;
    (*num_boxes_axis)[0] = num_boxes_axis_[0];
    (*num_boxes_axis)[1] = num_boxes_axis_[1];
    (*num_boxes_axis)[2] = num_boxes_axis_[2];
    // Only the minimum corner of each dimension is exported
    // (grid_dimensions_ indices 0, 2 and 4).
    (*grid_dimensions)[0] = grid_dimensions_[0];
    (*grid_dimensions)[1] = grid_dimensions_[2];
    (*grid_dimensions)[2] = grid_dimensions_[4];
  }
 private:
  /// The vector containing all the boxes in the grid
  std::vector<Box> boxes_;
  /// Length of a Box
  uint32_t box_length_ = 1;
  /// Stores the number of boxes for each axis
  std::array<uint32_t, 3> num_boxes_axis_ = {{0}};
  /// Number of boxes in the xy plane (=num_boxes_axis_[0] * num_boxes_axis_[1])
  size_t num_boxes_xy_ = 0;
  /// Implements linked list - array index = key, value: next element
  ///
  ///     // Usage
  ///     SoHandle current_element = ...;
  ///     SoHandle next_element = successors_[current_element];
  SimulationObjectVector<SoHandle, TSimulation> successors_;
  /// Determines which boxes to search neighbors in (see enum Adjacency)
  /// NOTE(review): has no default initializer and is only assigned in
  /// Initialize() -- confirm the grid is never queried beforehand.
  Adjacency adjacency_;
  /// The size of the largest object in the simulation
  double largest_object_size_ = 0;
  /// Cube which contains all simulation objects
  /// {x_min, x_max, y_min, y_max, z_min, z_max}
  std::array<int32_t, 6> grid_dimensions_;
  /// Stores the min / max dimension value that need to be surpassed in order
  /// to trigger a diffusion grid change
  std::array<int32_t, 2> threshold_dimensions_;
  // Flag to indicate that the grid dimensions have increased
  bool has_grown_ = false;
  /// Flag to indicate if the grid has been initialized or not
  bool initialized_ = false;
  /// @brief Gets the Moore (i.e adjacent) boxes of the query box
  ///
  /// @param neighbor_boxes The neighbor boxes
  /// @param[in] box_idx The query box
  ///
  void GetMooreBoxes(FixedSizeVector<const Box*, 27>* neighbor_boxes,
                     size_t box_idx) const {
    // The query box is always pushed first (box 0 -- see NeighborIterator).
    // The padding layer added in UpdateGrid keeps all index offsets below
    // inside boxes_ for any interior box index.
    neighbor_boxes->push_back(GetBoxPointer(box_idx));

    // Adjacent 6 (top, down, left, right, front and back)
    if (adjacency_ >= kLow) {
      neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_xy_));
      neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_xy_));
      neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_axis_[0]));
      neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_axis_[0]));
      neighbor_boxes->push_back(GetBoxPointer(box_idx - 1));
      neighbor_boxes->push_back(GetBoxPointer(box_idx + 1));
    }

    // Adjacent 12
    if (adjacency_ >= kMedium) {
      neighbor_boxes->push_back(
          GetBoxPointer(box_idx - num_boxes_xy_ - num_boxes_axis_[0]));
      neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_xy_ - 1));
      neighbor_boxes->push_back(
          GetBoxPointer(box_idx - num_boxes_axis_[0] - 1));
      neighbor_boxes->push_back(
          GetBoxPointer(box_idx + num_boxes_xy_ - num_boxes_axis_[0]));
      neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_xy_ - 1));
      neighbor_boxes->push_back(
          GetBoxPointer(box_idx + num_boxes_axis_[0] - 1));
      neighbor_boxes->push_back(
          GetBoxPointer(box_idx - num_boxes_xy_ + num_boxes_axis_[0]));
      neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_xy_ + 1));
      neighbor_boxes->push_back(
          GetBoxPointer(box_idx - num_boxes_axis_[0] + 1));
      neighbor_boxes->push_back(
          GetBoxPointer(box_idx + num_boxes_xy_ + num_boxes_axis_[0]));
      neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_xy_ + 1));
      neighbor_boxes->push_back(
          GetBoxPointer(box_idx + num_boxes_axis_[0] + 1));
    }

    // Adjacent 8
    if (adjacency_ >= kHigh) {
      neighbor_boxes->push_back(
          GetBoxPointer(box_idx - num_boxes_xy_ - num_boxes_axis_[0] - 1));
      neighbor_boxes->push_back(
          GetBoxPointer(box_idx - num_boxes_xy_ - num_boxes_axis_[0] + 1));
      neighbor_boxes->push_back(
          GetBoxPointer(box_idx - num_boxes_xy_ + num_boxes_axis_[0] - 1));
      neighbor_boxes->push_back(
          GetBoxPointer(box_idx - num_boxes_xy_ + num_boxes_axis_[0] + 1));
      neighbor_boxes->push_back(
          GetBoxPointer(box_idx + num_boxes_xy_ - num_boxes_axis_[0] - 1));
      neighbor_boxes->push_back(
          GetBoxPointer(box_idx + num_boxes_xy_ - num_boxes_axis_[0] + 1));
      neighbor_boxes->push_back(
          GetBoxPointer(box_idx + num_boxes_xy_ + num_boxes_axis_[0] - 1));
      neighbor_boxes->push_back(
          GetBoxPointer(box_idx + num_boxes_xy_ + num_boxes_axis_[0] + 1));
    }
  }
/// Determines current box based on parameter box_idx and adds it together
/// with half of the surrounding boxes to the vector.
/// Legend: C = center, N = north, E = east, S = south, W = west, F = front,
/// B = back
/// For each box pair which is centro-symmetric only one box is taken --
/// e.g. E-W: E, or BNW-FSE: BNW
/// NB: for the update mechanism using a CircularBuffer the order is
/// important.
///
/// (x-axis to the right \ y-axis up)
/// z=1
/// +-----+----+-----+
/// | BNW | BN | BNE |
/// +-----+----+-----+
/// | NW | N | NE |
/// +-----+----+-----+
/// | FNW | FN | FNE |
/// +-----+----+-----+
///
/// z = 0
/// +-----+----+-----+
/// | BW | B | BE |
/// +-----+----+-----+
/// | W | C | E |
/// +-----+----+-----+
/// | FW | F | FE |
/// +-----+----+-----+
///
/// z = -1
/// +-----+----+-----+
/// | BSW | BS | BSE |
/// +-----+----+-----+
/// | SW | S | SE |
/// +-----+----+-----+
/// | FSW | FS | FSE |
/// +-----+----+-----+
///
  void GetHalfMooreBoxIndices(FixedSizeVector<size_t, 14>* neighbor_boxes,
                              size_t box_idx) const {
    // All indices are offsets from box_idx; the padding layer added in
    // UpdateGrid keeps them inside boxes_. The push order below is relied
    // upon by the CircularBuffer update mechanism -- do not reorder.
    // C
    neighbor_boxes->push_back(box_idx);
    // BW
    neighbor_boxes->push_back(box_idx + num_boxes_axis_[0] - 1);
    // FNW
    neighbor_boxes->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] - 1);
    // NW
    neighbor_boxes->push_back(box_idx + num_boxes_xy_ - 1);
    // BNW
    neighbor_boxes->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] - 1);
    // B
    neighbor_boxes->push_back(box_idx + num_boxes_axis_[0]);
    // FN
    neighbor_boxes->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0]);
    // N
    neighbor_boxes->push_back(box_idx + num_boxes_xy_);
    // BN
    neighbor_boxes->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0]);
    // E
    neighbor_boxes->push_back(box_idx + 1);
    // BE
    neighbor_boxes->push_back(box_idx + num_boxes_axis_[0] + 1);
    // FNE
    neighbor_boxes->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] + 1);
    // NE
    neighbor_boxes->push_back(box_idx + num_boxes_xy_ + 1);
    // BNE
    neighbor_boxes->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] + 1);
  }
  /// @brief Gets the pointer to the box with the given index
  ///
  /// @param[in] index The index of the box
  ///
  /// @return The pointer to the box
  ///
  const Box* GetBoxPointer(size_t index) const { return &(boxes_[index]); }

  /// @brief Gets the pointer to the box with the given index
  ///
  /// @param[in] index The index of the box
  ///
  /// @return The pointer to the box
  ///
  Box* GetBoxPointer(size_t index) { return &(boxes_[index]); }

  /// Returns the box index in the one dimensional array based on box
  /// coordinates in space
  ///
  /// @param box_coord box coordinates in space (x, y, z)
  ///
  /// @return The box index.
  ///
  size_t GetBoxIndex(const std::array<uint32_t, 3>& box_coord) const {
    // Row-major flattening: x varies fastest, then y, then z.
    return box_coord[2] * num_boxes_xy_ + box_coord[1] * num_boxes_axis_[0] +
           box_coord[0];
  }
  /// Obtains all simulation object handles in a given box
  /// Walks the successor linked list starting at the box's head; the last
  /// element is appended outside the loop because it has no successor to
  /// follow.
  void GetSoHandles(size_t box_idx, InlineVector<SoHandle, 4>* handles) const {
    auto size = boxes_[box_idx].length_.load(std::memory_order_relaxed);
    if (size == 0) {
      return;
    }
    auto current = boxes_[box_idx].start_.load(std::memory_order_relaxed);
    for (size_t i = 0; i < size - 1u; i++) {
      handles->push_back(current);
      current = successors_[current];
    }
    handles->push_back(current);
  }
  /// Obtains all simulation object handles for the given box indices and puts
  /// them in a CircularBuffer.
  void CacheSoHandles(
      const FixedSizeVector<size_t, 14>& box_indices,
      CircularBuffer<InlineVector<SoHandle, 4>, 14>* so_handles) const {
    for (uint64_t i = 0; i < box_indices.size(); i++) {
      // Fill the buffer slot in place (End + Increment is push_back without
      // a copy).
      GetSoHandles(box_indices[i], so_handles->End());
      so_handles->Increment();
    }
  }

  /// Obtains all simulation object positions for the given box indices and puts
  /// them in a CircularBuffer
  /// NOTE(review): unlike UpdateCachedPositions, the target slot is not
  /// cleared first -- this assumes the caller passes a freshly cleared
  /// buffer. Confirm.
  void CachePositions(
      const CircularBuffer<InlineVector<SoHandle, 4>, 14>& so_handles,
      ResourceManager_t* rm,
      CircularBuffer<InlineVector<std::array<double, 3>, 4>, 14>* pos_cache)
      const {
    for (uint64_t i = 0; i < 14; i++) {
      const auto& current_box_sos = so_handles[i];
      auto current_pos = pos_cache->End();
      for (uint64_t j = 0; j < current_box_sos.size(); j++) {
        rm->ApplyOnElement(current_box_sos[j], [&](auto&& element) {
          current_pos->push_back(element.GetPosition());
        });
      }
      pos_cache->Increment();
    }
  }
  /// Updates the cached simulation object handles exploiting the fact that
  /// ForEachNeighbor... iterates in rows. Therefore, so_handles from many boxes
  /// can be reused if the x-axis is incremented.
  /// \param box_indices box indices are expected to be in the following order
  ///        `C BW FNW NW BNW B FN N BN E BE FNE NE BNE`
  /// \param so_handles simulation object handles from the previous iteration
  ///        input expected to be in either
  ///        `C BW FNW NW BNW B FN N BN E BE FNE NE BNE` or
  ///        `BW FNW NW BNW C B FN N BN E BE FNE NE BNE` form. Output will be
  ///        in `BW FNW NW BNW C B FN N BN E BE FNE NE BNE` form.
  void UpdateSoHandles(
      const FixedSizeVector<size_t, 14>& box_indices,
      CircularBuffer<InlineVector<SoHandle, 4>, 14>* so_handles) const {
    // Only the five trailing boxes (indices 9-13: E BE FNE NE BNE) are new
    // when the row advances by one in x; the circular-buffer rotation
    // re-aligns the remaining nine cached entries.
    for (uint64_t i = 9; i < 14; i++) {
      auto handles = so_handles->End();
      handles->clear();
      GetSoHandles(box_indices[i], handles);
      so_handles->Increment();
    }
  }

  /// Updates the cached simulation object positions exploiting the fact that
  /// ForEachNeighbor... iterates in rows. Therefore, positions from many boxes
  /// can be reused if the x-axis is incremented.
  /// \param so_handles simulation object handles are expected to be in the
  ///        following order `C BW FNW NW BNW B FN N BN E BE FNE NE BNE`
  /// \param rm The resource manager
  /// \param positions simulation object positions from the previous iteration
  ///        input expected to be in either
  ///        `C BW FNW NW BNW B FN N BN E BE FNE NE BNE` or
  ///        `BW FNW NW BNW C B FN N BN E BE FNE NE BNE` form. Output will be
  ///        in `BW FNW NW BNW C B FN N BN E BE FNE NE BNE` form.
  void UpdateCachedPositions(
      const CircularBuffer<InlineVector<SoHandle, 4>, 14>& so_handles,
      ResourceManager_t* rm,
      CircularBuffer<InlineVector<std::array<double, 3>, 4>, 14>* positions)
      const {
    // Mirrors UpdateSoHandles: refresh only the five newly entered boxes.
    for (uint64_t i = 9; i < 14; i++) {
      const auto& current_box_sos = so_handles[i];
      auto current_pos = positions->End();
      current_pos->clear();
      for (uint64_t j = 0; j < current_box_sos.size(); j++) {
        rm->ApplyOnElement(current_box_sos[j], [&](auto&& element) {
          current_pos->push_back(element.GetPosition());
        });
      }
      positions->Increment();
    }
  }
/// Calls lambda for each cell pair for the specified box
///
/// \param lambda function that should be executed for each pair
/// \param rm ResourceManager
/// \param so_handles cached simulation object handles for the center box and
/// half of its surrounding boxes
/// \param positions cached simulation object positions for the center box and
/// half of its surrounding boxes
/// \param center_box_idx index into `so_handles` and `positions` to
/// determine the center box
/// \param squared_radius determines cutoff distance - lambda will only be
/// called if distance between neighbors squared is smaller than
/// squared_radius
  template <typename TLambda>
  void ForEachNeighborPairWithCenterBox(
      const TLambda& lambda, ResourceManager_t* rm,
      const CircularBuffer<InlineVector<SoHandle, 4>, 14>& so_handles,
      const CircularBuffer<InlineVector<std::array<double, 3>, 4>, 14>&
          positions,
      uint64_t center_box_idx, double squared_radius) const {
    const auto& soh_center_box = so_handles[center_box_idx];
    const auto& pos_center_box = positions[center_box_idx];
    if (soh_center_box.size() == 0) {
      return;
    }
    if (soh_center_box.size() > 1) {
      // pairs within the center box
      for (size_t n = 0; n < soh_center_box.size(); n++) {
        rm->ApplyOnElement(soh_center_box[n], [&, this](auto&& element_n) {
          const auto& pos_n = pos_center_box[n];
          // c starts at n + 1 so each unordered pair inside the center box
          // is visited exactly once.
          for (size_t c = n + 1; c < soh_center_box.size(); c++) {
            rm->ApplyOnElement(soh_center_box[c], [&, this](auto&& element_c) {
              const auto& pos_c = pos_center_box[c];
              if (this->SquaredEuclideanDistance(pos_c, pos_n) <
                  squared_radius) {
                lambda(element_c, soh_center_box[c], element_n,
                       soh_center_box[n]);
              }
            });
          }
        });
      }
    }

    // pairs with one cell in the center box and the other in a surrounding one
    for (size_t i = 0; i < 14; i++) {
      // skip the center box; its internal pairs were handled above
      if (i == center_box_idx) {
        continue;
      }
      const auto& soh_box = so_handles[i];
      const auto& pos_box = positions[i];
      for (size_t n = 0; n < soh_box.size(); n++) {
        rm->ApplyOnElement(soh_box[n], [&, this](auto&& element_n) {
          const auto& pos_n = pos_box[n];
          for (size_t c = 0; c < soh_center_box.size(); c++) {
            rm->ApplyOnElement(soh_center_box[c], [&, this](auto&& element_c) {
              const auto& pos_c = pos_center_box[c];
              if (this->SquaredEuclideanDistance(pos_c, pos_n) <
                  squared_radius) {
                lambda(element_c, soh_center_box[c], element_n, soh_box[n]);
              }
            });
          }
        });
      }
    }
  }
};
} // namespace bdm
#endif // GRID_H_
|
DPBB.h | /*
(c) Yaroslav Salii & the Parliament of Owls, 2019+
License:sort of CC---if you go commercial, contact me
DP-BB v.1.0 2019-02-06; DP-BB solution, also logging for the number of states fathomed
branch & bound scheme for dynamic programming; requires a lower bound function
and an upper bound (value); may be reminiscent of my OP-PC code
*/
#ifndef DPBB_H_
#define DPBB_H_
#include"RTDP.h"
#include"lower-bound.h"
#include<functional>//for std::greater
//======NAMECALLING==========================/
// Assembles the solution code "<direction>-DPBB-<lower bound>" used to name
// logs and dumps (e.g. FWD-DPBB-CHP).
inline std::string mk_DPBB_Code(const t_Direction iDirn, const std::string iLB_Code)
{
	const auto dirCode = getDirectionCode(iDirn);
	return mkSlnCode(dirCode, "DPBB", iLB_Code);
}
//lower-bound.h::elCheapoLB is CHP; thus, FWD-DPBB-CHP
//-------------------------------------------/
//==EXACT==DP==WITH==BRANCH=&=BOUND==========/
/* NOTE(review): a dangling `using t_BBDP =` immediately preceded this
   definition; an alias-declaration cannot define a class (ill-formed C++) and
   aliasing t_BBDP to itself would be redundant, so the alias was removed.
   All existing uses of the name `t_BBDP` are unaffected. */
/// Exact dynamic programming with branch & bound: a state is *fathomed*
/// (discarded, never expanded) when its value, aggregated with a lower bound
/// on the remaining (co-state) cost, cannot improve on the upper bound UB.
struct t_BBDP : public t_DP
{
	const t_cost UB; //upper bound (a value)
	const std::string LB_Code; // lower bound algorithm's name; consider a pointer to this function...
	size_t nFathomedStatesTotal{ 0 }; //running total of fathomed states over all layers

	//initialization like in ordinary DP
	//BUGFIX: LB_Code is now initialized from iLB_Code (it was left
	//default-constructed, i.e. always empty, in the original)
	t_BBDP(const t_Instance& iproblem, const t_Direction iDIR, const t_cost iUB, const std::string iLB_Code)
		: t_DP(iproblem, iDIR, mk_DPBB_Code(iDIR, iLB_Code))
		, UB(iUB)
		, LB_Code(iLB_Code) {};

	//========STATE==FATHOMING:==STATE_VAL+CO-STATE_LB<=UB========/
	/// Keep the state iff its value aggregated with the lower bound does not
	/// exceed the upper bound.
	/// NOTE(review): despite the "_strictly" in the name this is a <= test,
	/// so a state exactly matching UB survives - confirm that is intended.
	bool notFathomed_strictly(const t_cost stValue, const t_cost iLB, const t_cost iUB)
	{
		//not aware of direction; might fail for non-commutative aggregation
		return p.f.cAgr(stValue, iLB) <= iUB;
	}

	//==========B&B-AWARE====BF====COMPUTATION===========================/
	/// Computes the Bellman-function values for taskset K and queues the
	/// surviving next-layer tasksets in to_clear_nextL.
	/// \return the number of states fathomed in view of UB and elCheapoLB
	size_t compExpandBF(const t_bin& K //taskset
		, const t_bin& EK //its feasible expansions:its interfaces
		, const t_stLayer& prevL//prev. layer, for computing BF
		, t_stLayer& thisL//this layer, to fill in the BF's values
		, t_stLayer& nextL//next layer, for generating the next taskset(s)
		, t_2clear& to_clear_nextL)//next-layer tasksets to materialize serially later
	{
		size_t nFathSt = 0;
		foreach_elt(m, EK, p.dim)//for each expanding city
		{
			if (nextL.find(K | (BIT0 << m)) == nextL.end())//if we haven't yet stumbled upon such a state
			{
				bool canExpandWith_m = false;//default to ``can't expand:over budget''
				//for each (exit) point of the expanding city
				foreach_point(x, p.popInfo[m].pfirst, p.popInfo[m].plast)
				{
					if (auto xK_Cost = minmin(x, K, prevL, p, D)
						; notFathomed_strictly(xK_Cost, elCheapoLB(x, setMinus(p.wkOrd.omask, K).reset(m), p, D).first, this->UB))
					{
						canExpandWith_m = true;
						thisL[K].emplace(x, xK_Cost);
					}
					else nFathSt++;//so it's fathomed; increase the counter
				}//next (exit) x from the city m
				//expand the next layer with m if it's worth it (if at least one state is not over budget)
				if (canExpandWith_m)
				{
					to_clear_nextL.insert((K | (BIT0 << m)));
				}
			}
		}//next expanding city m\in EK
		return nFathSt;//tell the caller how many states were fathomed in view of UB and elCheapoLB
	}

	/// Materializes (inserts & clears) every next-layer taskset queued by
	/// compExpandBF; runs serially after the tasks finish, avoiding
	/// concurrent writes into nextL.
	static void collect_garbage(t_2clear& to_clear, t_stLayer& nextL) {
		for (auto& thread_queue : to_clear) {
			nextL[thread_queue].clear();
		}
	}

	//==========THE=WHOLE=SOLUTION=PROCEDURE====================/
	/// Runs the layered B&B-DP, logging per-layer statistics; recovers and
	/// dumps the solution unless every state was fathomed (UB proven tight).
	t_solution solve()
	{
		mtag l = 1; //current layer number
		for (l = 1; l < p.dim; l++)//for layers 1..dim-1;
		{
			t_2clear to_clear;
			//for each task set (ideal/filter) of cardinality l
			size_t nStates = 0;//to count the states at this layer
			size_t nFathomedStates = 0; //to count the states FATHOMED at this layer
			//=================OMP=========TASKS====================/
			//NOTE(review): the tasks below also write concurrently into the
			//shared layer maps inside compExpandBF (a critical section was
			//commented out upstream) - audit t_stLayer for thread safety.
#pragma omp parallel default (shared)
#pragma omp single nowait
			for (auto ts : layer[l])//implemented as ts.first; .second is for the states
			{
				/*recall: FWD:coMin, BWD:coMax
				t_bin fexp = D.gE(ts.first, p.ord, p.wkOrd);*/
				//compute (BF) for all (x,K): x\in fexp, K=ts.first; it returns the number of FATHOMED states
#pragma omp task untied firstprivate(ts)
				{
					const size_t fathomedHere = compExpandBF(ts.first
						, D.gE(ts.first, p.ord, p.wkOrd)
						, layer[l - 1]
						, layer[l]
						, layer[l + 1]
						, to_clear);
					const size_t statesHere = layer[l][ts.first].size();//the states associated with ts.first
					//BUGFIX: these per-layer counters are shared between
					//concurrent tasks; accumulate atomically (the original
					//raced on them, which is undefined behavior)
#pragma omp atomic
					nFathomedStates += fathomedHere;
#pragma omp atomic
					nStates += statesHere;
				}
			}//next task set (filter)
			//(the parallel region's implicit barrier already guarantees task
			//completion; the taskwait is kept as harmless belt-and-braces)
#pragma omp taskwait
			collect_garbage(to_clear, layer[l + 1]);
			//-----------OMP-------------TASKS-------DONE-----------/
			{//all current-layer states' values computed; tasksets not wholly fathomed were expanded.
				slnTime.vlayerDone[l] = myClock::now();//get the current time
				//measure current memory use /w respect to last recorded
				auto memUse = t_memRec();
				//record it
				slnMem.emplace_back(memUse);
				nStatesTotal += nStates;//add this layer's state count to the total
				nFathomedStatesTotal += nFathomedStates;//add this layer's fathomed states' count to the total
				//brag(layerNumber:wall-clock:delta:worstState:bestState)
				slnLog.open(logName, std::ios_base::app);
				slnLog << mkReportL(l
					, time(NULL)
					, slnTime._rel_time(0, l)
					, slnTime._rel_time(l - 1, l)
					, layer[l].size()
					, t_stateDesc()
					, t_stateDesc()
					, memUse
					, nStates
					, nFathomedStates) << "\n";
				slnLog.close();
			}
			/*now, if there's nothing to expand anymore
			(every expansion is over p.UB), layer[l+1] is empty,
			and we're done; break the cycle and stop the solution*/
			if (layer.at(l + 1).empty())
			{
				slnLog.open(logName, std::ios_base::app);
				slnLog << "\nDPBB_ALL_STATES_FATHOMED: impossible to get better than UB=" << this->UB << "\n";
				slnLog.close();
				break;
			}
		}//next layer
		auto bfEndTime_t = time(NULL);
		t_stateDesc full;
		/////////////FINAL////LAYER//////////////////////////////////////////
		if (l == p.dim)//if we didn't fathom all the states in view of UB and reached the final layer
		{
			//done the ordinary layers; proceed to the complete problem BWD:(0,\rg{1}{dim})/FWD:(\trm,\rg{1}{dim})
			//FWD: \trm==p.dim+1; BWD: (the) base==0
			ptag lastIntf = (D == FWD) ? (p.dim + 1) : (0);
			layer[p.dim].begin()->second.emplace(lastIntf, minmin(lastIntf, p.wkOrd.omask, layer[p.dim - 1], p, D));
			full = t_stateDesc{ layer[p.dim].begin()->first //taskset==omask
				, layer[p.dim].begin()->second.begin()->first //FWD: \trm, BWD: 0
				, layer[p.dim].begin()->second.begin()->second };//the whole problem's cost
			//time this doing
			slnTime.vlayerDone[p.dim] = myClock::now(); bfEndTime_t = time(NULL);
			//measure current memory use
			auto memUse = t_memRec();
			//record it
			slnMem.emplace_back(memUse);
			//log the event
			slnLog.open(logName, std::ios_base::app);
			slnLog << mkReportL(p.dim
				, bfEndTime_t
				, slnTime._rel_time(0, p.dim)
				, slnTime._rel_time(p.dim - 1, p.dim)
				, layer[p.dim].size(), full, full
				, memUse
				, this->nStatesTotal) << "\n"; //it's the last layer, tell about the total states' number
			slnLog.close();
		}
		//----------BF----DONE----------------------------------/
		//============REPORTS,==RECOVERY,==ETC.=================/
		slnLog.open(logName, std::ios_base::app);
		slnLog << "\n" << "BF DONE: " << mkTimeStamp(bfEndTime_t) << "\n" <<
			"TOTAL DURATION: "
			<< mkMsecDurationFull(slnTime._rel_time(0, l)) << "\n"
			<< "TOTAL DURATION IN SECONDS: "
			<< mkMsecDurationBrief(slnTime._rel_time(0, l)) << "\n"
			<< "\n" << "RAM USAGE AT LAST LAYER: " << to_stringBytes(slnMem.back().physMem)
			<< "\n" << "VM USAGE AT LAST LAYER: " << to_stringBytes(slnMem.back().virtMem)
			<< "\n" << "TOTAL STATES PROCESSED:" << nStatesTotal
			<< "\n" << "RAM BYTES PER STATE(APPX):" << int(slnMem.back().physMem / nStatesTotal)
			<< "\n" << "DPBB TOTAL STATES FATHOMED:" << nFathomedStatesTotal;
		slnLog.close();
		if (l == p.dim)//if we didn't fathom all the states in view of UB and reached the final layer
		{
			//recover the solution
			this->result = recoverSln(full, layer, p, D);//can now destroy this->layer
			//time the event
			slnTime.recoveryDone = myClock::now();
			//report the event
			slnLog.open(logName, std::ios_base::app);
			slnLog << "\n" << "SOLUTION RECOVERY DONE: " << mkTimeStamp(time(NULL)) << "\n" <<
				"RECOVERY TOOK: "
				<< mkMsecDurationBrief(msecDuration(slnTime.vlayerDone[p.dim], slnTime.recoveryDone))
				<< "\n"
				<< "BF + RECOVERY DURATION: "
				<< mkMsecDurationFull(msecDuration(slnTime.vlayerDone[0], slnTime.recoveryDone)) << "\n"
				<< "BF + RECOVERY DURATION IN SECONDS: "
				<< mkMsecDurationBrief(msecDuration(slnTime.vlayerDone[0], slnTime.recoveryDone)) << "\n";
			slnLog.close();
			//====================FINAL==REPORT(DUMP)==================/
			//NB:relies on result being written to this->result before
			slnDump.open(dumpName);
			slnDump << mkFullDump(p, result, D);
			slnDump.close();
		}
		else //all states fathomed: given UB is proven to be a lower bound
		{//consider not creating .dump in this case
			slnDump.open(dumpName);
			slnDump << "\n VALUE: -1\n"; //let -1 say "UB proved true"
			slnDump << "\n DPBB_ALL_STATES_FATHOMED: impossible to get better than UB=" << this->UB << "\n";
			slnDump.close();
		}
		return this->result;
	}
};
#endif // !DPBB_H_
|
target-35.c | #include <omp.h>
#include <stdlib.h>
#pragma omp declare target
__attribute__((noinline))
void
foo (int x, int y, int z, int *a, int *b)
{
  /* Test kernel dispatched on x:
     x == 0: 64 serial outer iterations, each running a parallel-for over j;
     x == 1: like x == 0 but the outer loop is an orphaned `distribute`
             (one team per chunk, static chunk size 1);
     x == 2: a single parallel-for over j for the fixed outer index y;
     else:   leaf case - atomically add z into b[y] and increment *a.  */
  if (x == 0)
    {
      int i, j;
      for (i = 0; i < 64; i++)
#pragma omp parallel for shared (a, b)
	for (j = 0; j < 32; j++)
	  foo (3, i, j, a, b);
    }
  else if (x == 1)
    {
      int i, j;
#pragma omp distribute dist_schedule (static, 1)
      for (i = 0; i < 64; i++)
#pragma omp parallel for shared (a, b)
	for (j = 0; j < 32; j++)
	  foo (3, i, j, a, b);
    }
  else if (x == 2)
    {
      int j;
#pragma omp parallel for shared (a, b)
      for (j = 0; j < 32; j++)
	foo (3, y, j, a, b);
    }
  else
    {
      /* Leaf update: atomics because many threads/teams target the same
	 a and b[y] concurrently.  */
#pragma omp atomic
      b[y] += z;
#pragma omp atomic
      *a += 1;
    }
}
/* Runs foo(x, y, z) against a fresh accumulator a = 8 and b[i] = i, then
   verifies the side effects expected for each dispatch mode of foo.
   Returns 0 on success, 1 on any mismatch.  */
__attribute__((noinline))
int
bar (int x, int y, int z)
{
  int b[64];
  int a = 8;
  for (int i = 0; i < 64; i++)
    b[i] = i;
  foo (x, y, z, &a, b);
  switch (x)
    {
    case 0:
      /* All 64 * 32 leaf calls ran: a grew by 64*32, each b[i] by sum(0..31).  */
      if (a != 8 + 64 * 32)
	return 1;
      for (int i = 0; i < 64; i++)
	if (b[i] != i + 31 * 32 / 2)
	  return 1;
      break;
    case 1:
      {
	/* Only this team's statically assigned iterations ran here.  */
	int stride = omp_get_num_teams ();
	int next = omp_get_team_num ();
	int hits = 0;
	for (int i = 0; i < 64; i++)
	  {
	    if (i == next)
	      {
		if (b[i] != i + 31 * 32 / 2)
		  return 1;
		hits++;
		next = next + stride;
	      }
	    else if (b[i] != i)
	      return 1;
	  }
	if (a < 8 || a > 8 + hits * 32)
	  return 1;
	break;
      }
    case 2:
      /* One parallel-for over j touched only b[y].  */
      if (a != 8 + 32)
	return 1;
      for (int i = 0; i < 64; i++)
	if (b[i] != i + (i == y ? 31 * 32 / 2 : 0))
	  return 1;
      break;
    case 3:
      /* Single leaf call: a bumped once, b[y] grew by z.  */
      if (a != 8 + 1)
	return 1;
      for (int i = 0; i < 64; i++)
	if (b[i] != i + (i == y ? z : 0))
	  return 1;
      break;
    default:
      break;
    }
  return 0;
}
#pragma omp end declare target
/* Driver: offloads bar() in four configurations and aborts on the first
   failure.  err is reduced over teams so any team's failure is visible.  */
int
main ()
{
  int i, j, err = 0;
  /* Mode 0: serial outer loop + nested parallel-for inside each team.  */
#pragma omp target map(tofrom:err)
#pragma omp teams reduction(+:err)
  err += bar (0, 0, 0);
  if (err)
    abort ();
  /* Mode 1: orphaned distribute inside the teams region.  */
#pragma omp target map(tofrom:err)
#pragma omp teams reduction(+:err)
  err += bar (1, 0, 0);
  if (err)
    abort ();
  /* Mode 2: distribute over i; each iteration checks one parallel-for.  */
#pragma omp target map(tofrom:err)
#pragma omp teams reduction(+:err)
#pragma omp distribute
  for (i = 0; i < 64; i++)
    err += bar (2, i, 0);
  if (err)
    abort ();
  /* Mode 3: distribute + nested parallel-for; each leaf checks one (i,j).  */
#pragma omp target map(tofrom:err)
#pragma omp teams reduction(+:err)
#pragma omp distribute
  for (i = 0; i < 64; i++)
#pragma omp parallel for reduction(+:err)
    for (j = 0; j < 32; j++)
      err += bar (3, i, j);
  if (err)
    abort ();
  return 0;
}
|
ac3d_openmp.c | #include<stdio.h>
#include"3d_lib.c"
// vx
/* One time step of a 3-D acoustic staggered-grid finite-difference update.
 * First the particle-velocity components vx, vy, vz are advanced from the
 * pressure field tpp (the three loops share one parallel region with nowait),
 * then - after an explicit barrier - the pressure is advanced from the new
 * velocities.  Each field update has two parts: boundary strips of width
 * `ext` (using the p*b* memory arrays and the bhalf/ahalf/bfull/afull
 * profiles - presumably absorbing-boundary state; confirm against 3d_lib.c)
 * and the interior body.
 * Arrays are flattened as [z + x*nz + y*nz*nx]; the BD_n{x,y,z}_* arguments
 * give the per-field dimensions, rho and lambda the material parameters,
 * fdc the finite-difference coefficients, dt/dx/dy/dz the step sizes.  */
void ac3d_openmp(double *vx, int BD_nx_vx, int BD_ny_vx, int BD_nz_vx,
                 double *pvxbtpp,
                 double *vy, int BD_nx_vy, int BD_ny_vy, int BD_nz_vy,
                 double *pvybtpp,
                 double *vz, int BD_nx_vz, int BD_ny_vz, int BD_nz_vz,
                 double *pvzbtpp,
                 double *tpp, double *ptppbvx, double *ptppbvy, double *ptppbvz,
                 int BD_nx_tpp, int BD_ny_tpp, int BD_nz_tpp,
                 double *rho, double *lambda, double *fdc,
                 double dt, double dx, double dy, double dz, int ext,
                 double *bhalf, double *ahalf, double *bfull, double *afull)
{
    int i,j,k;
    //********************* V_X *********************//
#pragma omp parallel
    {
        // vxbtxx
        // the two non-loop indices are collapsed; the strip index j is private
#pragma omp for collapse(2) private(j) nowait
        for(k=0; k<BD_ny_vx; k++)
        {
            for(i=0; i<BD_nz_vx; i++)
            {
                // boundary strips on both x faces (handled pairwise inside bound_x_3d)
                for(j=1; j<ext; j++)
                {
                    bound_x_3d(vx, BD_nz_vx, BD_nx_vx, BD_ny_vx, i, j, k,
                               tpp, BD_nz_tpp, BD_nx_tpp, 1,
                               2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)),
                               2.0/(*(rho+i+(BD_nx_vx-1-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+(BD_nx_vx-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)),
                               dx, dt, pvxbtpp, bhalf, ahalf, ext, fdc);
                }
                // interior body (rho averaged between the two cells straddling vx)
                for(j=ext; j<BD_nx_vx-ext; j++)
                {
                    body_x_3d(vx, BD_nz_vx, BD_nx_vx, BD_ny_vx, i, j, k,
                              tpp, BD_nz_tpp, BD_nx_tpp, 1,
                              2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)),
                              dx, dt, fdc);
                }
            }
        }
        //********************* V_Y *********************//
        // vybtyy
#pragma omp for collapse(2) private(k) nowait
        for(j=0; j<BD_nx_vy; j++)
        {
            for(i=0; i<BD_nz_vy; i++)
            {
                for(k=1; k<ext; k++)
                {
                    bound_y_3d(vy, BD_nz_vy, BD_nx_vy, BD_ny_vy, i, j, k,
                               tpp, BD_nz_tpp, BD_nx_tpp, 1,
                               2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp)),
                               2.0/(*(rho+i+j*BD_nz_tpp+(BD_ny_vy-1-k)*BD_nz_tpp*BD_nx_tpp)+*(rho+i+j*BD_nz_tpp+(BD_ny_vy-k)*BD_nz_tpp*BD_nx_tpp)),
                               dy,dt, pvybtpp, bhalf, ahalf, ext, fdc);
                }
                for(k=ext; k<BD_ny_vy-ext; k++)
                {
                    body_y_3d(vy, BD_nz_vy, BD_nx_vy, BD_ny_vy, i, j, k,
                              tpp, BD_nz_tpp, BD_nx_tpp, 1,
                              2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp)),
                              dy,dt, fdc);
                }
            }
        }
        //********************* V_Z *********************//
        // vzbtzz
#pragma omp for collapse(2) private(i) nowait
        for(j=0; j<BD_nx_vz; j++)
        {
            for(k=0; k<BD_ny_vz; k++)
            {
                for(i=1;i<ext;i++)
                {
                    unlimited_bound_z_3d(vz, BD_nz_vz, BD_nx_vz, BD_ny_vz, i, j, k,
                                         tpp, BD_nz_tpp, BD_nx_tpp, 1,
                                         2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+(i+1)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)),
                                         2.0/(*(rho+(BD_nz_vz-1-i)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+(BD_nz_vz-i)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)),
                                         dz,dt, pvzbtpp, bhalf, ahalf, ext, fdc);
                }
                for(i=ext; i<BD_nz_vz-ext; i++)
                {
                    body_z_3d(vz, BD_nz_vz, BD_nx_vz, BD_ny_vz, i, j, k,
                              tpp, BD_nz_tpp, BD_nx_tpp, 1,
                              2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+(i+1)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)),
                              dz,dt, fdc);
                }
            }
        }
        // all three velocity updates above used nowait: synchronize before
        // the pressure update reads the freshly written velocities
#pragma omp barrier
        //************ T_X ************//
#pragma omp for collapse(2) private(j)
        for(k=0; k<BD_ny_tpp; k++)
        {
            for(i=0; i<BD_nz_tpp; i++)
            {
                // note: the pressure strip starts at 2 (velocities start at 1)
                for(j=2; j<ext; j++)
                {
                    ac_bound_tpp_x_3d(tpp, BD_nz_tpp, BD_nx_tpp, BD_ny_tpp,
                                      i, j, k, vx, BD_nz_vx, BD_nx_vx, 2, lambda, dx, dt,
                                      ptppbvx, bfull, afull, ext, fdc);
                }
                for(j=ext; j<BD_nx_tpp-ext; j++)
                {
                    ac_body_tpp_x_3d(tpp, BD_nz_tpp, BD_nx_tpp, BD_ny_tpp,
                                     i, j, k, vx, BD_nz_vx, BD_nx_vx, 2, lambda, dx, dt, fdc);
                }
            }
        }
        //************ T_Y ************//
#pragma omp for collapse(2) private(k)
        for(i=0; i<BD_nz_tpp; i++)
        {
            for(j=0; j<BD_nx_tpp; j++)
            {
                for(k=2; k<ext; k++)
                {
                    ac_bound_tpp_y_3d(tpp, BD_nz_tpp, BD_nx_tpp, BD_ny_tpp,
                                      i, j, k, vy, BD_nz_vy, BD_nx_vy, 2, lambda, dy, dt,
                                      ptppbvy, bfull, afull, ext, fdc);
                }
                for(k=ext; k<BD_ny_tpp-ext; k++)
                {
                    ac_body_tpp_y_3d(tpp, BD_nz_tpp, BD_nx_tpp, BD_ny_tpp,
                                     i, j, k, vy, BD_nz_vy, BD_nx_vy, 2, lambda, dy, dt, fdc);
                }
            }
        }
        //************ T_Z ************//
#pragma omp for collapse(2) private(i)
        for(j=0; j<BD_nx_tpp; j++)
        {
            for(k=0; k<BD_ny_tpp; k++)
            {
                for(i=2; i<ext; i++)
                {
                    ac_unlimited_bound_tpp_z_3d(tpp, BD_nz_tpp, BD_nx_tpp, BD_ny_tpp,
                                                i, j, k, vz, BD_nz_vz, BD_nx_vz, 2, lambda, dz, dt,
                                                ptppbvz, bfull, afull, ext, fdc);
                }
                for(i=ext; i<BD_nz_tpp-ext; i++)
                {
                    ac_body_tpp_z_3d(tpp, BD_nz_tpp, BD_nx_tpp, BD_ny_tpp,
                                     i, j, k, vz, BD_nz_vz, BD_nx_vz, 2, lambda, dz, dt, fdc);
                }
            }
        }
    }
}
|
3d25pt_var.c | /*
* Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y, storing X - Y in RESULT.
 * Y is normalized in place so that its microsecond field lines up with X's.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into Y until its usec no longer exceeds X's. */
  if (y->tv_usec > x->tv_usec)
  {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_sec += carry;
    y->tv_usec -= 1000000 * carry;
  }
  /* Push any surplus whole seconds of the usec gap back into Y. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_sec -= carry;
    y->tv_usec += 1000000 * carry;
  }
  /* After normalization tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative iff X's (adjusted) seconds precede Y's. */
  return x->tv_sec < y->tv_sec;
}
/* Benchmark driver for the order-4 (25-point) axis-symmetric
 * variable-coefficient stencil.
 * argv[1..3] = interior Nx, Ny, Nz (8 ghost points are added to each),
 * argv[4]    = number of time steps Nt.
 * Runs the kernel TESTS times and reports the fastest wall-clock time. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* BUGFIX: defaults keep the run well defined when arguments are missing;
   * the original read uninitialized Nx/Ny/Nz/Nt (undefined behavior). */
  int Nx = 32 + 8, Ny = 32 + 8, Nz = 32 + 8, Nt = 5;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // allocate the arrays: A holds two time levels, coef the 13 coefficients
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 16;
  tile_size[3] = 32;
  tile_size[4] = -1;  // sentinel terminates the list
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables with reproducible pseudo-random data
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[(t)%2][i  ][j  ][k  ] +
              coef[1][i][j][k] * (A[(t)%2][i-1][j  ][k  ] + A[(t)%2][i+1][j  ][k  ]) +
              coef[2][i][j][k] * (A[(t)%2][i  ][j-1][k  ] + A[(t)%2][i  ][j+1][k  ]) +
              coef[3][i][j][k] * (A[(t)%2][i  ][j  ][k-1] + A[(t)%2][i  ][j  ][k+1]) +
              coef[4][i][j][k] * (A[(t)%2][i-2][j  ][k  ] + A[(t)%2][i+2][j  ][k  ]) +
              coef[5][i][j][k] * (A[(t)%2][i  ][j-2][k  ] + A[(t)%2][i  ][j+2][k  ]) +
              coef[6][i][j][k] * (A[(t)%2][i  ][j  ][k-2] + A[(t)%2][i  ][j  ][k+2]) +
              coef[7][i][j][k] * (A[(t)%2][i-3][j  ][k  ] + A[(t)%2][i+3][j  ][k  ]) +
              coef[8][i][j][k] * (A[(t)%2][i  ][j-3][k  ] + A[(t)%2][i  ][j+3][k  ]) +
              coef[9][i][j][k] * (A[(t)%2][i  ][j  ][k-3] + A[(t)%2][i  ][j  ][k+3]) +
              coef[10][i][j][k]* (A[(t)%2][i-4][j  ][k  ] + A[(t)%2][i+4][j  ][k  ]) +
              coef[11][i][j][k]* (A[(t)%2][i  ][j-4][k  ] + A[(t)%2][i  ][j+4][k  ]) +
              coef[12][i][j][k]* (A[(t)%2][i  ][j  ][k-4] + A[(t)%2][i  ][j  ][k+4]) ;
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* BUGFIX: the local macro is MIN; lowercase `min` was undeclared. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays (including the top-level pointers, which the
  // original leaked)
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
adept_source.h | // Copyright 2019 MIT License - Tim Kaler; Parallel Adept, Original license below.
/* adept_source.h - Source code for the Adept library
Copyright (C) 2012-2015 The University of Reading
Copyright (C) 2015-2017 European Centre for Medium-Range Weather Forecasts
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.
This file was created automatically by script ./create_adept_source_header
on Sun 28 Jan 21:05:14 GMT 2018
It contains a concatenation of the source files from the Adept
library. The idea is that a program may #include this file in one of
its source files (typically the one containing the main function),
and then the Adept library will be built into the executable without
the need to link to an external library. All other source files
should just #include <adept.h> or <adept_arrays.h>. The ability to
use Adept in this way makes it easier to distribute an Adept package
that is usable on non-Unix platforms that are unable to use the
autoconf configure script to build external libraries.
If HAVE_BLAS is defined below then matrix multiplication will be
enabled; the BLAS library should be provided at the link stage
although no header file is required. If HAVE_LAPACK is defined
below then linear algebra routines will be enabled (matrix inverse
and solving linear systems of equations); again, the LAPACK library
should be provided at the link stage although no header file is
required.
*/
// #define TFK_DEBUG_PRINTS
/* Feel free to delete this warning: */
//#ifdef _MSC_FULL_VER
//#pragma message("warning: the adept_source.h header file has not been edited so BLAS matrix multiplication and LAPACK linear-algebra support have been disabled")
//#else
//#warning "The adept_source.h header file has not been edited so BLAS matrix multiplication and LAPACK linear-algebra support have been disabled"
//#endif
/* Uncomment this if you are linking to the BLAS library (header file
not required) to enable matrix multiplication */
#define HAVE_BLAS 1
/* Uncomment this if you are linking to the LAPACK library (header
file not required) */
//#define HAVE_LAPACK 1
/* Uncomment this if you have the cblas.h header from OpenBLAS */
//#define HAVE_OPENBLAS_CBLAS_HEADER
/*
The individual source files now follow.
*/
#ifndef AdeptSource_H
#define AdeptSource_H 1
// =================================================================
// Contents of config_platform_independent.h
// =================================================================
/* config_platform_independent.h. Generated from config_platform_independent.h.in by configure. */
/* config_platform_independent.h.in. */
/* Name of package */
#define PACKAGE "adept"
/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT "tfk@mit.edu"
/* Define to the full name of this package. */
#define PACKAGE_NAME "adept"
/* Define to the full name and version of this package. */
#define PACKAGE_STRING "adept 2.0.5"
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "adept"
/* Define to the home page for this package. */
#define PACKAGE_URL "http://www.met.reading.ac.uk/clouds/adept/"
/* Define to the version of this package. */
#define PACKAGE_VERSION "2.0.5"
/* Version number of package */
#define VERSION "2.0.5"
// =================================================================
// Contents of cpplapack.h
// =================================================================
/* cpplapack.h -- C++ interface to LAPACK
Copyright (C) 2015-2016 European Centre for Medium-Range Weather Forecasts
Author: Robin Hogan <r.j.hogan@ecmwf.int>
This file is part of the Adept library.
*/
#ifndef AdeptCppLapack_H
#define AdeptCppLapack_H 1
#include <vector>
#include <set>
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef HAVE_LAPACK
extern "C" {
// External LAPACK Fortran functions
void sgetrf_(const int* m, const int* n, float* a, const int* lda, int* ipiv, int* info);
void dgetrf_(const int* m, const int* n, double* a, const int* lda, int* ipiv, int* info);
void sgetri_(const int* n, float* a, const int* lda, const int* ipiv,
float* work, const int* lwork, int* info);
void dgetri_(const int* n, double* a, const int* lda, const int* ipiv,
double* work, const int* lwork, int* info);
void ssytrf_(const char* uplo, const int* n, float* a, const int* lda, int* ipiv,
float* work, const int* lwork, int* info);
void dsytrf_(const char* uplo, const int* n, double* a, const int* lda, int* ipiv,
double* work, const int* lwork, int* info);
void ssytri_(const char* uplo, const int* n, float* a, const int* lda,
const int* ipiv, float* work, int* info);
void dsytri_(const char* uplo, const int* n, double* a, const int* lda,
const int* ipiv, double* work, int* info);
void ssysv_(const char* uplo, const int* n, const int* nrhs, float* a, const int* lda,
int* ipiv, float* b, const int* ldb, float* work, const int* lwork, int* info);
void dsysv_(const char* uplo, const int* n, const int* nrhs, double* a, const int* lda,
int* ipiv, double* b, const int* ldb, double* work, const int* lwork, int* info);
void sgesv_(const int* n, const int* nrhs, float* a, const int* lda,
int* ipiv, float* b, const int* ldb, int* info);
void dgesv_(const int* n, const int* nrhs, double* a, const int* lda,
int* ipiv, double* b, const int* ldb, int* info);
}
namespace adept {
  // Overloaded functions provide both single &
  // double precision versions, and prevents the huge lapacke.h having
  // to be included in all user code
  namespace internal {
    typedef int lapack_int;

    // ---- Factorize a general matrix (LU with partial pivoting) ----
    // Returns the LAPACK `info` code: 0 on success.
    inline
    int cpplapack_getrf(int n, float* a, int lda, int* ipiv) {
      int info;
      sgetrf_(&n, &n, a, &lda, ipiv, &info);
      return info;
    }
    inline
    int cpplapack_getrf(int n, double* a, int lda, int* ipiv) {
      int info;
      dgetrf_(&n, &n, a, &lda, ipiv, &info);
      return info;
    }

    // ---- Invert a general matrix from its getrf factorization ----
    // A workspace query (lwork = -1) sizes the scratch buffer first.
    inline
    int cpplapack_getri(int n, float* a, int lda, const int* ipiv) {
      int info;
      float work_query;
      int lwork = -1;
      // Find out how much work memory required
      sgetri_(&n, a, &lda, ipiv, &work_query, &lwork, &info);
      lwork = static_cast<int>(work_query);
      std::vector<float> work(static_cast<size_t>(lwork));
      // Do full calculation
      sgetri_(&n, a, &lda, ipiv, &work[0], &lwork, &info);
      return info;
    }
    inline
    int cpplapack_getri(int n, double* a, int lda, const int* ipiv) {
      int info;
      double work_query;
      int lwork = -1;
      // Find out how much work memory required
      dgetri_(&n, a, &lda, ipiv, &work_query, &lwork, &info);
      lwork = static_cast<int>(work_query);
      std::vector<double> work(static_cast<size_t>(lwork));
      // Do full calculation
      dgetri_(&n, a, &lda, ipiv, &work[0], &lwork, &info);
      return info;
    }

    // ---- Factorize a symmetric matrix (Bunch-Kaufman) ----
    inline
    int cpplapack_sytrf(char uplo, int n, float* a, int lda, int* ipiv) {
      int info;
      float work_query;
      int lwork = -1;
      // Find out how much work memory required
      ssytrf_(&uplo, &n, a, &lda, ipiv, &work_query, &lwork, &info);
      lwork = static_cast<int>(work_query);
      std::vector<float> work(static_cast<size_t>(lwork));
      // Do full calculation
      ssytrf_(&uplo, &n, a, &lda, ipiv, &work[0], &lwork, &info);
      return info;
    }
    inline
    int cpplapack_sytrf(char uplo, int n, double* a, int lda, int* ipiv) {
      int info;
      double work_query;
      int lwork = -1;
      // Find out how much work memory required
      dsytrf_(&uplo, &n, a, &lda, ipiv, &work_query, &lwork, &info);
      lwork = static_cast<int>(work_query);
      std::vector<double> work(static_cast<size_t>(lwork));
      // Do full calculation
      dsytrf_(&uplo, &n, a, &lda, ipiv, &work[0], &lwork, &info);
      return info;
    }

    // ---- Invert a symmetric matrix from its sytrf factorization ----
    // sytri needs a fixed workspace of n elements (no query supported).
    inline
    int cpplapack_sytri(char uplo, int n, float* a, int lda, const int* ipiv) {
      int info;
      std::vector<float> work(n);
      ssytri_(&uplo, &n, a, &lda, ipiv, &work[0], &info);
      return info;
    }
    inline
    int cpplapack_sytri(char uplo, int n, double* a, int lda, const int* ipiv) {
      int info;
      std::vector<double> work(n);
      dsytri_(&uplo, &n, a, &lda, ipiv, &work[0], &info);
      return info;
    }

    // ---- Solve a general linear system A*X = B ----
    inline
    int cpplapack_gesv(int n, int nrhs, float* a, int lda,
		       int* ipiv, float* b, int ldb) {
      int info;
      // BUGFIX: the 7th argument is the leading dimension of b; the
      // original passed &lda twice, silently ignoring the caller's ldb.
      sgesv_(&n, &nrhs, a, &lda, ipiv, b, &ldb, &info);
      return info;
    }
    inline
    int cpplapack_gesv(int n, int nrhs, double* a, int lda,
		       int* ipiv, double* b, int ldb) {
      int info;
      // BUGFIX: pass &ldb (was &lda) as the leading dimension of b.
      dgesv_(&n, &nrhs, a, &lda, ipiv, b, &ldb, &info);
      return info;
    }

    // ---- Solve a symmetric linear system A*X = B ----
    inline
    int cpplapack_sysv(char uplo, int n, int nrhs, float* a, int lda, int* ipiv,
		       float* b, int ldb) {
      int info;
      float work_query;
      int lwork = -1;
      // Find out how much work memory required
      ssysv_(&uplo, &n, &nrhs, a, &lda, ipiv, b, &ldb, &work_query, &lwork, &info);
      lwork = static_cast<int>(work_query);
      std::vector<float> work(static_cast<size_t>(lwork));
      // Do full calculation
      ssysv_(&uplo, &n, &nrhs, a, &lda, ipiv, b, &ldb, &work[0], &lwork, &info);
      return info;
    }
    inline
    int cpplapack_sysv(char uplo, int n, int nrhs, double* a, int lda, int* ipiv,
		       double* b, int ldb) {
      int info;
      double work_query;
      int lwork = -1;
      // Find out how much work memory required
      dsysv_(&uplo, &n, &nrhs, a, &lda, ipiv, b, &ldb, &work_query, &lwork, &info);
      lwork = static_cast<int>(work_query);
      std::vector<double> work(static_cast<size_t>(lwork));
      // Do full calculation
      dsysv_(&uplo, &n, &nrhs, a, &lda, ipiv, b, &ldb, &work[0], &lwork, &info);
      return info;
    }
  }
}
#endif
#endif
// =================================================================
// Contents of Array.cpp
// =================================================================
/* Array.cpp -- Functions and global variables controlling array behaviour
Copyright (C) 2015-2016 European Centre for Medium-Range Weather Forecasts
Robin Hogan <r.j.hogan@ecmwf.int>
This file is part of the Adept library.
*/
#include <adept/Array.h>
namespace adept {
namespace internal {
// If true, multi-dimensional arrays are laid out with the last
// dimension varying fastest (C order) rather than Fortran order.
bool array_row_major_order = true;
// bool array_print_curly_brackets = true;
// Variables describing how arrays are written to a stream.
// Their defaults correspond to PRINT_STYLE_CURLY; set_array_print_style
// overwrites all of them when the style is changed.
ArrayPrintStyle array_print_style = PRINT_STYLE_CURLY;
// Separator between elements of a rank-1 array
std::string vector_separator = ", ";
// Text emitted before/after a rank-1 array
std::string vector_print_before = "{";
std::string vector_print_after = "}";
// Brackets opening/closing each dimension of a rank>1 array
std::string array_opening_bracket = "{";
std::string array_closing_bracket = "}";
// Separator between elements contiguous in memory, and between
// slices that are not contiguous (e.g. between rows)
std::string array_contiguous_separator = ", ";
std::string array_non_contiguous_separator = ",\n";
// Text emitted before/after an entire rank>1 array
std::string array_print_before = "\n{";
std::string array_print_after = "}";
// Text surrounding the rank when printing an empty array
std::string array_print_empty_before = "(empty rank-";
std::string array_print_empty_after = " array)";
// Indent continuation rows to line up with the opening bracket?
bool array_print_indent = true;
// Include the rank when printing an empty array?
bool array_print_empty_rank = true;
}
// Configure every array-formatting global in adept::internal to one of
// the preset styles (plain whitespace, CSV, Matlab, or curly-brace).
// Throws invalid_operation for an unrecognized style; on success also
// records the chosen style in array_print_style.
void set_array_print_style(ArrayPrintStyle ps) {
  using namespace internal;
  switch (ps) {
  case PRINT_STYLE_PLAIN:
    // Bare numbers separated by spaces/newlines, no brackets
    vector_separator = " ";
    vector_print_before = "";
    vector_print_after = "";
    array_opening_bracket = "";
    array_closing_bracket = "";
    array_contiguous_separator = " ";
    array_non_contiguous_separator = "\n";
    array_print_before = "";
    array_print_after = "";
    array_print_empty_before = "(empty rank-";
    array_print_empty_after = " array)";
    array_print_indent = false;
    array_print_empty_rank = true;
    break;
  case PRINT_STYLE_CSV:
    // Comma-separated values, one row per line, no brackets
    vector_separator = ", ";
    vector_print_before = "";
    vector_print_after = "";
    array_opening_bracket = "";
    array_closing_bracket = "";
    array_contiguous_separator = ", ";
    array_non_contiguous_separator = "\n";
    array_print_before = "";
    array_print_after = "";
    array_print_empty_before = "empty";
    array_print_empty_after = "";
    array_print_indent = false;
    array_print_empty_rank = false;
    break;
  case PRINT_STYLE_MATLAB:
    // Matlab literal syntax: [ ... ; ... ]
    vector_separator = " ";
    vector_print_before = "[";
    vector_print_after = "]";
    array_opening_bracket = "";
    array_closing_bracket = "";
    array_contiguous_separator = " ";
    array_non_contiguous_separator = ";\n";
    array_print_before = "[";
    array_print_after = "]";
    array_print_empty_before = "[";
    array_print_empty_after = "]";
    array_print_indent = true;
    array_print_empty_rank = false;
    break;
  case PRINT_STYLE_CURLY:
    // C-style nested braces (the library default)
    vector_separator = ", ";
    vector_print_before = "{";
    vector_print_after = "}";
    array_opening_bracket = "{";
    array_closing_bracket = "}";
    array_contiguous_separator = ", ";
    array_non_contiguous_separator = ",\n";
    array_print_before = "\n{";
    array_print_after = "}";
    array_print_empty_before = "(empty rank-";
    array_print_empty_after = " array)";
    array_print_indent = true;
    array_print_empty_rank = true;
    break;
  default:
    throw invalid_operation("Array print style not understood");
  }
  array_print_style = ps;
}
}
// =================================================================
// Contents of Stack.cpp
// =================================================================
/* Stack.cpp -- Stack for storing automatic differentiation information
Copyright (C) 2012-2014 University of Reading
Copyright (C) 2015 European Centre for Medium-Range Weather Forecasts
Author: Robin Hogan <r.j.hogan@ecmwf.int>
This file is part of the Adept library.
*/
#include <iostream>
#include <cstring> // For memcpy
#include <vector>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <adept/Stack.h>
namespace adept {
using namespace internal;
// Global pointers to the current thread, the second of which is
// thread safe. The first is only used if ADEPT_STACK_THREAD_UNSAFE
// is defined.
//ADEPT_THREAD_LOCAL Stack* _stack_current_thread = 0;
// Pointer to the stack that aReal objects currently record onto.
// NOTE(review): the thread-local qualifier above is commented out, so
// _stack_current_thread is in fact shared between threads here —
// presumably intentional in this fork, but worth confirming.
Stack* _stack_current_thread = 0;
// Global (deliberately non-thread-safe) variant, used only when a
// stack is constructed with is_thread_unsafe_ set.
Stack* _stack_current_thread_unsafe = 0;
// MEMBER FUNCTIONS OF THE STACK CLASS
// Destructor: frees dynamically allocated memory (if any)
Stack::~Stack() {
  // If this stack is the currently active one, detach it from the
  // corresponding global pointer before "this" becomes invalid.
  Stack*& active_slot = is_thread_unsafe_
    ? _stack_current_thread_unsafe
    : _stack_current_thread;
  if (active_slot == this) {
    active_slot = 0;
  }
#ifndef ADEPT_STACK_STORAGE_STL
  // Release the manually managed gradient array, if one was allocated
  if (gradient_) {
    delete[] gradient_;
  }
#endif
}
// Make this stack "active" by copying its "this" pointer to a
// global variable; this makes it the stack that aReal objects
// subsequently interact with when being created and participating
// in mathematical expressions
void
Stack::activate()
{
// Check that we don't already have an active stack in this thread
if ((is_thread_unsafe_ && _stack_current_thread_unsafe
&& _stack_current_thread_unsafe != this)
|| ((!is_thread_unsafe_) && _stack_current_thread
&& _stack_current_thread != this)) {
throw(stack_already_active());
}
else {
if (!is_thread_unsafe_) {
_stack_current_thread = this;
}
else {
_stack_current_thread_unsafe = this;
}
}
}
// Set the maximum number of threads to be used in Jacobian
// calculations, if possible. A value of 1 indicates that OpenMP
// will not be used, while a value of 0 indicates that the number
// will match the number of available processors. Returns the
// maximum that will be used, which will be 1 if the Adept library
// was compiled without OpenMP support. Note that a value of 1 will
// disable the use of OpenMP with Adept, so Adept will then use no
// OpenMP directives or function calls. Note that if in your program
// you use OpenMP with each thread performing automatic
// differentiation with its own independent Adept stack, then
// typically only one OpenMP thread is available for each Jacobian
// calculation, regardless of whether you call this function.
int
Stack::set_max_jacobian_threads(int n)
{
  // Request the maximum number of OpenMP threads for Jacobian
  // calculations: n == 1 disables OpenMP entirely, n < 1 means "one
  // per processor", otherwise exactly n. Returns the number that
  // will actually be used, which is 1 when compiled without OpenMP.
#ifdef _OPENMP
  if (have_openmp_) {
    if (n == 1) {
      openmp_manually_disabled_ = true;
      return 1;
    }
    openmp_manually_disabled_ = false;
    omp_set_num_threads(n < 1 ? omp_get_num_procs() : n);
    return omp_get_max_threads();
  }
#endif
  return 1;
}
// Return maximum number of OpenMP threads to be used in Jacobian
// calculation
int
Stack::max_jacobian_threads() const
{
  // Number of OpenMP threads a Jacobian calculation may use:
  // 1 when OpenMP is unavailable or manually disabled, otherwise
  // whatever the OpenMP runtime currently allows.
#ifdef _OPENMP
  if (have_openmp_) {
    return openmp_manually_disabled_ ? 1 : omp_get_max_threads();
  }
#endif
  return 1;
}
// Perform the adjoint computation (reverse mode). It is assumed that
// some gradients have been assigned already, otherwise the function
// returns with an error.
void
Stack::compute_adjoint()
{
  // Perform the adjoint (reverse-mode) computation. Gradients must
  // have been assigned beforehand via initialize_gradients(); if not,
  // gradients_not_initialized is thrown.
  //
  // This fork delegates the reverse sweep to the tfk_reducer
  // parallel framework rather than walking statement_/operation_
  // stacks directly. (The original hand-rolled sweep survived here
  // only as ~140 lines of commented-out code, together with an
  // unreachable second gradients_are_initialized() branch; both have
  // been removed. Observable behavior is unchanged.)
  if (gradients_are_initialized()) {
    // Gather each worker's thread-local stacks, stop recording while
    // we process them, and merge the recorded tapes.
    tfk_reducer.get_tls_references();
    tfk_reducer.sp_tree.set_recording(false);
    tfk_reducer.collect();
    // Walk the series-parallel tree, accumulating adjoints into the
    // gradient table seeded from gradient_.
    // NOTE(review): only the returned table is deleted; whether
    // my_gradient_table is owned (and freed) by walk_tree_process is
    // not visible here — confirm against its implementation.
    tfk_gradient_table* my_gradient_table =
      new tfk_gradient_table(n_gradients_registered_, gradient_);
    tfk_gradient_table* ret =
      tfk_reducer.sp_tree.walk_tree_process(tfk_reducer.sp_tree.get_root(),
                                            my_gradient_table,
                                            n_gradients_registered_);
    delete ret;
    // Resume recording for subsequent algorithm executions
    tfk_reducer.sp_tree.set_recording(true);
    return;
  }
  // Gradients were never initialized: report and throw, matching the
  // original code path.
  printf("should never get to this point\n");
  throw(gradients_not_initialized());
}
// Perform tangent linear computation (forward mode). It is assumed
// that some gradients have been assigned already, otherwise the
// function returns with an error.
void
Stack::compute_tangent_linear()
{
if (gradients_are_initialized()) {
// Loop forward through the statements
for (uIndex ist = 1; ist < n_statements_; ist++) {
const Statement& statement = statement_[ist];
// We copy the LHS to "a" in case it appears on the RHS in any
// of the following statements
Real a = 0.0;
for (uIndex i = statement_[ist-1].end_plus_one;
i < statement.end_plus_one; i++) {
a += multiplier_[i]*gradient_[index_[i]];
}
gradient_[statement.index] = a;
}
}
else {
throw(gradients_not_initialized());
}
}
// Register n gradients
// Register n new gradients and return the index of the first one.
// Two implementations are selected by the TFK_ALLOW_UNREGISTER macro:
// when it is NOT defined (the default here), a lock-free atomic bump
// of the counters is used and gaps are never reused; when it IS
// defined, the mutex-protected gap-list allocator below runs instead.
uIndex
Stack::do_register_gradients(const uIndex& n) {
#ifndef TFK_ALLOW_UNREGISTER
  //printf("do register gradients\n");
  // Atomically grow the high-water mark and claim a contiguous range
  // [ret, ret+n) of gradient indices
  __sync_fetch_and_add(&max_gradient_, n);
  uIndex ret = __sync_fetch_and_add(&n_gradients_registered_, n);
  // Record the registrations on this worker's thread-local stack
  for (uIndex ind = ret; ind < ret+n; ind++) {
    worker_local_stacks[thread_local_worker_id].add_register_gradient(ind);
  }
  return ret;//__sync_fetch_and_add(&n_gradients_registered_, n);
#endif
  // Gap-reusing allocator (only reachable with TFK_ALLOW_UNREGISTER)
  stack_mutex.lock();
  n_gradients_registered_ += n;
  if (!gap_list_.empty()) {
    uIndex return_val;
    // Insert in a gap, if there is one big enough
    for (GapListIterator it = gap_list_.begin();
         it != gap_list_.end(); it++) {
      uIndex len = it->end + 1 - it->start;
      if (len > n) {
        // Gap a bit larger than needed: reduce its size
        return_val = it->start;
        it->start += n;
        stack_mutex.unlock();
        return return_val;
      }
      else if (len == n) {
        // Gap exactly the size needed: fill it and remove from list,
        // invalidating most_recent_gap_ if it pointed at this gap
        return_val = it->start;
        if (most_recent_gap_ == it) {
          gap_list_.erase(it);
          most_recent_gap_ = gap_list_.end();
        }
        else {
          gap_list_.erase(it);
        }
        stack_mutex.unlock();
        return return_val;
      }
    }
  }
  // No suitable gap found; instead add to end of gradient vector
  i_gradient_ += n;
  if (i_gradient_ > max_gradient_) {
    max_gradient_ = i_gradient_;
  }
  uIndex return_val = i_gradient_ - n;
  stack_mutex.unlock();
  return return_val;
}
// If an aReal object is deleted, its gradient_index is
// unregistered from the stack. If this is at the top of the stack
// then this is easy and is done inline; this is the usual case
// since C++ tries to deallocate automatic objects in the reverse
// order to that in which they were allocated. If it is not at the
// top of the stack then a non-inline function is called to ensure
// that the gap list is adjusted correctly.
// Unregister a single gradient index that is NOT at the top of the
// gradient stack, by growing an adjacent gap, creating a new
// single-element gap, or appending one at the end of the gap list,
// then merging gaps that have become adjacent.
// NOTE(review): with TFK_ALLOW_UNREGISTER undefined (the default in
// this fork) the early return below makes the whole body dead code.
void
Stack::unregister_gradient_not_top(const uIndex& gradient_index)
{
#ifndef TFK_ALLOW_UNREGISTER
  return;
#endif
  //stack_mutex.lock();
  // How the index was absorbed into the gap list, used below to
  // decide which neighboring gaps might need merging
  enum {
    ADDED_AT_BASE,
    ADDED_AT_TOP,
    NEW_GAP,
    NOT_FOUND
  } status = NOT_FOUND;
  // First try to find if the unregistered element is at the
  // start or end of an existing gap
  if (!gap_list_.empty() && most_recent_gap_ != gap_list_.end()) {
    // We have a "most recent" gap - check whether the gradient
    // to be unregistered is here
    Gap& current_gap = *most_recent_gap_;
    if (gradient_index == current_gap.start - 1) {
      current_gap.start--;
      status = ADDED_AT_BASE;
    }
    else if (gradient_index == current_gap.end + 1) {
      current_gap.end++;
      status = ADDED_AT_TOP;
    }
    // Should we check for erroneous removal from middle of gap?
  }
  if (status == NOT_FOUND) {
    // Search other gaps (the list is kept ordered by index)
    for (GapListIterator it = gap_list_.begin();
         it != gap_list_.end(); it++) {
      if (gradient_index <= it->end + 1) {
        // Gradient to unregister is either within the gap
        // referenced by iterator "it", or it is between "it"
        // and the previous gap in the list
        if (gradient_index == it->start - 1) {
          status = ADDED_AT_BASE;
          it->start--;
          most_recent_gap_ = it;
        }
        else if (gradient_index == it->end + 1) {
          status = ADDED_AT_TOP;
          it->end++;
          most_recent_gap_ = it;
        }
        else {
          // Insert a new gap of width 1; note that list::insert
          // inserts *before* the specified location
          most_recent_gap_
            = gap_list_.insert(it, Gap(gradient_index));
          status = NEW_GAP;
        }
        break;
      }
    }
    if (status == NOT_FOUND) {
      // Index lies beyond every existing gap: append a new gap and
      // point most_recent_gap_ at it
      gap_list_.push_back(Gap(gradient_index));
      most_recent_gap_ = gap_list_.end();
      most_recent_gap_--;
    }
  }
  // Finally check if gaps have merged
  if (status == ADDED_AT_BASE
      && most_recent_gap_ != gap_list_.begin()) {
    // Check whether the gap has merged with the next one
    GapListIterator it = most_recent_gap_;
    it--;
    if (it->end == most_recent_gap_->start - 1) {
      // Merge two gaps
      most_recent_gap_->start = it->start;
      gap_list_.erase(it);
    }
  }
  else if (status == ADDED_AT_TOP) {
    GapListIterator it = most_recent_gap_;
    it++;
    if (it != gap_list_.end()
        && it->start == most_recent_gap_->end + 1) {
      // Merge two gaps
      most_recent_gap_->end = it->end;
      gap_list_.erase(it);
    }
  }
  //stack_mutex.unlock();
}
// Unregister n gradients starting at gradient_index
// Unregister n gradients starting at gradient_index. The range is
// first recorded on the worker-local stack and subtracted from the
// atomic high-water mark; the gap-list bookkeeping below only runs
// when TFK_ALLOW_UNREGISTER is defined (otherwise the early return
// skips it).
void
Stack::unregister_gradients(const uIndex& gradient_index,
                            const uIndex& n)
{
  // Record each unregistration on this worker's thread-local stack
  for (uIndex ind = gradient_index; ind < gradient_index+n; ind++) {
    worker_local_stacks[thread_local_worker_id].add_unregister_gradient(ind);
  }
  __sync_fetch_and_sub(&max_gradient_, n);
#ifndef TFK_ALLOW_UNREGISTER
  return;
#endif
  stack_mutex.lock();
  n_gradients_registered_ -= n;
  if (gradient_index+n == i_gradient_) {
    // Gradient to be unregistered is at the top of the stack
    i_gradient_ -= n;
    if (!gap_list_.empty()) {
      Gap& last_gap = gap_list_.back();
      if (i_gradient_ == last_gap.end+1) {
        // We have unregistered the elements between the "gap" of
        // unregistered element and the top of the stack, so can set
        // the variables indicating the presence of the gap to zero
        i_gradient_ = last_gap.start;
        GapListIterator it = gap_list_.end();
        it--;
        if (most_recent_gap_ == it) {
          most_recent_gap_ = gap_list_.end();
        }
        gap_list_.pop_back();
      }
    }
  }
  else { // Gradients to be unregistered not at top of stack.
    // How the range was absorbed into the gap list, used below to
    // decide which neighboring gaps might need merging
    enum {
      ADDED_AT_BASE,
      ADDED_AT_TOP,
      NEW_GAP,
      NOT_FOUND
    } status = NOT_FOUND;
    // First try to find if the unregistered element is at the start
    // or end of an existing gap
    if (!gap_list_.empty() && most_recent_gap_ != gap_list_.end()) {
      // We have a "most recent" gap - check whether the gradient
      // to be unregistered is here
      Gap& current_gap = *most_recent_gap_;
      if (gradient_index == current_gap.start - n) {
        current_gap.start -= n;
        status = ADDED_AT_BASE;
      }
      else if (gradient_index == current_gap.end + 1) {
        current_gap.end += n;
        status = ADDED_AT_TOP;
      }
      /*
      else if (gradient_index > current_gap.start - n
               && gradient_index < current_gap.end + 1) {
        std::cout << "** Attempt to find " << gradient_index << " in gaps ";
        print_gaps();
        std::cout << "\n";
        throw invalid_operation("Gap list corruption");
      }
      */
      // Should we check for erroneous removal from middle of gap?
    }
    if (status == NOT_FOUND) {
      // Search other gaps (the list is kept ordered by index)
      for (GapListIterator it = gap_list_.begin();
           it != gap_list_.end(); it++) {
        if (gradient_index <= it->end + 1) {
          // Gradient to unregister is either within the gap
          // referenced by iterator "it", or it is between "it" and
          // the previous gap in the list
          if (gradient_index == it->start - n) {
            status = ADDED_AT_BASE;
            it->start -= n;
            most_recent_gap_ = it;
          }
          else if (gradient_index == it->end + 1) {
            status = ADDED_AT_TOP;
            it->end += n;
            most_recent_gap_ = it;
          }
          /*
          else if (gradient_index > it->start - n) {
            std::cout << "*** Attempt to find " << gradient_index << " in gaps ";
            print_gaps();
            std::cout << "\n";
            throw invalid_operation("Gap list corruption");
          }
          */
          else {
            // Insert a new gap; note that list::insert inserts
            // *before* the specified location
            most_recent_gap_
              = gap_list_.insert(it, Gap(gradient_index,
                                         gradient_index+n-1));
            status = NEW_GAP;
          }
          break;
        }
      }
      if (status == NOT_FOUND) {
        // Range lies beyond every existing gap: append a new gap and
        // point most_recent_gap_ at it
        gap_list_.push_back(Gap(gradient_index,
                                gradient_index+n-1));
        most_recent_gap_ = gap_list_.end();
        most_recent_gap_--;
      }
    }
    // Finally check if gaps have merged
    if (status == ADDED_AT_BASE
        && most_recent_gap_ != gap_list_.begin()) {
      // Check whether the gap has merged with the next one
      GapListIterator it = most_recent_gap_;
      it--;
      if (it->end == most_recent_gap_->start - 1) {
        // Merge two gaps
        most_recent_gap_->start = it->start;
        gap_list_.erase(it);
      }
    }
    else if (status == ADDED_AT_TOP) {
      GapListIterator it = most_recent_gap_;
      it++;
      if (it != gap_list_.end()
          && it->start == most_recent_gap_->end + 1) {
        // Merge two gaps
        most_recent_gap_->end = it->end;
        gap_list_.erase(it);
      }
    }
  }
  stack_mutex.unlock();
}
// Print each derivative statement to the specified stream (standard
// output if omitted)
void
Stack::print_statements(std::ostream& os) const
{
for (uIndex ist = 1; ist < n_statements_; ist++) {
const Statement& statement = statement_[ist];
os << ist
<< ": d[" << statement.index
<< "] = ";
if (statement_[ist-1].end_plus_one == statement_[ist].end_plus_one) {
os << "0\n";
}
else {
for (uIndex i = statement_[ist-1].end_plus_one;
i < statement.end_plus_one; i++) {
os << " + " << multiplier_[i] << "*d[" << index_[i] << "]";
}
os << "\n";
}
}
}
// Print the current gradient list to the specified stream (standard
// output if omitted)
bool
Stack::print_gradients(std::ostream& os) const
{
  // Write the current gradient values to "os", ten per line, each
  // line prefixed by the index of its first element. Returns false
  // (after printing a message) if gradients were never initialized.
  if (!gradients_are_initialized()) {
    os << "No gradients initialized\n";
    return false;
  }
  for (uIndex i = 0; i < max_gradient_; i++) {
    if (i % 10 == 0) {
      // Start a new line with its leading index label
      if (i != 0) {
        os << "\n";
      }
      os << i << ":";
    }
    os << " " << gradient_[i];
  }
  os << "\n";
  return true;
}
// Print the list of gaps in the gradient list to the specified
// stream (standard output if omitted)
void
Stack::print_gaps(std::ostream& os) const
{
for (std::list<Gap>::const_iterator it = gap_list_.begin();
it != gap_list_.end(); it++) {
os << it->start << "-" << it->end << " ";
}
}
#ifndef ADEPT_STACK_STORAGE_STL
// Initialize the vector of gradients ready for the adjoint
// calculation: (re)allocate the manually managed gradient_ array if
// it is too small for the number of registered gradients, then zero
// every entry.
void
Stack::initialize_gradients()
{
  //printf("num allocated gradients is %d, max_gradient %d, n registered gradients: %d\n", n_allocated_gradients_, max_gradient_, n_gradients_registered_);
  // This fork sizes the array from the registered-gradient count
  // rather than max_gradient_
  uIndex max_gradient = n_gradients_registered_;
  if (max_gradient > 0) {
    if (n_allocated_gradients_ < max_gradient) {
      // Existing array too small: discard and reallocate
      if (gradient_) {
        delete[] gradient_;
      }
      gradient_ = new Real[max_gradient];
      n_allocated_gradients_ = max_gradient;
    }
    // Zero all gradients before seeding the adjoint/tangent sweep
    for (uIndex i = 0; i < max_gradient; i++) {
      gradient_[i] = 0.0;
    }
  }
  //max_gradient_ = n_gradients_registered_;
  gradients_initialized_ = true;
}
#else
// STL-storage variant: gradient_ is a std::vector, so just resize
// (new elements zero-filled; existing elements are NOT re-zeroed)
void
Stack::initialize_gradients()
{
  gradient_.resize(max_gradient_+10, 0.0);
  gradients_initialized_ = true;
}
#endif
// Report information about the stack to the specified stream, or
// standard output if omitted; note that this is synonymous with
// sending the Stack object to a stream using the "<<" operator.
// Report information about the stack to the specified stream:
// attachment state, recording state, stack/operation/gradient counts,
// gap list, Jacobian dimensions and (for small Jacobians) the
// independent/dependent index lists, plus OpenMP availability.
void
Stack::print_status(std::ostream& os) const
{
  os << "Automatic Differentiation Stack (address " << this << "):\n";
  if ((!is_thread_unsafe_) && _stack_current_thread == this) {
    os << " Currently attached - thread safe\n";
  }
  else if (is_thread_unsafe_ && _stack_current_thread_unsafe == this) {
    os << " Currently attached - thread unsafe\n";
  }
  else {
    os << " Currently detached\n";
  }
  os << " Recording status:\n";
  if (is_recording_) {
    os << " Recording is ON\n";
  }
  else {
    os << " Recording is PAUSED\n";
  }
  // Account for the null statement at the start by subtracting one
  os << " " << n_statements()-1 << " statements ("
     << n_allocated_statements() << " allocated)";
  os << " and " << n_operations() << " operations ("
     << n_allocated_operations() << " allocated)\n";
  os << " " << n_gradients_registered() << " gradients currently registered ";
  os << "and a total of " << max_gradients() << " needed (current index "
     << i_gradient() << ")\n";
  if (gap_list_.empty()) {
    os << " Gradient list has no gaps\n";
  }
  else {
    os << " Gradient list has " << gap_list_.size() << " gaps (";
    print_gaps(os);
    os << ")\n";
  }
  os << " Computation status:\n";
  if (gradients_are_initialized()) {
    os << " " << max_gradients() << " gradients assigned ("
       << n_allocated_gradients() << " allocated)\n";
  }
  else {
    os << " 0 gradients assigned (" << n_allocated_gradients()
       << " allocated)\n";
  }
  os << " Jacobian size: " << n_dependents() << "x" << n_independents() << "\n";
  // Only list individual indices when the Jacobian is small
  if (n_dependents() <= 10 && n_independents() <= 10) {
    os << " Independent indices:";
    for (std::size_t i = 0; i < independent_index_.size(); ++i) {
      os << " " << independent_index_[i];
    }
    os << "\n Dependent indices: ";
    for (std::size_t i = 0; i < dependent_index_.size(); ++i) {
      os << " " << dependent_index_[i];
    }
    os << "\n";
  }
  // Note the interleaved preprocessor blocks: the "not available"
  // line below is the runtime else-branch when compiled with OpenMP,
  // and the only branch when compiled without it
#ifdef _OPENMP
  if (have_openmp_) {
    if (openmp_manually_disabled_) {
      os << " Parallel Jacobian calculation manually disabled\n";
    }
    else {
      os << " Parallel Jacobian calculation can use up to "
         << omp_get_max_threads() << " threads\n";
      os << " Each thread treats " << ADEPT_MULTIPASS_SIZE
         << " (in)dependent variables\n";
    }
  }
  else {
#endif
  os << " Parallel Jacobian calculation not available\n";
#ifdef _OPENMP
  }
#endif
}
} // End namespace adept
// =================================================================
// Contents of StackStorageOrig.cpp
// =================================================================
/* StackStorageOrig.cpp -- Original storage of stacks using STL containers
Copyright (C) 2014-2015 University of Reading
Author: Robin Hogan <r.j.hogan@ecmwf.int>
This file is part of the Adept library.
The Stack class inherits from a class providing the storage (and
interface to the storage) for the derivative statements that are
accumulated during the execution of an algorithm. The derivative
statements are held in two stacks described by Hogan (2014): the
"statement stack" and the "operation stack".
This file provides one of the original storage engine, which used
std::vector to hold the two stacks. Note that these stacks are
contiguous in memory, which is not ideal for very large algorithms.
*/
#include <cstring>
#include <cilk-adept-source/sp_tree.cpp>
#include <cilk-adept-source/tfkparallel.cpp>
#include <adept/StackStorageOrig.h>
namespace adept {
namespace internal {
// Free the three manually managed arrays holding the statement and
// operation stacks. delete[] on a null pointer is a well-defined
// no-op, so the per-pointer null guards in the original were
// redundant and have been removed (behavior unchanged).
StackStorageOrig::~StackStorageOrig() {
  delete[] statement_;
  delete[] multiplier_;
  delete[] index_;
}
// Double the size of the operation stack, or grow it even more if
// the requested minimum number of extra entries (min) is greater
// than this would allow
void
StackStorageOrig::grow_operation_stack(uIndex min)
{
  // Double the operation-stack capacity; if doubling still cannot
  // accommodate "min" additional entries, grow by "min" more.
  uIndex grown = 2*n_allocated_operations_;
  if (min > 0 && grown < n_allocated_operations_ + min) {
    grown += min;
  }
  // Allocate the larger parallel arrays, copy the live entries
  // across, then swap them in
  Real* bigger_multiplier = new Real[grown];
  uIndex* bigger_index = new uIndex[grown];
  std::memcpy(bigger_multiplier, multiplier_, n_operations_*sizeof(Real));
  std::memcpy(bigger_index, index_, n_operations_*sizeof(uIndex));
  delete[] multiplier_;
  delete[] index_;
  multiplier_ = bigger_multiplier;
  index_ = bigger_index;
  n_allocated_operations_ = grown;
}
// ... likewise for the statement stack
void
StackStorageOrig::grow_statement_stack(uIndex min)
{
  // Double the statement-stack capacity; if doubling still cannot
  // accommodate "min" additional entries, grow by "min" more.
  uIndex grown = 2*n_allocated_statements_;
  if (min > 0 && grown < n_allocated_statements_ + min) {
    grown += min;
  }
  // Allocate, copy the live statements across, then swap in
  Statement* bigger_statement = new Statement[grown];
  std::memcpy(bigger_statement, statement_,
              n_statements_*sizeof(Statement));
  delete[] statement_;
  statement_ = bigger_statement;
  n_allocated_statements_ = grown;
}
}
}
// =================================================================
// Contents of Storage.cpp
// =================================================================
/* Storage.cpp -- Global variables recording use of Storage objects
Copyright (C) 2015 European Centre for Medium-Range Weather Forecasts
Author: Robin Hogan <r.j.hogan@ecmwf.int>
This file is part of the Adept library.
*/
#include <adept/Storage.h>
namespace adept {
namespace internal {
// Global counters of Storage objects constructed and destroyed —
// presumably compared to diagnose Storage leaks; confirm against the
// accessors declared in adept/Storage.h. Zero-initialized (static
// storage duration).
Index n_storage_objects_created_;
Index n_storage_objects_deleted_;
}
}
// =================================================================
// Contents of cppblas.cpp
// =================================================================
/* cppblas.cpp -- C++ interface to BLAS functions
Copyright (C) 2015-2016 European Centre for Medium-Range Weather Forecasts
Author: Robin Hogan <r.j.hogan@ecmwf.int>
This file is part of the Adept library.
This file provides a C++ interface to selected Level-2 and -3 BLAS
functions in which the precision of the arguments (float versus
double) is inferred via overloading
*/
#include <adept/exception.h>
#include <adept/cppblas.h>
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef HAVE_BLAS
// Fortran BLAS prototypes (hidden trailing length arguments for the
// char* parameters are omitted, as in the original).
// FIX: the output arguments (C for *gemm, Y for *gemv/*symv/*gbmv)
// were declared const even though BLAS writes through them; they are
// now non-const, consistent with the *symm prototypes below. This is
// a purely loosening change: every existing call site passing T*
// still converts.
extern "C" {
  void sgemm_(const char* TransA, const char* TransB, const int* M,
              const int* N, const int* K, const float* alpha,
              const float* A, const int* lda, const float* B, const int* ldb,
              const float* beta, float* C, const int* ldc);
  void dgemm_(const char* TransA, const char* TransB, const int* M,
              const int* N, const int* K, const double* alpha,
              const double* A, const int* lda, const double* B, const int* ldb,
              const double* beta, double* C, const int* ldc);
  void sgemv_(const char* TransA, const int* M, const int* N, const float* alpha,
              const float* A, const int* lda, const float* X, const int* incX,
              const float* beta, float* Y, const int* incY);
  void dgemv_(const char* TransA, const int* M, const int* N, const double* alpha,
              const double* A, const int* lda, const double* X, const int* incX,
              const double* beta, double* Y, const int* incY);
  void ssymm_(const char* side, const char* uplo, const int* M, const int* N,
              const float* alpha, const float* A, const int* lda, const float* B,
              const int* ldb, const float* beta, float* C, const int* ldc);
  void dsymm_(const char* side, const char* uplo, const int* M, const int* N,
              const double* alpha, const double* A, const int* lda, const double* B,
              const int* ldb, const double* beta, double* C, const int* ldc);
  void ssymv_(const char* uplo, const int* N, const float* alpha, const float* A,
              const int* lda, const float* X, const int* incX, const float* beta,
              float* Y, const int* incY);
  void dsymv_(const char* uplo, const int* N, const double* alpha, const double* A,
              const int* lda, const double* X, const int* incX, const double* beta,
              double* Y, const int* incY);
  void sgbmv_(const char* TransA, const int* M, const int* N, const int* kl,
              const int* ku, const float* alpha, const float* A, const int* lda,
              const float* X, const int* incX, const float* beta,
              float* Y, const int* incY);
  void dgbmv_(const char* TransA, const int* M, const int* N, const int* kl,
              const int* ku, const double* alpha, const double* A, const int* lda,
              const double* X, const int* incX, const double* beta,
              double* Y, const int* incY);
};
namespace adept {
namespace internal {
// Matrix-matrix multiplication for general dense matrices
// Define cppblas_gemm for element type T, forwarding to the Fortran
// BLAS routine FUNC (FUNC_COMPLEX is accepted but unused). Fortran
// BLAS is column-major only, so a row-major call is implemented by
// computing C^T = B^T * A^T: swap A/B, swap M/N, and swap the
// transpose flags. (Comments cannot be placed inside the macro body
// because of the line continuations.)
#define ADEPT_DEFINE_GEMM(T, FUNC, FUNC_COMPLEX)  \
  void cppblas_gemm(BLAS_ORDER Order,   \
  BLAS_TRANSPOSE TransA,  \
  BLAS_TRANSPOSE TransB,  \
  int M, int N,   \
  int K, T alpha, const T *A,   \
  int lda, const T *B, int ldb,   \
  T beta, T *C, int ldc) {  \
  if (Order == BlasColMajor) {  \
  FUNC(&TransA, &TransB, &M, &N, &K, &alpha, A, &lda,   \
  B, &ldb, &beta, C, &ldc);   \
  }   \
  else {  \
  FUNC(&TransB, &TransA, &N, &M, &K, &alpha, B, &ldb,   \
  A, &lda, &beta, C, &ldc);   \
  }   \
  }
// Instantiate for double and float, then discard the macro
ADEPT_DEFINE_GEMM(double, dgemm_, zgemm_);
ADEPT_DEFINE_GEMM(float, sgemm_, cgemm_);
#undef ADEPT_DEFINE_GEMM
// Matrix-vector multiplication for a general dense matrix
#define ADEPT_DEFINE_GEMV(T, FUNC, FUNC_COMPLEX) \
void cppblas_gemv(const BLAS_ORDER Order, \
const BLAS_TRANSPOSE TransA, \
const int M, const int N, \
const T alpha, const T *A, const int lda, \
const T *X, const int incX, const T beta, \
T *Y, const int incY) { \
if (Order == BlasColMajor) { \
FUNC(&TransA, &M, &N, &alpha, A, &lda, X, &incX, \
&beta, Y, &incY); \
} \
else { \
BLAS_TRANSPOSE TransNew \
= TransA == BlasTrans ? BlasNoTrans : BlasTrans; \
FUNC(&TransNew, &N, &M, &alpha, A, &lda, X, &incX, \
&beta, Y, &incY); \
} \
}
ADEPT_DEFINE_GEMV(double, dgemv_, zgemv_);
ADEPT_DEFINE_GEMV(float, sgemv_, cgemv_);
#undef ADEPT_DEFINE_GEMV
// Matrix-matrix multiplication where matrix A is symmetric
// FIX! CHECK ROW MAJOR VERSION IS RIGHT
#define ADEPT_DEFINE_SYMM(T, FUNC, FUNC_COMPLEX) \
void cppblas_symm(const BLAS_ORDER Order, \
const BLAS_SIDE Side, \
const BLAS_UPLO Uplo, \
const int M, const int N, \
const T alpha, const T *A, const int lda, \
const T *B, const int ldb, const T beta, \
T *C, const int ldc) { \
if (Order == BlasColMajor) { \
FUNC(&Side, &Uplo, &M, &N, &alpha, A, &lda, \
B, &ldb, &beta, C, &ldc); \
} \
else { \
BLAS_SIDE SideNew = Side == BlasLeft ? BlasRight : BlasLeft; \
BLAS_UPLO UploNew = Uplo == BlasUpper ? BlasLower : BlasUpper; \
FUNC(&SideNew, &UploNew, &N, &M, &alpha, A, &lda, \
B, &ldb, &beta, C, &ldc); \
} \
}
ADEPT_DEFINE_SYMM(double, dsymm_, zsymm_);
ADEPT_DEFINE_SYMM(float, ssymm_, csymm_);
#undef ADEPT_DEFINE_SYMM
// Matrix-vector multiplication where the matrix is symmetric
#define ADEPT_DEFINE_SYMV(T, FUNC, FUNC_COMPLEX) \
void cppblas_symv(const BLAS_ORDER Order, \
const BLAS_UPLO Uplo, \
const int N, const T alpha, const T *A, \
const int lda, const T *X, const int incX, \
const T beta, T *Y, const int incY) { \
if (Order == BlasColMajor) { \
FUNC(&Uplo, &N, &alpha, A, &lda, X, &incX, &beta, Y, &incY); \
} \
else { \
BLAS_UPLO UploNew = Uplo == BlasUpper ? BlasLower : BlasUpper; \
FUNC(&UploNew, &N, &alpha, A, &lda, X, &incX, &beta, Y, &incY); \
} \
}
ADEPT_DEFINE_SYMV(double, dsymv_, zsymv_);
ADEPT_DEFINE_SYMV(float, ssymv_, csymv_);
#undef ADEPT_DEFINE_SYMV
// Matrix-vector multiplication for a general band matrix
#define ADEPT_DEFINE_GBMV(T, FUNC, FUNC_COMPLEX) \
void cppblas_gbmv(const BLAS_ORDER Order, \
const BLAS_TRANSPOSE TransA, \
const int M, const int N, \
const int KL, const int KU, const T alpha,\
const T *A, const int lda, const T *X, \
const int incX, const T beta, T *Y, \
const int incY) { \
if (Order == BlasColMajor) { \
FUNC(&TransA, &M, &N, &KL, &KU, &alpha, A, &lda, \
X, &incX, &beta, Y, &incY); \
} \
else { \
BLAS_TRANSPOSE TransNew \
= TransA == BlasTrans ? BlasNoTrans : BlasTrans; \
FUNC(&TransNew, &N, &M, &KU, &KL, &alpha, A, &lda, \
X, &incX, &beta, Y, &incY); \
} \
}
ADEPT_DEFINE_GBMV(double, dgbmv_, zgbmv_);
ADEPT_DEFINE_GBMV(float, sgbmv_, cgbmv_);
#undef ADEPT_DEFINE_GBMV
} // End namespace internal
} // End namespace adept
#else // Don't have BLAS
namespace adept {
namespace internal {
// Fallback definitions used when Adept is compiled without a BLAS
// library.  Each wrapper keeps exactly the same signature as its
// BLAS-backed counterpart so callers link unchanged, but throws
// feature_not_available at run time; none of the macro arguments
// (FUNC, FUNC_COMPLEX) are referenced.
// Matrix-matrix multiplication for general dense matrices
#define ADEPT_DEFINE_GEMM(T, FUNC, FUNC_COMPLEX) \
void cppblas_gemm(BLAS_ORDER Order, \
BLAS_TRANSPOSE TransA, \
BLAS_TRANSPOSE TransB, \
int M, int N, \
int K, T alpha, const T *A, \
int lda, const T *B, int ldb, \
T beta, T *C, int ldc) { \
throw feature_not_available("Cannot perform matrix-matrix multiplication because compiled without BLAS"); \
}
ADEPT_DEFINE_GEMM(double, dgemm_, zgemm_);
ADEPT_DEFINE_GEMM(float, sgemm_, cgemm_);
#undef ADEPT_DEFINE_GEMM
// Matrix-vector multiplication for a general dense matrix
#define ADEPT_DEFINE_GEMV(T, FUNC, FUNC_COMPLEX) \
void cppblas_gemv(const BLAS_ORDER Order, \
const BLAS_TRANSPOSE TransA, \
const int M, const int N, \
const T alpha, const T *A, const int lda, \
const T *X, const int incX, const T beta, \
T *Y, const int incY) { \
throw feature_not_available("Cannot perform matrix-vector multiplication because compiled without BLAS"); \
}
ADEPT_DEFINE_GEMV(double, dgemv_, zgemv_);
ADEPT_DEFINE_GEMV(float, sgemv_, cgemv_);
#undef ADEPT_DEFINE_GEMV
// Matrix-matrix multiplication where matrix A is symmetric
// FIX! CHECK ROW MAJOR VERSION IS RIGHT
#define ADEPT_DEFINE_SYMM(T, FUNC, FUNC_COMPLEX) \
void cppblas_symm(const BLAS_ORDER Order, \
const BLAS_SIDE Side, \
const BLAS_UPLO Uplo, \
const int M, const int N, \
const T alpha, const T *A, const int lda, \
const T *B, const int ldb, const T beta, \
T *C, const int ldc) { \
throw feature_not_available("Cannot perform symmetric matrix-matrix multiplication because compiled without BLAS"); \
}
ADEPT_DEFINE_SYMM(double, dsymm_, zsymm_);
ADEPT_DEFINE_SYMM(float, ssymm_, csymm_);
#undef ADEPT_DEFINE_SYMM
// Matrix-vector multiplication where the matrix is symmetric
#define ADEPT_DEFINE_SYMV(T, FUNC, FUNC_COMPLEX) \
void cppblas_symv(const BLAS_ORDER Order, \
const BLAS_UPLO Uplo, \
const int N, const T alpha, const T *A, \
const int lda, const T *X, const int incX, \
const T beta, T *Y, const int incY) { \
throw feature_not_available("Cannot perform symmetric matrix-vector multiplication because compiled without BLAS"); \
}
ADEPT_DEFINE_SYMV(double, dsymv_, zsymv_);
ADEPT_DEFINE_SYMV(float, ssymv_, csymv_);
#undef ADEPT_DEFINE_SYMV
// Matrix-vector multiplication for a general band matrix
#define ADEPT_DEFINE_GBMV(T, FUNC, FUNC_COMPLEX) \
void cppblas_gbmv(const BLAS_ORDER Order, \
const BLAS_TRANSPOSE TransA, \
const int M, const int N, \
const int KL, const int KU, const T alpha,\
const T *A, const int lda, const T *X, \
const int incX, const T beta, T *Y, \
const int incY) { \
throw feature_not_available("Cannot perform band matrix-vector multiplication because compiled without BLAS"); \
}
ADEPT_DEFINE_GBMV(double, dgbmv_, zgbmv_);
ADEPT_DEFINE_GBMV(float, sgbmv_, cgbmv_);
#undef ADEPT_DEFINE_GBMV
}
}
#endif
// =================================================================
// Contents of index.cpp
// =================================================================
/* index.cpp -- Definitions of "end" and "__" for array indexing
Copyright (C) 2015 European Centre for Medium-Range Weather Forecasts
Robin Hogan <r.j.hogan@ecmwf.int>
This file is part of the Adept library.
*/
#include <adept/RangeIndex.h>
namespace adept {
// Definitions of the global sentinel objects declared in
// RangeIndex.h: "end" stands for the last valid index of a dimension
// in array-indexing expressions, and "__" selects an entire dimension
// (analogous to ":" in Fortran/MATLAB slicing).
::adept::internal::EndIndex end;
::adept::internal::AllIndex __;
}
// =================================================================
// Contents of inv.cpp
// =================================================================
/* inv.cpp -- Invert matrices
Copyright (C) 2015-2016 European Centre for Medium-Range Weather Forecasts
Author: Robin Hogan <r.j.hogan@ecmwf.int>
This file is part of the Adept library.
*/
#include <vector>
#include <adept/Array.h>
#include <adept/SpecialMatrix.h>
#ifndef AdeptSource_H
#include "cpplapack.h"
#endif
#ifdef HAVE_LAPACK
namespace adept {
// -------------------------------------------------------------------
// Invert general square matrix A
// -------------------------------------------------------------------
// Returns the inverse as a new (column-major) array, computed via LU
// factorization (?getrf) followed by inversion of the factored form
// (?getri).  Throws invalid_operation if A is not square and
// matrix_ill_conditioned if either LAPACK call reports failure
// (e.g. a singular matrix).  The input A is not modified.
template <typename Type>
Array<2,Type,false>
inv(const Array<2,Type,false>& A) {
using internal::cpplapack_getrf;
using internal::cpplapack_getri;
if (A.dimension(0) != A.dimension(1)) {
throw invalid_operation("Only square matrices can be inverted"
ADEPT_EXCEPTION_LOCATION);
}
// Work on a copy so the factorization can overwrite it in place
Array<2,Type,false> A_;
// LAPACKE is more efficient with column-major input
A_.resize_column_major(A.dimensions());
A_ = A;
// Pivot indices produced by the factorization, one per row
std::vector<lapack_int> ipiv(A_.dimension(0));
// lapack_int status = LAPACKE_dgetrf(LAPACK_COL_MAJOR, A_.dimension(0), A_.dimension(1),
// A_.data(), A_.offset(1), &ipiv[0]);
// The wrapper takes a single size since the matrix is known square;
// A_.offset(1) is the column stride (leading dimension)
lapack_int status = cpplapack_getrf(A_.dimension(0),
A_.data(), A_.offset(1), &ipiv[0]);
if (status != 0) {
std::stringstream s;
s << "Failed to factorize matrix: LAPACK ?getrf returned code " << status;
throw(matrix_ill_conditioned(s.str() ADEPT_EXCEPTION_LOCATION));
}
// status = LAPACKE_dgetri(LAPACK_COL_MAJOR, A_.dimension(0),
// A_.data(), A_.offset(1), &ipiv[0]);
status = cpplapack_getri(A_.dimension(0),
A_.data(), A_.offset(1), &ipiv[0]);
if (status != 0) {
std::stringstream s;
s << "Failed to invert matrix: LAPACK ?getri returned code " << status;
throw(matrix_ill_conditioned(s.str() ADEPT_EXCEPTION_LOCATION));
}
return A_;
}
// -------------------------------------------------------------------
// Invert symmetric matrix A
// -------------------------------------------------------------------
// Returns the inverse of a symmetric matrix using the Bunch-Kaufman
// factorization (?sytrf) followed by ?sytri.  Throws
// matrix_ill_conditioned if either LAPACK call reports failure.  The
// input A is not modified.
template <typename Type, SymmMatrixOrientation Orient>
SpecialMatrix<Type,SymmEngine<Orient>,false>
inv(const SpecialMatrix<Type,SymmEngine<Orient>,false>& A) {
using internal::cpplapack_sytrf;
using internal::cpplapack_sytri;
// Work on a copy so the factorization can overwrite it in place
SpecialMatrix<Type,SymmEngine<Orient>,false> A_;
A_.resize(A.dimension());
A_ = A;
// Treat symmetric matrix as column-major: the stored triangle flips
// between row-major and column-major views, hence the Orient switch
char uplo;
if (Orient == ROW_LOWER_COL_UPPER) {
uplo = 'U';
}
else {
uplo = 'L';
}
std::vector<lapack_int> ipiv(A_.dimension(0));
// lapack_int status = LAPACKE_dsytrf(LAPACK_COL_MAJOR, uplo, A_.dimension(),
// A_.data(), A_.offset(), &ipiv[0]);
lapack_int status = cpplapack_sytrf(uplo, A_.dimension(),
A_.data(), A_.offset(), &ipiv[0]);
if (status != 0) {
std::stringstream s;
s << "Failed to factorize symmetric matrix: LAPACK ?sytrf returned code " << status;
throw(matrix_ill_conditioned(s.str() ADEPT_EXCEPTION_LOCATION));
}
// status = LAPACKE_dsytri(LAPACK_COL_MAJOR, uplo, A_.dimension(),
// A_.data(), A_.offset(), &ipiv[0]);
status = cpplapack_sytri(uplo, A_.dimension(),
A_.data(), A_.offset(), &ipiv[0]);
if (status != 0) {
std::stringstream s;
s << "Failed to invert symmetric matrix: LAPACK ?sytri returned code " << status;
throw(matrix_ill_conditioned(s.str() ADEPT_EXCEPTION_LOCATION));
}
return A_;
}
}
#else // LAPACK not available
namespace adept {
// Fallback definitions used when Adept is compiled without LAPACK.
// The signatures match the LAPACK-backed versions exactly so callers
// link unchanged; both overloads throw feature_not_available at run
// time.
// -------------------------------------------------------------------
// Invert general square matrix A
// -------------------------------------------------------------------
template <typename Type>
Array<2,Type,false>
inv(const Array<2,Type,false>& A) {
throw feature_not_available("Cannot invert matrix because compiled without LAPACK");
}
// -------------------------------------------------------------------
// Invert symmetric matrix A
// -------------------------------------------------------------------
template <typename Type, SymmMatrixOrientation Orient>
SpecialMatrix<Type,SymmEngine<Orient>,false>
inv(const SpecialMatrix<Type,SymmEngine<Orient>,false>& A) {
throw feature_not_available("Cannot invert matrix because compiled without LAPACK");
}
}
#endif
namespace adept {
// -------------------------------------------------------------------
// Explicit instantiations
// -------------------------------------------------------------------
// Instantiate inv() for float and double, covering the general dense
// matrix overload and both symmetric-matrix storage orientations, so
// the definitions above are emitted in this translation unit.
#define ADEPT_EXPLICIT_INV(TYPE) \
template Array<2,TYPE,false> \
inv(const Array<2,TYPE,false>& A); \
template SpecialMatrix<TYPE,SymmEngine<ROW_LOWER_COL_UPPER>,false> \
inv(const SpecialMatrix<TYPE,SymmEngine<ROW_LOWER_COL_UPPER>,false>&); \
template SpecialMatrix<TYPE,SymmEngine<ROW_UPPER_COL_LOWER>,false> \
inv(const SpecialMatrix<TYPE,SymmEngine<ROW_UPPER_COL_LOWER>,false>&)
ADEPT_EXPLICIT_INV(float);
ADEPT_EXPLICIT_INV(double);
#undef ADEPT_EXPLICIT_INV
}
// =================================================================
// Contents of jacobian.cpp
// =================================================================
/* jacobian.cpp -- Computation of Jacobian matrix
Copyright (C) 2012-2014 University of Reading
Copyright (C) 2015-2016 European Centre for Medium-Range Weather Forecasts
Author: Robin Hogan <r.j.hogan@ecmwf.int>
This file is part of the Adept library.
*/
#ifdef _OPENMP
#include <omp.h>
#endif
#include "adept/Stack.h"
#include "adept/Packet.h"
#include "adept/traits.h"
namespace adept {
namespace internal {
static const int MULTIPASS_SIZE = ADEPT_REAL_PACKET_SIZE == 1 ? ADEPT_MULTIPASS_SIZE : ADEPT_REAL_PACKET_SIZE;
}
using namespace internal;
// Compile-time sanity check on the "long double" configuration.  If
// the user requested Real to be "long double" via
// ADEPT_REAL_TYPE_SIZE=16 but this platform's long double is really
// only 8 bytes, Adept would silently emit sub-optimal code for it; we
// therefore refuse to compile, forcing an explicit
// ADEPT_REAL_TYPE_SIZE=8 instead.
template <typename T>
T _check_long_double() {
  ADEPT_STATIC_ASSERT(ADEPT_REAL_TYPE_SIZE != 16 || ADEPT_REAL_TYPE_SIZE == sizeof(Real),
                      COMPILER_DOES_NOT_SUPPORT_16_BYTE_LONG_DOUBLE);
  T ok = 1;
  return ok;
}
/*
void
Stack::jacobian_forward_kernel(Real* gradient_multipass_b) const
{
static const int MULTIPASS_SIZE = Packet<Real>::size;
// Loop forward through the derivative statements
for (uIndex ist = 1; ist < n_statements_; ist++) {
const Statement& statement = statement_[ist];
// We copy the LHS to "a" in case it appears on the RHS in any
// of the following statements
Block<MULTIPASS_SIZE,Real> a; // Initialized to zero automatically
// Loop through operations
for (uIndex iop = statement_[ist-1].end_plus_one;
iop < statement.end_plus_one; iop++) {
Real* __restrict grad = gradient_multipass_b+index_[iop]*MULTIPASS_SIZE;
// Loop through columns within this block; we hope the
// compiler can optimize this loop. Note that it is faster
// to always use MULTIPASS_SIZE, always known at
// compile time, than to use block_size, which is not, even
// though in the last iteration this may involve redundant
// computations.
if (multiplier_[iop] == 1.0) {
// if (__builtin_expect(multiplier_[iop] == 1.0,0)) {
for (uIndex i = 0; i < MULTIPASS_SIZE; i++) {
// for (uIndex i = 0; i < block_size; i++) {
a[i] += grad[i];
}
}
else {
for (uIndex i = 0; i < MULTIPASS_SIZE; i++) {
// for (uIndex i = 0; i < block_size; i++) {
a[i] += multiplier_[iop]*grad[i];
}
}
}
// Copy the results
for (uIndex i = 0; i < MULTIPASS_SIZE; i++) {
gradient_multipass_b[statement.index*MULTIPASS_SIZE+i] = a[i];
}
} // End of loop over statements
}
*/
#if ADEPT_REAL_PACKET_SIZE > 1
// Forward sweep through the recorded derivative statements,
// propagating MULTIPASS_SIZE gradient columns at once using SIMD
// packets.  gradient_multipass_b holds MULTIPASS_SIZE contiguous
// gradient values per recorded gradient index and must be aligned for
// Packet loads.
void
Stack::jacobian_forward_kernel(Real* __restrict gradient_multipass_b) const
{
// Loop forward through the derivative statements
for (uIndex ist = 1; ist < n_statements_; ist++) {
const Statement& statement = statement_[ist];
// We copy the LHS to "a" in case it appears on the RHS in any
// of the following statements
Packet<Real> a; // Zeroed automatically
// Loop through operations
for (uIndex iop = statement_[ist-1].end_plus_one;
iop < statement.end_plus_one; iop++) {
// Accumulate multiplier * gradient for the whole packet at once
Packet<Real> g(gradient_multipass_b+index_[iop]*MULTIPASS_SIZE);
Packet<Real> m(multiplier_[iop]);
a += m * g;
}
// Copy the results
a.put(gradient_multipass_b+statement.index*MULTIPASS_SIZE);
} // End of loop over statements
}
#else
// Scalar fallback of the forward kernel (used when
// ADEPT_REAL_PACKET_SIZE == 1): identical propagation to the packet
// version above but with an explicit inner loop over the
// MULTIPASS_SIZE gradient columns.
void
Stack::jacobian_forward_kernel(Real* __restrict gradient_multipass_b) const
{
// Loop forward through the derivative statements
for (uIndex ist = 1; ist < n_statements_; ist++) {
const Statement& statement = statement_[ist];
// We copy the LHS to "a" in case it appears on the RHS in any
// of the following statements
Block<MULTIPASS_SIZE,Real> a; // Zeroed automatically
// Loop through operations
for (uIndex iop = statement_[ist-1].end_plus_one;
iop < statement.end_plus_one; iop++) {
// Inner loop has a compile-time trip count so the compiler can
// unroll/vectorize it
for (uIndex i = 0; i < MULTIPASS_SIZE; i++) {
a[i] += multiplier_[iop]*gradient_multipass_b[index_[iop]*MULTIPASS_SIZE+i];
}
}
// Copy the results
for (uIndex i = 0; i < MULTIPASS_SIZE; i++) {
gradient_multipass_b[statement.index*MULTIPASS_SIZE+i] = a[i];
}
} // End of loop over statements
}
#endif
// Variant of the forward kernel for the final partial block of
// Jacobian columns: behaves like jacobian_forward_kernel but only
// processes the first n_extra (< MULTIPASS_SIZE) columns, avoiding
// touching seed vectors that were never initialized.
void
Stack::jacobian_forward_kernel_extra(Real* __restrict gradient_multipass_b,
uIndex n_extra) const
{
// Loop forward through the derivative statements
for (uIndex ist = 1; ist < n_statements_; ist++) {
const Statement& statement = statement_[ist];
// We copy the LHS to "a" in case it appears on the RHS in any
// of the following statements
Block<MULTIPASS_SIZE,Real> a; // Zeroed automatically
// Loop through operations
for (uIndex iop = statement_[ist-1].end_plus_one;
iop < statement.end_plus_one; iop++) {
for (uIndex i = 0; i < n_extra; i++) {
a[i] += multiplier_[iop]*gradient_multipass_b[index_[iop]*MULTIPASS_SIZE+i];
}
}
// Copy the results
for (uIndex i = 0; i < n_extra; i++) {
gradient_multipass_b[statement.index*MULTIPASS_SIZE+i] = a[i];
}
} // End of loop over statements
}
// Compute the Jacobian matrix, parallelized using OpenMP. Normally
// the user would call the jacobian or jacobian_forward functions,
// and the OpenMP version would only be called if OpenMP is
// available and the Jacobian matrix is large enough for
// parallelization to be worthwhile. Note that jacobian_out must be
// allocated to be of size m*n, where m is the number of dependent
// variables and n is the number of independents. The independents
// and dependents must have already been identified with the
// functions "independent" and "dependent", otherwise this function
// will fail with FAILURE_XXDEPENDENT_NOT_IDENTIFIED. In the
// resulting matrix, the "m" dimension of the matrix varies
// fastest. This is implemented using a forward pass, appropriate
// for m>=n.
void
Stack::jacobian_forward_openmp(Real* jacobian_out) const
{
// Number of blocks to cycle through, including a possible last
// block containing fewer than MULTIPASS_SIZE variables
int n_block = (n_independent() + MULTIPASS_SIZE - 1)
/ MULTIPASS_SIZE;
uIndex n_extra = n_independent() % MULTIPASS_SIZE;
int iblock;
#pragma omp parallel
{
// Each thread gets its own aligned gradient workspace holding
// MULTIPASS_SIZE columns per recorded gradient
// std::vector<Block<MULTIPASS_SIZE,Real> >
// gradient_multipass_b(max_gradient_);
uIndex gradient_multipass_size = max_gradient_*MULTIPASS_SIZE;
Real* __restrict gradient_multipass_b
= alloc_aligned<Real>(gradient_multipass_size);
#pragma omp for schedule(static)
for (iblock = 0; iblock < n_block; iblock++) {
// Set the index to the dependent variables for this block
uIndex i_independent = MULTIPASS_SIZE * iblock;
uIndex block_size = MULTIPASS_SIZE;
// If this is the last iteration and the number of extra
// elements is non-zero, then set the block size to the number
// of extra elements. If the number of extra elements is zero,
// then the number of independent variables is exactly divisible
// by MULTIPASS_SIZE, so the last iteration will be the
// same as all the rest.
if (iblock == n_block-1 && n_extra > 0) {
block_size = n_extra;
}
// Set the initial gradients all to zero
for (std::size_t i = 0; i < gradient_multipass_size; i++) {
gradient_multipass_b[i] = 0.0;
}
// Each seed vector has one non-zero entry of 1.0
for (uIndex i = 0; i < block_size; i++) {
gradient_multipass_b[independent_index_[i_independent+i]*MULTIPASS_SIZE+i] = 1.0;
}
jacobian_forward_kernel(gradient_multipass_b);
// Copy the gradients corresponding to the dependent variables
// into the Jacobian matrix
for (uIndex idep = 0; idep < n_dependent(); idep++) {
for (uIndex i = 0; i < block_size; i++) {
jacobian_out[(i_independent+i)*n_dependent()+idep]
= gradient_multipass_b[dependent_index_[idep]*MULTIPASS_SIZE+i];
}
}
} // End of loop over blocks
free_aligned(gradient_multipass_b);
} // End of parallel section
} // End of jacobian function
// Compute the Jacobian matrix; note that jacobian_out must be
// allocated to be of size m*n, where m is the number of dependent
// variables and n is the number of independents. The independents
// and dependents must have already been identified with the
// functions "independent" and "dependent", otherwise this function
// throws dependents_or_independents_not_identified. In the
// resulting matrix, the "m" dimension of the matrix varies
// fastest. This is implemented using a forward pass, appropriate
// for m>=n.  Delegates to jacobian_forward_openmp when OpenMP is
// compiled in, enabled and there is enough work to parallelize.
void
Stack::jacobian_forward(Real* jacobian_out)
{
  if (independent_index_.empty() || dependent_index_.empty()) {
    throw(dependents_or_independents_not_identified());
  }
#ifdef _OPENMP
  if (have_openmp_
      && !openmp_manually_disabled_
      && n_independent() > MULTIPASS_SIZE
      && omp_get_max_threads() > 1) {
    // Call the parallel version
    jacobian_forward_openmp(jacobian_out);
    return;
  }
#endif
  // For optimization reasons, we process a block of
  // MULTIPASS_SIZE columns of the Jacobian at once; calculate
  // how many full blocks are needed and how many extra columns
  // will remain
  uIndex n_block = n_independent() / MULTIPASS_SIZE;
  uIndex n_extra = n_independent() % MULTIPASS_SIZE;
  // Aligned workspace holding MULTIPASS_SIZE gradient columns per
  // recorded gradient
  uIndex gradient_multipass_size = max_gradient_*MULTIPASS_SIZE;
  Real* __restrict gradient_multipass_b
    = alloc_aligned<Real>(gradient_multipass_size);
  // Loop over blocks of MULTIPASS_SIZE columns
  for (uIndex iblock = 0; iblock < n_block; iblock++) {
    // Index of the first independent variable in this block (derived
    // afresh from iblock each iteration)
    uIndex i_independent = MULTIPASS_SIZE * iblock;
    // Set the initial gradients all to zero
    for (std::size_t i = 0; i < gradient_multipass_size; i++) {
      gradient_multipass_b[i] = 0.0;
    }
    // Each seed vector has one non-zero entry of 1.0
    for (uIndex i = 0; i < MULTIPASS_SIZE; i++) {
      gradient_multipass_b[independent_index_[i_independent+i]*MULTIPASS_SIZE+i] = 1.0;
    }
    jacobian_forward_kernel(gradient_multipass_b);
    // Copy the gradients corresponding to the dependent variables
    // into the Jacobian matrix
    for (uIndex idep = 0; idep < n_dependent(); idep++) {
      for (uIndex i = 0; i < MULTIPASS_SIZE; i++) {
        jacobian_out[(i_independent+i)*n_dependent()+idep]
          = gradient_multipass_b[dependent_index_[idep]*MULTIPASS_SIZE+i];
      }
    }
    // (A dead store "i_independent += MULTIPASS_SIZE" previously sat
    // here; removed because i_independent is loop-local and
    // recomputed from iblock at the top of every iteration.)
  } // End of loop over blocks
  // Now do the same but for the remaining few columns in the matrix
  if (n_extra > 0) {
    uIndex i_independent = MULTIPASS_SIZE * n_block;
    for (std::size_t i = 0; i < gradient_multipass_size; i++) {
      gradient_multipass_b[i] = 0.0;
    }
    for (uIndex i = 0; i < n_extra; i++) {
      gradient_multipass_b[independent_index_[i_independent+i]*MULTIPASS_SIZE+i] = 1.0;
    }
    jacobian_forward_kernel_extra(gradient_multipass_b, n_extra);
    for (uIndex idep = 0; idep < n_dependent(); idep++) {
      for (uIndex i = 0; i < n_extra; i++) {
        jacobian_out[(i_independent+i)*n_dependent()+idep]
          = gradient_multipass_b[dependent_index_[idep]*MULTIPASS_SIZE+i];
      }
    }
  }
  free_aligned(gradient_multipass_b);
}
// Compute the Jacobian matrix, parallelized using OpenMP. Normally
// the user would call the jacobian or jacobian_reverse functions,
// and the OpenMP version would only be called if OpenMP is
// available and the Jacobian matrix is large enough for
// parallelization to be worthwhile. Note that jacobian_out must be
// allocated to be of size m*n, where m is the number of dependent
// variables and n is the number of independents. The independents
// and dependents must have already been identified with the
// functions "independent" and "dependent", otherwise this function
// will fail with FAILURE_XXDEPENDENT_NOT_IDENTIFIED. In the
// resulting matrix, the "m" dimension of the matrix varies
// fastest. This is implemented using a reverse pass, appropriate
// for m<n.
void
Stack::jacobian_reverse_openmp(Real* jacobian_out) const
{
// Number of blocks to cycle through, including a possible last
// block containing fewer than MULTIPASS_SIZE variables
int n_block = (n_dependent() + MULTIPASS_SIZE - 1)
/ MULTIPASS_SIZE;
uIndex n_extra = n_dependent() % MULTIPASS_SIZE;
int iblock;
// Inside the OpenMP loop, the "this" pointer may be NULL if the
// adept::Stack pointer is declared as thread-local and if the
// OpenMP memory model uses thread-local storage for private
// data. If this is the case then local pointers to or copies of
// the following members of the adept::Stack object may need to be
// made: dependent_index_ n_statements_ statement_ multiplier_
// index_ independent_index_ n_dependent() n_independent().
// Limited testing implies this is OK though.
#pragma omp parallel
{
// Per-thread workspace: one MULTIPASS_SIZE-wide block of adjoints
// per recorded gradient
std::vector<Block<MULTIPASS_SIZE,Real> >
gradient_multipass_b(max_gradient_);
#pragma omp for schedule(static)
for (iblock = 0; iblock < n_block; iblock++) {
// Set the index to the dependent variables for this block
uIndex i_dependent = MULTIPASS_SIZE * iblock;
uIndex block_size = MULTIPASS_SIZE;
// If this is the last iteration and the number of extra
// elements is non-zero, then set the block size to the number
// of extra elements. If the number of extra elements is zero,
// then the number of independent variables is exactly divisible
// by MULTIPASS_SIZE, so the last iteration will be the
// same as all the rest.
if (iblock == n_block-1 && n_extra > 0) {
block_size = n_extra;
}
// Set the initial gradients all to zero
for (std::size_t i = 0; i < gradient_multipass_b.size(); i++) {
gradient_multipass_b[i].zero();
}
// Each seed vector has one non-zero entry of 1.0
for (uIndex i = 0; i < block_size; i++) {
gradient_multipass_b[dependent_index_[i_dependent+i]][i] = 1.0;
}
// Loop backward through the derivative statements
for (uIndex ist = n_statements_-1; ist > 0; ist--) {
const Statement& statement = statement_[ist];
// We copy the RHS to "a" in case it appears on the LHS in any
// of the following statements
Real a[MULTIPASS_SIZE];
#if MULTIPASS_SIZE > MULTIPASS_SIZE_ZERO_CHECK
// For large blocks, we only process the ones where a[i] is
// non-zero
uIndex i_non_zero[MULTIPASS_SIZE];
#endif
uIndex n_non_zero = 0;
for (uIndex i = 0; i < block_size; i++) {
a[i] = gradient_multipass_b[statement.index][i];
gradient_multipass_b[statement.index][i] = 0.0;
if (a[i] != 0.0) {
#if MULTIPASS_SIZE > MULTIPASS_SIZE_ZERO_CHECK
i_non_zero[n_non_zero++] = i;
#else
// For small blocks n_non_zero is just a boolean flag
n_non_zero = 1;
#endif
}
}
// Only do anything for this statement if any of the a values
// are non-zero
if (n_non_zero) {
// Loop through the operations
for (uIndex iop = statement_[ist-1].end_plus_one;
iop < statement.end_plus_one; iop++) {
// Try to minimize pointer dereferencing by making local
// copies
Real multiplier = multiplier_[iop];
Real* __restrict gradient_multipass
= &(gradient_multipass_b[index_[iop]][0]);
#if MULTIPASS_SIZE > MULTIPASS_SIZE_ZERO_CHECK
// For large blocks, loop over only the indices
// corresponding to non-zero a
for (uIndex i = 0; i < n_non_zero; i++) {
gradient_multipass[i_non_zero[i]] += multiplier*a[i_non_zero[i]];
}
#else
// For small blocks, do all indices
for (uIndex i = 0; i < block_size; i++) {
// for (uIndex i = 0; i < MULTIPASS_SIZE; i++) {
gradient_multipass[i] += multiplier*a[i];
}
#endif
}
}
} // End of loop over statement
// Copy the gradients corresponding to the independent
// variables into the Jacobian matrix
for (uIndex iindep = 0; iindep < n_independent(); iindep++) {
for (uIndex i = 0; i < block_size; i++) {
jacobian_out[iindep*n_dependent()+i_dependent+i]
= gradient_multipass_b[independent_index_[iindep]][i];
}
}
} // End of loop over blocks
} // end #pragma omp parallel
} // end jacobian_reverse_openmp
// Compute the Jacobian matrix; note that jacobian_out must be
// allocated to be of size m*n, where m is the number of dependent
// variables and n is the number of independents. The independents
// and dependents must have already been identified with the
// functions "independent" and "dependent", otherwise this function
// will fail with FAILURE_XXDEPENDENT_NOT_IDENTIFIED. In the
// resulting matrix, the "m" dimension of the matrix varies
// fastest. This is implemented using a reverse pass, appropriate
// for m<n.
void
Stack::jacobian_reverse(Real* jacobian_out)
{
if (independent_index_.empty() || dependent_index_.empty()) {
throw(dependents_or_independents_not_identified());
}
#ifdef _OPENMP
// Prefer the parallel implementation when OpenMP is available,
// enabled, and there is more than one block of dependents to share
// between threads
if (have_openmp_
&& !openmp_manually_disabled_
&& n_dependent() > MULTIPASS_SIZE
&& omp_get_max_threads() > 1) {
// Call the parallel version
jacobian_reverse_openmp(jacobian_out);
return;
}
#endif
// Workspace: one MULTIPASS_SIZE-wide block of adjoints per recorded
// gradient
// gradient_multipass_.resize(max_gradient_);
std::vector<Block<MULTIPASS_SIZE,Real> >
gradient_multipass_b(max_gradient_);
// For optimization reasons, we process a block of
// MULTIPASS_SIZE rows of the Jacobian at once; calculate
// how many blocks are needed and how many extras will remain
uIndex n_block = n_dependent() / MULTIPASS_SIZE;
uIndex n_extra = n_dependent() % MULTIPASS_SIZE;
uIndex i_dependent = 0; // uIndex of first row in the block we are
// currently computing
// Loop over the blocks of MULTIPASS_SIZE rows
for (uIndex iblock = 0; iblock < n_block; iblock++) {
// Set the initial gradients all to zero
// zero_gradient_multipass();
for (std::size_t i = 0; i < gradient_multipass_b.size(); i++) {
gradient_multipass_b[i].zero();
}
// Each seed vector has one non-zero entry of 1.0
for (uIndex i = 0; i < MULTIPASS_SIZE; i++) {
gradient_multipass_b[dependent_index_[i_dependent+i]][i] = 1.0;
}
// Loop backward through the derivative statements
// (statement 0 is a sentinel and is never processed; n_statements_
// is assumed to be at least 1 here)
for (uIndex ist = n_statements_-1; ist > 0; ist--) {
const Statement& statement = statement_[ist];
// We copy the RHS to "a" in case it appears on the LHS in any
// of the following statements
Real a[MULTIPASS_SIZE];
#if MULTIPASS_SIZE > MULTIPASS_SIZE_ZERO_CHECK
// For large blocks, we only process the ones where a[i] is
// non-zero
uIndex i_non_zero[MULTIPASS_SIZE];
#endif
uIndex n_non_zero = 0;
for (uIndex i = 0; i < MULTIPASS_SIZE; i++) {
a[i] = gradient_multipass_b[statement.index][i];
gradient_multipass_b[statement.index][i] = 0.0;
if (a[i] != 0.0) {
#if MULTIPASS_SIZE > MULTIPASS_SIZE_ZERO_CHECK
i_non_zero[n_non_zero++] = i;
#else
// For small blocks n_non_zero is just a boolean flag
n_non_zero = 1;
#endif
}
}
// Only do anything for this statement if any of the a values
// are non-zero
if (n_non_zero) {
// Loop through the operations
for (uIndex iop = statement_[ist-1].end_plus_one;
iop < statement.end_plus_one; iop++) {
// Try to minimize pointer dereferencing by making local
// copies
Real multiplier = multiplier_[iop];
Real* __restrict gradient_multipass
= &(gradient_multipass_b[index_[iop]][0]);
#if MULTIPASS_SIZE > MULTIPASS_SIZE_ZERO_CHECK
// For large blocks, loop over only the indices
// corresponding to non-zero a
for (uIndex i = 0; i < n_non_zero; i++) {
gradient_multipass[i_non_zero[i]] += multiplier*a[i_non_zero[i]];
}
#else
// For small blocks, do all indices
for (uIndex i = 0; i < MULTIPASS_SIZE; i++) {
gradient_multipass[i] += multiplier*a[i];
}
#endif
}
}
} // End of loop over statement
// Copy the gradients corresponding to the independent variables
// into the Jacobian matrix
for (uIndex iindep = 0; iindep < n_independent(); iindep++) {
for (uIndex i = 0; i < MULTIPASS_SIZE; i++) {
jacobian_out[iindep*n_dependent()+i_dependent+i]
= gradient_multipass_b[independent_index_[iindep]][i];
}
}
i_dependent += MULTIPASS_SIZE;
} // End of loop over blocks
// Now do the same but for the remaining few rows in the matrix
if (n_extra > 0) {
for (std::size_t i = 0; i < gradient_multipass_b.size(); i++) {
gradient_multipass_b[i].zero();
}
// zero_gradient_multipass();
for (uIndex i = 0; i < n_extra; i++) {
gradient_multipass_b[dependent_index_[i_dependent+i]][i] = 1.0;
}
for (uIndex ist = n_statements_-1; ist > 0; ist--) {
const Statement& statement = statement_[ist];
Real a[MULTIPASS_SIZE];
#if MULTIPASS_SIZE > MULTIPASS_SIZE_ZERO_CHECK
uIndex i_non_zero[MULTIPASS_SIZE];
#endif
uIndex n_non_zero = 0;
for (uIndex i = 0; i < n_extra; i++) {
a[i] = gradient_multipass_b[statement.index][i];
gradient_multipass_b[statement.index][i] = 0.0;
if (a[i] != 0.0) {
#if MULTIPASS_SIZE > MULTIPASS_SIZE_ZERO_CHECK
i_non_zero[n_non_zero++] = i;
#else
n_non_zero = 1;
#endif
}
}
if (n_non_zero) {
for (uIndex iop = statement_[ist-1].end_plus_one;
iop < statement.end_plus_one; iop++) {
Real multiplier = multiplier_[iop];
Real* __restrict gradient_multipass
= &(gradient_multipass_b[index_[iop]][0]);
// if (index_[iop] > max_gradient_-1
// || index_[iop] < 0) {
// std::cerr << "AAAAAA: iop=" << iop << " index_[iop]=" << index_[iop] << " max_gradient_=" << max_gradient_ << " ist=" << ist << "\n";
// }
#if MULTIPASS_SIZE > MULTIPASS_SIZE_ZERO_CHECK
for (uIndex i = 0; i < n_non_zero; i++) {
gradient_multipass[i_non_zero[i]] += multiplier*a[i_non_zero[i]];
}
#else
for (uIndex i = 0; i < n_extra; i++) {
// std::cerr << "BBBBB: i=" << i << " gradient_multipass[i]=" << gradient_multipass[i] << " multiplier=" << multiplier << " a[i]=" << a[i] << "\n";
gradient_multipass[i] += multiplier*a[i];
}
#endif
}
}
}
for (uIndex iindep = 0; iindep < n_independent(); iindep++) {
for (uIndex i = 0; i < n_extra; i++) {
jacobian_out[iindep*n_dependent()+i_dependent+i]
= gradient_multipass_b[independent_index_[iindep]][i];
}
}
}
}
// Compute the Jacobian matrix; note that jacobian_out must be
// allocated to be of size m*n, where m is the number of dependent
// variables and n is the number of independents. In the resulting
// matrix, the "m" dimension of the matrix varies fastest. This is
// implemented by calling one of jacobian_forward and
// jacobian_reverse, whichever would be faster.
// Compute the Jacobian matrix; jacobian_out must be allocated with
// m*n elements, where m is the number of dependent variables and n
// the number of independents, with the "m" dimension varying fastest.
// Delegates to whichever of the forward/reverse passes should be
// cheaper: forward cost scales with the independent count, reverse
// with the dependent count.
void
Stack::jacobian(Real* jacobian_out)
{
  if (n_independent() > n_dependent()) {
    jacobian_reverse(jacobian_out);
  }
  else {
    jacobian_forward(jacobian_out);
  }
}
} // End namespace adept
// =================================================================
// Contents of settings.cpp
// =================================================================
/* settings.cpp -- View/change the overall Adept settings
Copyright (C) 2016 European Centre for Medium-Range Weather Forecasts
Author: Robin Hogan <r.j.hogan@ecmwf.int>
This file is part of the Adept library.
*/
#include <sstream>
#include <cstring>
#include <adept/base.h>
#include <adept/settings.h>
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef HAVE_OPENBLAS_CBLAS_HEADER
#include <cblas.h>
#endif
namespace adept {
// -------------------------------------------------------------------
// Get compile-time settings
// -------------------------------------------------------------------
// Return the version of Adept at compile time
// Return the version of the Adept library determined at compile time.
std::string
version()
{
  // ADEPT_VERSION_STR is a string macro supplied by the Adept headers.
  return ADEPT_VERSION_STR;
}
// Return the compiler used to compile the Adept library (e.g. "g++
// [4.3.2]" or "Microsoft Visual C++ [1800]")
// Return the compiler used to compile the Adept library (e.g. "g++
// [4.3.2]" or "Microsoft Visual C++ [1800]").
std::string
compiler_version()
{
// Base compiler name: prefer the configure-time CXX string, fall back
// to detecting MSVC, otherwise report "unknown".
#ifdef CXX
  std::string cv = CXX; // Defined in config.h
#elif defined(_MSC_VER)
  std::string cv = "Microsoft Visual C++";
#else
  std::string cv = "unknown";
#endif
// Append the compiler's version number in square brackets, using the
// predefined version macros stringified at compile time.
#ifdef __GNUC__
#define STRINGIFY3(A,B,C) STRINGIFY(A) "." STRINGIFY(B) "." STRINGIFY(C)
#define STRINGIFY(A) #A
  cv += " [" STRINGIFY3(__GNUC__,__GNUC_MINOR__,__GNUC_PATCHLEVEL__) "]";
#undef STRINGIFY
#undef STRINGIFY3
#elif defined(_MSC_VER)
#define STRINGIFY1(A) STRINGIFY(A)
#define STRINGIFY(A) #A
  cv += " [" STRINGIFY1(_MSC_VER) "]";
#undef STRINGIFY
#undef STRINGIFY1
#endif
  return cv;
}
// Return the compiler flags used when compiling the Adept library
// (e.g. "-Wall -g -O3")
// Return the compiler flags used when compiling the Adept library
// (e.g. "-Wall -g -O3"), or "unknown" if the build system did not
// record them.
std::string
compiler_flags()
{
#ifdef CXXFLAGS
  return CXXFLAGS; // Defined in config.h
#else
  return "unknown";
#endif
}
// Return a multi-line string listing numerous aspects of the way
// Adept has been configured.
// Return a multi-line, human-readable string listing numerous aspects
// of the way Adept has been configured: version, compiler, flags,
// BLAS support and the Jacobian block size.
std::string
configuration()
{
  std::stringstream s;
  s << "Adept version " << adept::version() << ":\n";
  s << "  Compiled with " << adept::compiler_version() << "\n";
  s << "  Compiler flags \"" << adept::compiler_flags() << "\"\n";
#ifdef BLAS_LIBS
  if (std::strlen(BLAS_LIBS) > 2) {
    // Skip the first two characters — presumably the "-l" of a linker
    // flag such as "-lopenblas" — to get the bare library name.
    const char* blas_libs = BLAS_LIBS + 2;
    s << "  BLAS support from " << blas_libs << " library\n";
  }
  else {
    s << "  BLAS support from built-in library\n";
  }
#endif
#ifdef HAVE_OPENBLAS_CBLAS_HEADER
  s << "  Number of BLAS threads may be specified up to maximum of "
    << max_blas_threads() << "\n";
#endif
  s << "  Jacobians processed in blocks of size "
    << ADEPT_MULTIPASS_SIZE << "\n";
  return s.str();
}
// -------------------------------------------------------------------
// Get/set number of threads for array operations
// -------------------------------------------------------------------
// Get the maximum number of threads available for BLAS operations
// Get the maximum number of threads available for BLAS operations;
// returns 1 when Adept was built without the OpenBLAS CBLAS header
// (i.e. no thread control is possible).
int
max_blas_threads()
{
#ifdef HAVE_OPENBLAS_CBLAS_HEADER
  return openblas_get_num_threads();
#else
  return 1;
#endif
}
// Set the maximum number of threads available for BLAS operations
// (zero means use the maximum sensible number on the current
// system), and return the number actually set. Note that OpenBLAS
// uses pthreads and the Jacobian calculation uses OpenMP - this can
// lead to inefficient behaviour so if you are computing Jacobians
// then you may get better performance by setting the number of
// array threads to one.
// Set the maximum number of threads available for BLAS operations
// (zero means use the maximum sensible number on the current
// system), and return the number actually set. Note that OpenBLAS
// uses pthreads and the Jacobian calculation uses OpenMP - this can
// lead to inefficient behaviour so if you are computing Jacobians
// then you may get better performance by setting the number of
// array threads to one.
int
set_max_blas_threads(int n)
{
#ifdef HAVE_OPENBLAS_CBLAS_HEADER
  openblas_set_num_threads(n);
  return openblas_get_num_threads();
#else
  // Without OpenBLAS the request (n) is ignored and 1 is reported.
  return 1;
#endif
}
// Was the library compiled with matrix multiplication support (from
// BLAS)?
// Was the library compiled with matrix multiplication support (from
// BLAS)?  Determined entirely at compile time via HAVE_BLAS.
bool
have_matrix_multiplication() {
#ifdef HAVE_BLAS
  return true;
#else
  return false;
#endif
}
// Was the library compiled with linear algebra support (e.g. inv
// and solve from LAPACK)
// Was the library compiled with linear algebra support (e.g. inv
// and solve from LAPACK)?  Determined at compile time via HAVE_LAPACK.
bool
have_linear_algebra() {
#ifdef HAVE_LAPACK
  return true;
#else
  return false;
#endif
}
} // End namespace adept
// =================================================================
// Contents of solve.cpp
// =================================================================
/* solve.cpp -- Solve systems of linear equations using LAPACK
Copyright (C) 2015-2016 European Centre for Medium-Range Weather Forecasts
Author: Robin Hogan <r.j.hogan@ecmwf.int>
This file is part of the Adept library.
*/
#include <vector>
#include <adept/solve.h>
#include <adept/Array.h>
#include <adept/SpecialMatrix.h>
// If ADEPT_SOURCE_H is defined then we are in a header file generated
// from all the source files, so cpplapack.h will already have been
// included
#ifndef AdeptSource_H
#include "cpplapack.h"
#endif
#ifdef HAVE_LAPACK
namespace adept {
// -------------------------------------------------------------------
// Solve Ax = b for general square matrix A
// -------------------------------------------------------------------
// Solve Ax = b for general square matrix A using LAPACK ?gesv.
// A is copied into a column-major scratch array (LAPACKE is more
// efficient with column-major input) and b into a working vector
// that ?gesv overwrites with the solution, which is then returned.
// Throws matrix_ill_conditioned if the factorization fails.
template <typename T>
Array<1,T,false>
solve(const Array<2,T,false>& A, const Array<1,T,false>& b) {
  Array<2,T,false> A_;
  Array<1,T,false> b_;
  // LAPACKE is more efficient with column-major input
  //    if (A.is_row_contiguous()) {
  A_.resize_column_major(A.dimensions());
  A_ = A;
  //    }
  //    else {
  //      A_.link(A);
  //    }
  //    if (b_.offset(0) != 0) {
  b_ = b;
  //    }
  //    else {
  //      b_.link(b);
  //    }
  // Pivot indices produced by the LU factorization.
  std::vector<lapack_int> ipiv(A_.dimension(0));
  //    lapack_int status = LAPACKE_dgesv(LAPACK_COL_MAJOR, A_.dimension(0), 1,
  //				      A_.data(), A_.offset(1), &ipiv[0],
  //				      b_.data(), b_.dimension(0));
  lapack_int status = cpplapack_gesv(A_.dimension(0), 1,
				     A_.data(), A_.offset(1), &ipiv[0],
				     b_.data(), b_.dimension(0));
  if (status != 0) {
    std::stringstream s;
    s << "Failed to solve general system of equations: LAPACK ?gesv returned code " << status;
    throw(matrix_ill_conditioned(s.str() ADEPT_EXCEPTION_LOCATION));
  }
  return b_;
}
// -------------------------------------------------------------------
// Solve AX = B for general square matrix A and rectangular matrix B
// -------------------------------------------------------------------
// Solve AX = B for general square matrix A and rectangular matrix B
// using LAPACK ?gesv.  Both operands are copied into column-major
// scratch arrays; B_ is overwritten with the solution X and returned.
// Throws matrix_ill_conditioned if the factorization fails.
template <typename T>
Array<2,T,false>
solve(const Array<2,T,false>& A, const Array<2,T,false>& B) {
  Array<2,T,false> A_;
  Array<2,T,false> B_;
  // LAPACKE is more efficient with column-major input
  //    if (A.is_row_contiguous()) {
  A_.resize_column_major(A.dimensions());
  A_ = A;
  //    }
  //    else {
  //      A_.link(A);
  //    }
  //    if (B.is_row_contiguous()) {
  B_.resize_column_major(B.dimensions());
  B_ = B;
  //    }
  //    else {
  //      B_.link(B);
  //    }
  // Pivot indices produced by the LU factorization.
  std::vector<lapack_int> ipiv(A_.dimension(0));
  //    lapack_int status = LAPACKE_dgesv(LAPACK_COL_MAJOR, A_.dimension(0), B.dimension(1),
  //				      A_.data(), A_.offset(1), &ipiv[0],
  //				      B_.data(), B_.offset(1));
  lapack_int status = cpplapack_gesv(A_.dimension(0), B.dimension(1),
				     A_.data(), A_.offset(1), &ipiv[0],
				     B_.data(), B_.offset(1));
  if (status != 0) {
    std::stringstream s;
    s << "Failed to solve general system of equations for matrix RHS: LAPACK ?gesv returned code " << status;
    throw(matrix_ill_conditioned(s.str() ADEPT_EXCEPTION_LOCATION));
  }
  return B_;
}
// -------------------------------------------------------------------
// Solve Ax = b for symmetric square matrix A
// -------------------------------------------------------------------
// Solve Ax = b for symmetric square matrix A using LAPACK ?sysv.
// The symmetric matrix is treated as column-major, so the 'uplo'
// character is chosen from the storage orientation.  If ?sysv fails
// (e.g. the Bunch-Kaufman factorization breaks down) the system is
// retried with the general solver rather than throwing immediately.
template <typename T, SymmMatrixOrientation Orient>
Array<1,T,false>
solve(const SpecialMatrix<T,SymmEngine<Orient>,false>& A,
      const Array<1,T,false>& b) {
  SpecialMatrix<T,SymmEngine<Orient>,false> A_;
  Array<1,T,false> b_;
  // Work on copies: ?sysv overwrites both its matrix and RHS.
  A_.resize(A.dimension());
  A_ = A;
  //    A_.link(A);
  //    if (b.offset(0) != 1) {
  b_ = b;
  //    }
  //    else {
  //      b_.link(b);
  //    }
  // Treat symmetric matrix as column-major
  char uplo;
  if (Orient == ROW_LOWER_COL_UPPER) {
    uplo = 'U';
  }
  else {
    uplo = 'L';
  }
  // Pivot indices produced by the factorization.
  std::vector<lapack_int> ipiv(A_.dimension());
  //    lapack_int status = LAPACKE_dsysv(LAPACK_COL_MAJOR, uplo, A_.dimension(0), 1,
  //				      A_.data(), A_.offset(), &ipiv[0],
  //				      b_.data(), b_.dimension(0));
  lapack_int status = cpplapack_sysv(uplo, A_.dimension(0), 1,
				     A_.data(), A_.offset(), &ipiv[0],
				     b_.data(), b_.dimension(0));
  if (status != 0) {
    //      std::stringstream s;
    //      s << "Failed to solve symmetric system of equations: LAPACK ?sysv returned code " << status;
    //      throw(matrix_ill_conditioned(s.str() ADEPT_EXCEPTION_LOCATION));
    std::cerr << "Warning: LAPACK solve symmetric system failed (?sysv): trying general (?gesv)\n";
    // Fall back to the general dense solver with A_ densified.
    return solve(Array<2,T,false>(A_),b_);
  }
  return b_;
}
// -------------------------------------------------------------------
// Solve AX = B for symmetric square matrix A
// -------------------------------------------------------------------
// Solve AX = B for symmetric square matrix A using LAPACK ?sysv.
// A and B are copied (column-major for B); B_ is overwritten with
// the solution X and returned.  Unlike the vector-RHS overload this
// one throws matrix_ill_conditioned on failure instead of falling
// back to the general solver.
template <typename T, SymmMatrixOrientation Orient>
Array<2,T,false>
solve(const SpecialMatrix<T,SymmEngine<Orient>,false>& A,
      const Array<2,T,false>& B) {
  SpecialMatrix<T,SymmEngine<Orient>,false> A_;
  Array<2,T,false> B_;
  A_.resize(A.dimension());
  A_ = A;
  //    A_.link(A);
  //    if (B.is_row_contiguous()) {
  B_.resize_column_major(B.dimensions());
  B_ = B;
  //    }
  //    else {
  //      B_.link(B);
  //    }
  // Treat symmetric matrix as column-major
  char uplo;
  if (Orient == ROW_LOWER_COL_UPPER) {
    uplo = 'U';
  }
  else {
    uplo = 'L';
  }
  // Pivot indices produced by the factorization.
  std::vector<lapack_int> ipiv(A_.dimension());
  //    lapack_int status = LAPACKE_dsysv(LAPACK_COL_MAJOR, uplo, A_.dimension(0), B.dimension(1),
  //				      A_.data(), A_.offset(), &ipiv[0],
  //				      B_.data(), B_.offset(1));
  lapack_int status = cpplapack_sysv(uplo, A_.dimension(0), B.dimension(1),
				     A_.data(), A_.offset(), &ipiv[0],
				     B_.data(), B_.offset(1));
  if (status != 0) {
    std::stringstream s;
    s << "Failed to solve symmetric system of equations with matrix RHS: LAPACK ?sysv returned code " << status;
    throw(matrix_ill_conditioned(s.str() ADEPT_EXCEPTION_LOCATION));
  }
  return B_;
}
}
#else
namespace adept {
// -------------------------------------------------------------------
// Solve Ax = b for general square matrix A
// -------------------------------------------------------------------
// Stub used when the library is built without LAPACK: any attempt to
// solve a general system (vector RHS) fails at run time.
template <typename T>
Array<1,T,false>
solve(const Array<2,T,false>& A, const Array<1,T,false>& b) {
  throw feature_not_available("Cannot solve linear equations because compiled without LAPACK");
}
// -------------------------------------------------------------------
// Solve AX = B for general square matrix A and rectangular matrix B
// -------------------------------------------------------------------
// Stub used when the library is built without LAPACK: any attempt to
// solve a general system (matrix RHS) fails at run time.
template <typename T>
Array<2,T,false>
solve(const Array<2,T,false>& A, const Array<2,T,false>& B) {
  throw feature_not_available("Cannot solve linear equations because compiled without LAPACK");
}
// -------------------------------------------------------------------
// Solve Ax = b for symmetric square matrix A
// -------------------------------------------------------------------
// Stub used when the library is built without LAPACK: any attempt to
// solve a symmetric system (vector RHS) fails at run time.
template <typename T, SymmMatrixOrientation Orient>
Array<1,T,false>
solve(const SpecialMatrix<T,SymmEngine<Orient>,false>& A,
      const Array<1,T,false>& b) {
  throw feature_not_available("Cannot solve linear equations because compiled without LAPACK");
}
// -------------------------------------------------------------------
// Solve AX = B for symmetric square matrix A
// -------------------------------------------------------------------
// Stub used when the library is built without LAPACK: any attempt to
// solve a symmetric system (matrix RHS) fails at run time.
template <typename T, SymmMatrixOrientation Orient>
Array<2,T,false>
solve(const SpecialMatrix<T,SymmEngine<Orient>,false>& A,
      const Array<2,T,false>& B) {
  throw feature_not_available("Cannot solve linear equations because compiled without LAPACK");
}
}
#endif
namespace adept {
  // -------------------------------------------------------------------
  // Explicit instantiations
  // -------------------------------------------------------------------
  // Instantiate the general and both symmetric-orientation solve()
  // overloads for each supported element type (float, double) and
  // right-hand-side rank (1 = vector, 2 = matrix) so they are
  // available at link time from this translation unit.
#define ADEPT_EXPLICIT_SOLVE(TYPE,RRANK)				\
  template Array<RRANK,TYPE,false>					\
  solve(const Array<2,TYPE,false>& A, const Array<RRANK,TYPE,false>& b); \
  template Array<RRANK,TYPE,false>					\
  solve(const SpecialMatrix<TYPE,SymmEngine<ROW_LOWER_COL_UPPER>,false>& A, \
	const Array<RRANK,TYPE,false>& b);				\
  template Array<RRANK,TYPE,false>					\
  solve(const SpecialMatrix<TYPE,SymmEngine<ROW_UPPER_COL_LOWER>,false>& A, \
	const Array<RRANK,TYPE,false>& b);
  ADEPT_EXPLICIT_SOLVE(float,1)
  ADEPT_EXPLICIT_SOLVE(float,2)
  ADEPT_EXPLICIT_SOLVE(double,1)
  ADEPT_EXPLICIT_SOLVE(double,2)
#undef ADEPT_EXPLICIT_SOLVE
}
// =================================================================
// Contents of vector_utilities.cpp
// =================================================================
/* vector_utilities.cpp -- Vector utility functions
Copyright (C) 2016 European Centre for Medium-Range Weather Forecasts
Author: Robin Hogan <r.j.hogan@ecmwf.int>
This file is part of the Adept library.
*/
#include <adept/vector_utilities.h>
namespace adept {
// Return a vector of n values evenly spaced from x1 to x2 inclusive.
// n == 1 is only permitted when the endpoints coincide; otherwise an
// invalid_operation exception is thrown.  n == 0 yields an empty array.
Array<1,Real,false>
linspace(Real x1, Real x2, Index n) {
  Array<1,Real,false> result(n);
  if (n == 1) {
    if (x1 != x2) {
      throw(invalid_operation("linspace(x1,x2,n) with n=1 only valid if x1=x2"));
    }
    result(0) = x1;
  }
  else if (n > 1) {
    // Interpolate linearly between the endpoints; the final element
    // (index n-1) lands exactly on x1 + (x2-x1).
    Real span = x2 - x1;
    for (Index j = 0; j < n; ++j) {
      result(j) = x1 + span*j / static_cast<Real>(n-1);
    }
  }
  return result;
}
}
#endif
|
pr67495.c | /* PR c/67495 */
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */
int a, b, c;
void
foo (void)
{
  /* Compile-only regression test: every statement below deliberately
     violates the OpenMP "atomic" construct rules, and the dg-error
     annotation on each line records the diagnostic GCC must emit
     (casts make the target a non-lvalue, or introduce two different
     memory expressions in a capture block).  Do not "fix" these.  */
  #pragma omp atomic capture
  a = (float)a + b;	/* { dg-error "invalid operator" } */
  #pragma omp atomic read
  (float) a = b;	/* { dg-error "lvalue required" } */
  #pragma omp atomic write
  (float) a = b;	/* { dg-error "lvalue required" } */
  #pragma omp atomic read
  a = (float) b;	/* { dg-error "lvalue required" } */
  #pragma omp atomic capture
  (float) a = b += c;	/* { dg-error "lvalue required" } */
  #pragma omp atomic capture
  { a += b; (float) c = a; }	/* { dg-error "lvalue required" } */
  #pragma omp atomic capture
  { a += b; c = (float) a; }	/* { dg-error "uses two different expressions for memory" } */
  #pragma omp atomic capture
  a = (int)a + b;	/* { dg-error "invalid operator" } */
  #pragma omp atomic read
  (int) a = b;	/* { dg-error "lvalue required" } */
  #pragma omp atomic write
  (int) a = b;	/* { dg-error "lvalue required" } */
  #pragma omp atomic read
  a = (int) b;	/* { dg-error "lvalue required" } */
  #pragma omp atomic capture
  (int) a = b += c;	/* { dg-error "lvalue required" } */
  #pragma omp atomic capture
  { a += b; (int) c = a; }	/* { dg-error "lvalue required" } */
  #pragma omp atomic capture
  { a += b; c = (int) a; }	/* { dg-error "lvalue required" } */
}
|
omp_for_schedule_guided.c | // RUN: %libomp-compile-and-run
/* Test for guided scheduling
* Ensure threads get chunks interleavely first
* Then judge the chunk sizes are decreasing to a stable value
* Modified by Chunhua Liao
* For example, 100 iteration on 2 threads, chunksize 7
* one line for each dispatch, 0/1 means thread id
* 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 24
* 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 18
* 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14
* 1 1 1 1 1 1 1 1 1 1 10
* 0 0 0 0 0 0 0 0 8
* 1 1 1 1 1 1 1 7
* 0 0 0 0 0 0 0 7
* 1 1 1 1 1 1 1 7
* 0 0 0 0 0 5
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
#define CFSMAX_SIZE 1000
#define MAX_TIME 0.005
#ifdef SLEEPTIME
#undef SLEEPTIME
#define SLEEPTIME 0.0001
#endif
/* Verify guided scheduling: record which thread executed each
 * iteration, reconstruct the dispatched chunks from that record, and
 * check the chunk sizes decay exponentially towards a stable minimum.
 * Returns 1 on success, 0 on failure.
 * Fix: the original leaked tids, chunksizes and local_chunknr on
 * every exit path; all three are now freed. */
int test_omp_for_schedule_guided()
{
  int * tids;        /* tids[j] = id of the thread that ran iteration j */
  int * chunksizes;  /* reconstructed size of each dispatched chunk */
  int notout;        /* 1 while at least one thread is still in the loop */
  int maxiter;       /* highest iteration index dispatched so far */
  int threads;
  int i;
  int result;
  tids = (int *) malloc (sizeof (int) * (CFSMAX_SIZE + 1));
  maxiter = 0;
  result = 1;
  notout = 1;
  /* Testing if enough threads are available for this check. */
  #pragma omp parallel
  {
    #pragma omp single
    {
      threads = omp_get_num_threads();
    }
  }
  /* ensure there are at least two threads */
  if (threads < 2) {
    omp_set_num_threads(2);
    threads = 2;
  }
  /* Now the real parallel work:
   * Each thread will start immediately with the first chunk.
   */
  #pragma omp parallel shared(tids,maxiter)
  { /* begin of parallel */
    double count;
    int tid;
    int j;
    tid = omp_get_thread_num ();
    #pragma omp for nowait schedule(guided)
    for(j = 0; j < CFSMAX_SIZE; ++j) {
      count = 0.;
      #pragma omp flush(maxiter)
      if (j > maxiter) {
        #pragma omp critical
        {
          maxiter = j;
        }
      }
      /* Wait (bounded by MAX_TIME) so each chunk boundary becomes
       * observable in the per-iteration thread-id record. */
      #pragma omp flush(maxiter,notout)
      while (notout && (count < MAX_TIME) && (maxiter == j)) {
        #pragma omp flush(maxiter,notout)
        my_sleep (SLEEPTIME);
        count += SLEEPTIME;
#ifdef VERBOSE
        printf(".");
#endif
      }
#ifdef VERBOSE
      if (count > 0.) printf(" waited %lf s\n", count);
#endif
      tids[j] = tid;
#ifdef VERBOSE
      printf("%d finished by %d\n",j,tid);
#endif
    } /* end of for */
    notout = 0;
    #pragma omp flush(maxiter,notout)
  } /* end of parallel */
  /*******************************************************
   * evaluation of the values                            *
   *******************************************************/
  {
    int determined_chunksize = 1;
    int last_threadnr = tids[0];
    int global_chunknr = 0;
    int openwork = CFSMAX_SIZE;
    int expected_chunk_size;
    int* local_chunknr = (int*)malloc(threads * sizeof(int));
    double c = 1;
    for (i = 0; i < threads; i++)
      local_chunknr[i] = 0;
    /* Sentinel: differs from any thread id, so the final chunk is
     * flushed by the loops below. */
    tids[CFSMAX_SIZE] = -1;
    /*
     * determine the number of global chunks
     */
    // fprintf(stderr,"# global_chunknr thread local_chunknr chunksize\n");
    for(i = 1; i <= CFSMAX_SIZE; ++i) {
      if (last_threadnr==tids[i]) {
        determined_chunksize++;
      } else {
        /* fprintf(stderr, "%d\t%d\t%d\t%d\n", global_chunknr,
           last_threadnr, local_chunknr[last_threadnr], m); */
        global_chunknr++;
        local_chunknr[last_threadnr]++;
        last_threadnr = tids[i];
        determined_chunksize = 1;
      }
    }
    /* now allocate the memory for saving the sizes of the global chunks */
    chunksizes = (int*)malloc(global_chunknr * sizeof(int));
    /*
     * Evaluate the sizes of the global chunks
     */
    global_chunknr = 0;
    determined_chunksize = 1;
    last_threadnr = tids[0];
    for (i = 1; i <= CFSMAX_SIZE; ++i) {
      /* If the threadnumber was the same as before increase the
       * detected chunksize for this chunk otherwise set the detected
       * chunksize again to one and save the number of the next
       * thread in last_threadnr.
       */
      if (last_threadnr == tids[i]) {
        determined_chunksize++;
      } else {
        chunksizes[global_chunknr] = determined_chunksize;
        global_chunknr++;
        local_chunknr[last_threadnr]++;
        last_threadnr = tids[i];
        determined_chunksize = 1;
      }
    }
#ifdef VERBOSE
    fprintf(stderr, "found\texpected\tconstant\n");
#endif
    /* identify the constant c for the exponential
       decrease of the chunksize */
    expected_chunk_size = openwork / threads;
    c = (double) chunksizes[0] / expected_chunk_size;
    for (i = 0; i < global_chunknr; i++) {
      /* calculate the new expected chunksize */
      if (expected_chunk_size > 1)
        expected_chunk_size = c * openwork / threads;
#ifdef VERBOSE
      fprintf(stderr, "%8d\t%8d\t%lf\n", chunksizes[i],
        expected_chunk_size, c * chunksizes[i]/expected_chunk_size);
#endif
      /* check if chunksize is inside the rounding errors */
      if (abs (chunksizes[i] - expected_chunk_size) >= 2) {
        result = 0;
#ifndef VERBOSE
        fprintf(stderr, "Chunksize differed from expected "
          "value: %d instead of %d\n", chunksizes[i],
          expected_chunk_size);
        /* release all buffers before the early exit (was leaked) */
        free(local_chunknr);
        free(chunksizes);
        free(tids);
        return 0;
#endif
      } /* end if */
#ifndef VERBOSE
      if (expected_chunk_size - chunksizes[i] < 0)
        fprintf(stderr, "Chunksize did not decrease: %d"
          " instead of %d\n", chunksizes[i],expected_chunk_size);
#endif
      /* calculating the remaining amount of work */
      openwork -= chunksizes[i];
    }
    free(local_chunknr);
    free(chunksizes);
  }
  free(tids);
  return result;
}
/* Run the guided-schedule check REPETITIONS times and report the
 * number of failing repetitions as the process exit status
 * (0 == all passed). */
int main()
{
  int failures = 0;
  int rep;
  for (rep = 0; rep < REPETITIONS; rep++) {
    failures += !test_omp_for_schedule_guided();
  }
  return failures;
}
|
tinyexr.h | #ifndef TINYEXR_H
#define TINYEXR_H
/*
Copyright (c) 2014 - 2021, Syoyo Fujita and many contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Syoyo Fujita nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// TinyEXR contains some OpenEXR code, which is licensed under ------------
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#include <stdint.h> // guess stdint.h is available(C99)
#ifdef __cplusplus
extern "C" {
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
#define TINYEXR_X86_OR_X64_CPU 1
#else
#define TINYEXR_X86_OR_X64_CPU 0
#endif
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || TINYEXR_X86_OR_X64_CPU
#define TINYEXR_LITTLE_ENDIAN 1
#else
#define TINYEXR_LITTLE_ENDIAN 0
#endif
// Use miniz or not to decode ZIP format pixel. Linking with zlib
// required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif
// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif
#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0) // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_THREAD
#define TINYEXR_USE_THREAD (0) // No threaded loading.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_OPENMP
#ifdef _OPENMP
#define TINYEXR_USE_OPENMP (1)
#else
#define TINYEXR_USE_OPENMP (0)
#endif
#endif
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-6)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
#define TINYEXR_ERROR_INVALID_HEADER (-9)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-11)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-12)
#define TINYEXR_ERROR_LAYER_NOT_FOUND (-13)
// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)
// Version/flags information parsed from an EXR file header (filled by
// ParseEXRVersionFromFile/Memory).
typedef struct _EXRVersion {
  int version;    // this must be 2
  // tile format image;
  // not zero for only a single-part "normal" tiled file (according to spec.)
  int tiled;
  int long_name;  // long name attribute
  // deep image(EXR 2.0);
  // for a multi-part file, indicates that at least one part is of type deep* (according to spec.)
  int non_image;
  int multipart;  // multi-part(EXR 2.0)
} EXRVersion;
// A single named header attribute; `value` points to `size` raw bytes
// whose interpretation depends on `type`.
typedef struct _EXRAttribute {
  char name[256];        // name and type are up to 255 chars long.
  char type[256];
  unsigned char *value;  // uint8_t*
  int size;
  int pad0;              // padding for alignment
} EXRAttribute;
// Per-channel description from the header's `channels` attribute.
typedef struct _EXRChannelInfo {
  char name[256];  // less than 255 bytes long
  int pixel_type;  // one of TINYEXR_PIXELTYPE_*
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];  // padding for alignment
} EXRChannelInfo;
// One decoded tile of a tiled EXR image, with its position and
// mip/rip level coordinates.
typedef struct _EXRTile {
  int offset_x;
  int offset_y;
  int level_x;
  int level_y;
  int width;   // actual width in a tile.
  int height;  // actual height in a tile.
  unsigned char **images;  // image[channels][pixels]
} EXRTile;
// Integer bounding box (cf. OpenEXR Box2i), used for the data and
// display windows.
typedef struct _EXRBox2i {
  int min_x;
  int min_y;
  int max_x;
  int max_y;
} EXRBox2i;
// Parsed header of one EXR part: the required attributes, tile
// description, channel list and any custom attributes.
typedef struct _EXRHeader {
  float pixel_aspect_ratio;
  int line_order;
  EXRBox2i data_window;
  EXRBox2i display_window;
  float screen_window_center[2];
  float screen_window_width;
  int chunk_count;
  // Properties for tiled format(`tiledesc`).
  int tiled;
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;     // TINYEXR_TILE_ONE_LEVEL / MIPMAP / RIPMAP
  int tile_rounding_mode;  // TINYEXR_TILE_ROUND_DOWN / ROUND_UP
  int long_name;
  // for a single-part file, agree with the version field bit 11
  // for a multi-part file, it is consistent with the type of part
  int non_image;
  int multipart;
  unsigned int header_len;
  // Custom attributes(excludes required attributes(e.g. `channels`,
  // `compression`, etc)
  int num_custom_attributes;
  EXRAttribute *custom_attributes;  // array of EXRAttribute. size =
                                    // `num_custom_attributes`.
  EXRChannelInfo *channels;  // [num_channels]
  int *pixel_types;  // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
                     // each channel. This is overwritten with
                     // `requested_pixel_types` when loading.
  int num_channels;
  int compression_type;  // compression type(TINYEXR_COMPRESSIONTYPE_*)
  int *requested_pixel_types;  // Filled initially by
                               // ParseEXRHeaderFrom(Memory|File), then users
                               // can edit it(only valid for HALF pixel type
                               // channel)
  // name attribute required for multipart files;
  // must be unique and non empty (according to spec.);
  // use EXRSetNameAttr for setting value;
  // max 255 character allowed - excluding terminating zero
  char name[256];
} EXRHeader;
// Array of per-part headers for a multi-part EXR file.
typedef struct _EXRMultiPartHeader {
  int num_headers;
  EXRHeader *headers;  // [num_headers]
} EXRMultiPartHeader;
// Decoded pixel data for one EXR part: either scanline (`images`) or
// tiled (`tiles`) storage, exactly one of which is non-NULL.
typedef struct _EXRImage {
  EXRTile *tiles;  // Tiled pixel data. The application must reconstruct image
                   // from tiles manually. NULL if scanline format.
  struct _EXRImage* next_level;  // NULL if scanline format or image is the last level.
  int level_x;  // x level index
  int level_y;  // y level index
  unsigned char **images;  // image[channels][pixels]. NULL if tiled format.
  int width;
  int height;
  int num_channels;
  // Properties for tile format.
  int num_tiles;
} EXRImage;
// Array of per-part images for a multi-part EXR file.
typedef struct _EXRMultiPartImage {
  int num_images;
  EXRImage *images;  // [num_images]
} EXRMultiPartImage;
// Decoded deep-image data (variable samples per pixel, EXR 2.0).
typedef struct _DeepImage {
  const char **channel_names;
  float ***image;      // image[channels][scanlines][samples]
  int **offset_table;  // offset_table[scanline][offsets]
  int num_channels;
  int width;
  int height;
  int pad0;  // padding for alignment
} DeepImage;
#if 0
// @deprecated { For backward compatibility. Not recommended to use. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
#endif
// Loads single-frame OpenEXR image by specifying layer name. Assume EXR image
// contains A(single channel alpha) or RGB(A) channels. Application must free
// image data as returned by `out_rgba` Result image format is: float x RGBA x
// width x height. Returns negative value and may set error string in `err` when
// there's an error When the specified layer name is not found in the EXR file,
// the function will return `TINYEXR_ERROR_LAYER_NOT_FOUND`.
extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
const char *filename, const char *layer_name,
const char **err);
//
// Get layer infos from EXR file.
//
// @param[out] layer_names List of layer names. Application must free memory
// after using this.
// @param[out] num_layers The number of layers
// @param[out] err Error string(will be filled when the function returns error
// code). Free it using FreeEXRErrorMessage after using this value.
//
// @return TINYEXR_SUCCEES upon success.
//
extern int EXRLayers(const char *filename, const char **layer_names[],
int *num_layers, const char **err);
// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// checking given file is a EXR file(by just look up header)
// @return TINYEXR_SUCCESS for EXR image, TINYEXR_ERROR_INVALID_HEADER for
// others
extern int IsEXR(const char *filename);
#if 0
// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`
// Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
// Use ZIP compression by default.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXR(const float *data, const int width, const int height,
const int components, const int save_as_fp16,
const char *filename, const char **err);
#endif
// Returns the number of resolution levels of the image (including the base)
extern int EXRNumLevels(const EXRImage* exr_image);
// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);
// Set name attribute of EXRHeader struct (it makes a copy)
extern void EXRSetNameAttr(EXRHeader *exr_header, const char* name);
// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);
// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);
// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);
// Frees error message
extern void FreeEXRErrorMessage(const char *msg);
// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
const unsigned char *memory, size_t size);
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
const char *filename, const char **err);
// Parse single-part OpenEXR header from a memory and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err);
#if 0
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const char *filename,
const char **err);
// Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*`
// array
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const unsigned char *memory,
size_t size, const char **err);
#endif
// Loads single-part OpenEXR image from a file.
// Application must setup `ParseEXRHeaderFromFile` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
const char *filename, const char **err);
// Loads single-part OpenEXR image from a memory.
// Application must setup `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
const unsigned char *memory,
const size_t size, const char **err);
#if 0
// Loads multi-part OpenEXR image from a file.
// Application must setup `ParseEXRMultipartHeaderFromFile` before calling this
// function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const char *filename,
const char **err);
// Loads multi-part OpenEXR image from a memory.
// Application must setup `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err);
#endif
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRImageToFile(const EXRImage *image,
const EXRHeader *exr_header, const char *filename,
const char **err);
// Saves multi-channel, single-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// Return the number of bytes if success.
// Return zero and will set error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRImageToMemory(const EXRImage *image,
const EXRHeader *exr_header,
unsigned char **memory, const char **err);
#if 0
// Saves multi-channel, multi-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// File global attributes (eg. display_window) must be set in the first header.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRMultipartImageToFile(const EXRImage *images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const char *filename, const char **err);
// Saves multi-channel, multi-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// File global attributes (eg. display_window) must be set in the first header.
// Return the number of bytes if success.
// Return zero and will set error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRMultipartImageToMemory(const EXRImage *images,
const EXRHeader **exr_headers,
unsigned int num_parts,
unsigned char **memory, const char **err);
// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err);
#endif
#ifdef __cplusplus
}
#endif
#endif // TINYEXR_H
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED
#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h> // for UTF-8
#endif
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
// #include <iostream> // debug
#include <limits>
#include <string>
#include <vector>
#include <set>
// https://stackoverflow.com/questions/5047971/how-do-i-check-for-c11-support
#if __cplusplus > 199711L || (defined(_MSC_VER) && _MSC_VER >= 1900)
#define TINYEXR_HAS_CXX11 (1)
// C++11
#include <cstdint>
#if TINYEXR_USE_THREAD
#include <atomic>
#include <thread>
#endif
#endif // __cplusplus > 199711L
#if TINYEXR_USE_OPENMP
#include <omp.h>
#endif
#if TINYEXR_USE_MINIZ
#include "miniz.h"
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif
#if TINYEXR_USE_ZFP
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Weverything"
#endif
#include "zfp.h"
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
namespace tinyexr {
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
// static bool IsBigEndian(void) {
// union {
// unsigned int i;
// char c[4];
// } bint = {0x01020304};
//
// return bint.c[0] == 1;
//}
// Store a heap-allocated copy of `msg` into `*err` (no-op when err is NULL).
// The caller owns the returned string and must release it with
// FreeEXRErrorMessage()/free().
static void SetErrorMessage(const std::string &msg, const char **err) {
  if (err == NULL) {
    return;
  }
#ifdef _WIN32
  *err = _strdup(msg.c_str());
#else
  *err = strdup(msg.c_str());
#endif
}
// Size in bytes of the EXR version header (4-byte magic + 4-byte version).
static const int kEXRVersionSize = 8;
// Copy a 16-bit value one byte at a time; avoids unaligned 16-bit
// loads/stores on strict-alignment platforms.
static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
}
// Byte-swap a 16-bit value in place on big-endian hosts; a no-op on
// little-endian hosts (EXR files store data little-endian).
static void swap2(unsigned short *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
(void)val;
#else
unsigned short tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[1];
dst[1] = src[0];
#endif
}
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
#endif
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
// cpy4 overloads: copy a 32-bit value one byte at a time so the copy is
// safe for unaligned source/destination addresses.
static void cpy4(int *dst_val, const int *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static void cpy4(float *dst_val, const float *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
// swap4 overloads: byte-swap a 32-bit value in place on big-endian hosts;
// no-op on little-endian hosts. The float overload swaps raw bytes without
// interpreting the value, so NaN payloads survive the swap.
static void swap4(unsigned int *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
(void)val;
#else
unsigned int tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
static void swap4(int *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
(void)val;
#else
int tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
static void swap4(float *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
(void)val;
#else
float tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
#if 0
// cpy8: byte-wise 64-bit copy (currently compiled out; kept for reference).
static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
dst[4] = src[4];
dst[5] = src[5];
dst[6] = src[6];
dst[7] = src[7];
}
#endif
// Byte-swap a 64-bit value in place on big-endian hosts; no-op on
// little-endian hosts.
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
(void)val;
#else
tinyexr::tinyexr_uint64 tmp = (*val);
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[7];
dst[1] = src[6];
dst[2] = src[5];
dst[3] = src[4];
dst[4] = src[3];
dst[5] = src[2];
dst[6] = src[1];
dst[7] = src[0];
#endif
}
// https://gist.github.com/rygorous/2156668
// IEEE-754 single-precision value viewed three ways: raw bits, float, and
// sign/exponent/mantissa bit-fields (field order depends on endianness).
union FP32 {
unsigned int u;
float f;
struct {
#if TINYEXR_LITTLE_ENDIAN
unsigned int Mantissa : 23;
unsigned int Exponent : 8;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 8;
unsigned int Mantissa : 23;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif
// IEEE-754 half-precision value viewed as raw 16 bits or as
// sign/exponent/mantissa bit-fields (field order depends on endianness).
union FP16 {
unsigned short u;
struct {
#if TINYEXR_LITTLE_ENDIAN
unsigned int Mantissa : 10;
unsigned int Exponent : 5;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 5;
unsigned int Mantissa : 10;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// Convert a 16-bit half to a 32-bit float via bit manipulation (derived from
// Fabian "ryg" Giesen's public-domain code, see gist link above). Handles
// zero, denormals, Inf and NaN; preserves the sign bit.
static FP32 half_to_float(FP16 h) {
static const FP32 magic = {113 << 23};
static const unsigned int shifted_exp = 0x7c00
<< 13; // exponent mask after shift
FP32 o;
o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits
unsigned int exp_ = shifted_exp & o.u; // just the exponent
o.u += (127 - 15) << 23; // exponent adjust (half bias 15 -> float bias 127)
// handle exponent special cases
if (exp_ == shifted_exp) // Inf/NaN?
o.u += (128 - 16) << 23; // extra exp adjust
else if (exp_ == 0) // Zero/Denormal?
{
o.u += 1 << 23; // extra exp adjust
o.f -= magic.f; // renormalize
}
o.u |= (h.u & 0x8000U) << 16U; // sign bit
return o;
}
// Convert a 32-bit float to a 16-bit half with round-to-nearest. Overflow
// saturates to signed infinity; Inf/NaN and denormals are handled explicitly.
static FP16 float_to_half_full(FP32 f) {
FP16 o = {0};
// Based on ISPC reference code (with minor modifications)
if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow)
o.s.Exponent = 0;
else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set)
{
o.s.Exponent = 31;
o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf
} else // Normalized number
{
// Exponent unbias the single, then bias the halfp
int newexp = f.s.Exponent - 127 + 15;
if (newexp >= 31) // Overflow, return signed infinity
o.s.Exponent = 31;
else if (newexp <= 0) // Underflow
{
if ((14 - newexp) <= 24) // Mantissa might be non-zero
{
unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit
o.s.Mantissa = mant >> (14 - newexp);
if ((mant >> (13 - newexp)) & 1) // Check for rounding
o.u++; // Round, might overflow into exp bit, but this is OK
}
} else {
o.s.Exponent = static_cast<unsigned int>(newexp);
o.s.Mantissa = f.s.Mantissa >> 13;
if (f.s.Mantissa & 0x1000) // Check for rounding
o.u++; // Round, might overflow to inf, this is OK
}
}
o.s.Sign = f.s.Sign;
return o;
}
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RAMDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
// Read a NUL-terminated string starting at `ptr`, scanning at most `len`
// bytes. On success stores the string (without the terminator) into `*s`
// and returns the position just past the '\0'. If no terminator is found
// within `len` bytes, clears `*s` and returns NULL (buffer-overrun guard).
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  const void *terminator = memchr(ptr, '\0', len);
  if (terminator == NULL) {
    // No NUL within the allowed range: treat as malformed input.
    s->clear();
    return NULL;
  }
  const char *end = static_cast<const char *>(terminator);
  s->assign(ptr, end);
  return end + 1;  // skip '\0'
}
// Parse one EXR attribute at `marker` (at most `size` bytes):
// layout is name'\0' type'\0' length(uint32, file byte order) payload.
// On success fills name/type/data, stores the total bytes consumed in
// *marker_size, and returns true. Returns false on truncated or malformed
// input. A zero-length payload is accepted only for "string" attributes
// (stored as a single '\0' byte).
static bool ReadAttribute(std::string *name, std::string *type,
std::vector<unsigned char> *data, size_t *marker_size,
const char *marker, size_t size) {
size_t name_len = strnlen(marker, size);
if (name_len == size) {
// String does not have a terminating character.
return false;
}
*name = std::string(marker, name_len);
marker += name_len + 1;
size -= name_len + 1;
size_t type_len = strnlen(marker, size);
if (type_len == size) {
return false;
}
*type = std::string(marker, type_len);
marker += type_len + 1;
size -= type_len + 1;
if (size < sizeof(uint32_t)) {
return false;
}
uint32_t data_len;
memcpy(&data_len, marker, sizeof(uint32_t));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len == 0) {
if ((*type).compare("string") == 0) {
// Accept empty string attribute.
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);
data->resize(1);
(*data)[0] = '\0';
return true;
} else {
return false;
}
}
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
if (size < data_len) {
return false;
}
data->resize(static_cast<size_t>(data_len));
memcpy(&data->at(0), marker, static_cast<size_t>(data_len));
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
return true;
}
// Append one EXR attribute to `out` in file layout:
// name'\0' type'\0' length(int, file byte order) payload.
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
                                   const char *name, const char *type,
                                   const unsigned char *data, int len) {
  const unsigned char *name_bytes =
      reinterpret_cast<const unsigned char *>(name);
  out->insert(out->end(), name_bytes, name_bytes + strlen(name) + 1);
  const unsigned char *type_bytes =
      reinterpret_cast<const unsigned char *>(type);
  out->insert(out->end(), type_bytes, type_bytes + strlen(type) + 1);
  // Length is stored little-endian; swap4 is a no-op on LE hosts.
  int stored_len = len;
  tinyexr::swap4(&stored_len);
  const unsigned char *len_bytes =
      reinterpret_cast<const unsigned char *>(&stored_len);
  out->insert(out->end(), len_bytes, len_bytes + sizeof(int));
  out->insert(out->end(), data, data + len);
}
// Parsed entry of the "chlist" header attribute (one image channel).
typedef struct {
std::string name; // less than 255 bytes long
int pixel_type; // pixel type as stored in the file
int requested_pixel_type; // pixel type requested by the application
int x_sampling; // horizontal subsampling factor
int y_sampling; // vertical subsampling factor
unsigned char p_linear; // pLinear flag byte from the file
unsigned char pad[3]; // padding for alignment
} ChannelInfo;
// Integer box matching the EXR "box2i" attribute (min/max are inclusive).
typedef struct {
int min_x;
int min_y;
int max_x;
int max_y;
} Box2iInfo;
// Aggregated contents of one EXR part header, populated during parsing.
struct HeaderInfo {
std::vector<tinyexr::ChannelInfo> channels;
std::vector<EXRAttribute> attributes;
Box2iInfo data_window;
int line_order;
Box2iInfo display_window;
float screen_window_center[2];
float screen_window_width;
float pixel_aspect_ratio;
int chunk_count;
// Tiled format
int tiled; // Non-zero if the part is tiled.
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
unsigned int header_len;
int compression_type;
// required for multi-part or non-image files
std::string name;
// required for multi-part or non-image files
std::string type;
// Reset every field to its parse-ready default (zero / empty).
void clear() {
channels.clear();
attributes.clear();
data_window.min_x = 0;
data_window.min_y = 0;
data_window.max_x = 0;
data_window.max_y = 0;
line_order = 0;
display_window.min_x = 0;
display_window.min_y = 0;
display_window.max_x = 0;
display_window.max_y = 0;
screen_window_center[0] = 0.0f;
screen_window_center[1] = 0.0f;
screen_window_width = 0.0f;
pixel_aspect_ratio = 0.0f;
chunk_count = 0;
// Tiled format
tiled = 0;
tile_size_x = 0;
tile_size_y = 0;
tile_level_mode = 0;
tile_rounding_mode = 0;
header_len = 0;
compression_type = 0;
name.clear();
type.clear();
}
};
// Parse the "chlist" attribute payload in `data` into `channels`.
// Per-channel layout: name'\0', pixel_type(int), pLinear(uchar),
// reserved uchar[3], x_sampling(int), y_sampling(int). The list is
// terminated by a single '\0' byte. Returns false on truncated input.
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
const std::vector<unsigned char> &data) {
const char *p = reinterpret_cast<const char *>(&data.at(0));
for (;;) {
if ((*p) == 0) {
break;
}
ChannelInfo info;
tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
(p - reinterpret_cast<const char *>(data.data()));
if (data_len < 0) {
return false;
}
p = ReadString(&info.name, p, size_t(data_len));
if ((p == NULL) && (info.name.empty())) {
// Buffer overrun. Issue #51.
return false;
}
// Require 16 bytes of fixed fields plus at least one byte after them
// (for the next name or the list terminator).
const unsigned char *data_end =
reinterpret_cast<const unsigned char *>(p) + 16;
if (data_end >= (data.data() + data.size())) {
return false;
}
memcpy(&info.pixel_type, p, sizeof(int));
p += 4;
info.p_linear = static_cast<unsigned char>(p[0]); // uchar
p += 1 + 3; // reserved: uchar[3]
memcpy(&info.x_sampling, p, sizeof(int)); // int
p += 4;
memcpy(&info.y_sampling, p, sizeof(int)); // int
p += 4;
// Fix byte order on big-endian hosts (no-op on little-endian).
tinyexr::swap4(&info.pixel_type);
tinyexr::swap4(&info.x_sampling);
tinyexr::swap4(&info.y_sampling);
channels.push_back(info);
}
return true;
}
// Serialize `channels` into `data` in "chlist" file layout (see
// ReadChannelInfo for the per-channel byte layout). The written pixel type
// is each channel's `requested_pixel_type`. A final '\0' terminates the list.
static void WriteChannelInfo(std::vector<unsigned char> &data,
const std::vector<ChannelInfo> &channels) {
size_t sz = 0;
// Calculate total size.
for (size_t c = 0; c < channels.size(); c++) {
sz += channels[c].name.length() + 1; // +1 for \0
sz += 16; // 4 * int
}
data.resize(sz + 1);
unsigned char *p = &data.at(0);
for (size_t c = 0; c < channels.size(); c++) {
memcpy(p, channels[c].name.c_str(), channels[c].name.length());
p += channels[c].name.length();
(*p) = '\0';
p++;
int pixel_type = channels[c].requested_pixel_type;
int x_sampling = channels[c].x_sampling;
int y_sampling = channels[c].y_sampling;
// Convert to file byte order (no-op on little-endian hosts).
tinyexr::swap4(&pixel_type);
tinyexr::swap4(&x_sampling);
tinyexr::swap4(&y_sampling);
memcpy(p, &pixel_type, sizeof(int));
p += sizeof(int);
(*p) = channels[c].p_linear;
p += 4;
memcpy(p, &x_sampling, sizeof(int));
p += sizeof(int);
memcpy(p, &y_sampling, sizeof(int));
p += sizeof(int);
}
(*p) = '\0';
}
// Compress `src` (src_size bytes) into `dst` using the EXR ZIP scheme:
// interleave-reorder the bytes, delta-predict, then deflate with
// miniz or zlib. `dst` must be at least compressBound(src_size) bytes.
// Falls back to storing the raw bytes when compression would expand
// the data (Issue 40); callers detect this by compressedSize == src_size.
static void CompressZip(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
// Apply EXR-specific? postprocess. Grabbed from OpenEXR's
// ImfZipCompressor.cpp
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
// Split bytes alternately into the two halves of tmpBuf.
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
// Replace each byte with a biased delta from its predecessor.
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
#if TINYEXR_USE_MINIZ
//
// Compress the data using miniz
//
mz_ulong outSize = mz_compressBound(src_size);
int ret = mz_compress(
dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
src_size);
assert(ret == MZ_OK);
(void)ret;
compressedSize = outSize;
#else
uLong outSize = compressBound(static_cast<uLong>(src_size));
int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
src_size);
assert(ret == Z_OK);
compressedSize = outSize;
#endif
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
// Decompress an EXR ZIP block: inflate, undo the delta predictor, then
// undo the byte interleave into `dst`. If src_size equals the expected
// uncompressed size the data was stored raw and is copied verbatim
// (Issue 40). Returns false when inflation fails.
// NOTE(review): assumes *uncompressed_size > 0; tmpBuf.at(0) would throw
// for a zero-size request — confirm callers validate this.
static bool DecompressZip(unsigned char *dst,
unsigned long *uncompressed_size /* inout */,
const unsigned char *src, unsigned long src_size) {
if ((*uncompressed_size) == src_size) {
// Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
return true;
}
std::vector<unsigned char> tmpBuf(*uncompressed_size);
#if TINYEXR_USE_MINIZ
int ret =
mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (MZ_OK != ret) {
return false;
}
#else
int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (Z_OK != ret) {
return false;
}
#endif
//
// Apply EXR-specific? postprocess. Grabbed from OpenEXR's
// ImfZipCompressor.cpp
//
// Predictor.
{
// Invert the biased delta encoding applied by CompressZip.
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
// Re-interleave the two halves of tmpBuf back into dst.
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(*uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + (*uncompressed_size);
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
// RLE code from OpenEXR --------------------------------------
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// RLE tuning constants (from OpenEXR): runs shorter than MIN_RUN_LENGTH
// are emitted as literals; runs/literal spans are capped at MAX_RUN_LENGTH.
const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;
//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
// Encoding: a non-negative count byte `n` followed by one value byte means
// "repeat value n+1 times"; a negative count byte `-n` is followed by `n`
// literal bytes.
static int rleCompress(int inLength, const char in[], signed char out[]) {
const char *inEnd = in + inLength;
const char *runStart = in;
const char *runEnd = in + 1;
signed char *outWrite = out;
while (runStart < inEnd) {
// Extend the run while bytes repeat (bounded by MAX_RUN_LENGTH).
while (runEnd < inEnd && *runStart == *runEnd &&
runEnd - runStart - 1 < MAX_RUN_LENGTH) {
++runEnd;
}
if (runEnd - runStart >= MIN_RUN_LENGTH) {
//
// Compressible run
//
*outWrite++ = static_cast<char>(runEnd - runStart) - 1;
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
runStart = runEnd;
} else {
//
// Uncompressable run
//
// Gather literals until a run of >= 3 identical bytes begins.
while (runEnd < inEnd &&
((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
(runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
runEnd - runStart < MAX_RUN_LENGTH) {
++runEnd;
}
*outWrite++ = static_cast<char>(runStart - runEnd);
while (runStart < runEnd) {
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
}
}
++runEnd;
}
return static_cast<int>(outWrite - out);
}
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//
// Decode rleCompress() output: negative count = literal span, non-negative
// count = repeated value. Bounds checks on both input (Issue #116) and
// output guard against malicious or truncated data; returns 0 on overflow.
static int rleUncompress(int inLength, int maxLength, const signed char in[],
char out[]) {
char *outStart = out;
while (inLength > 0) {
if (*in < 0) {
// Literal span: copy `count` bytes verbatim.
int count = -(static_cast<int>(*in++));
inLength -= count + 1;
// Fixes #116: Add bounds check to in buffer.
if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;
memcpy(out, in, count);
out += count;
in += count;
} else {
// Run: replicate the next byte `count + 1` times.
int count = *in++;
inLength -= 2;
if (0 > (maxLength -= count + 1)) return 0;
memset(out, *reinterpret_cast<const char *>(in), count + 1);
out += count + 1;
in++;
}
}
return static_cast<int>(out - outStart);
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// End of RLE code from OpenEXR -----------------------------------
// Compress `src` using the EXR RLE scheme: interleave-reorder the bytes,
// delta-predict, then run-length encode. `dst` must hold up to
// (src_size * 3) / 2 bytes. Falls back to storing the raw bytes when RLE
// would expand the data (Issue 40).
static void CompressRle(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
// Apply EXR-specific? postprocess. Grabbed from OpenEXR's
// ImfRleCompressor.cpp
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
// Split bytes alternately into the two halves of tmpBuf.
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
// Replace each byte with a biased delta from its predecessor.
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// outSize will be (srcSiz * 3) / 2 at max.
int outSize = rleCompress(static_cast<int>(src_size),
reinterpret_cast<const char *>(&tmpBuf.at(0)),
reinterpret_cast<signed char *>(dst));
assert(outSize > 0);
compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
// Decompress an EXR RLE block: run-length decode, undo the delta predictor,
// then undo the byte interleave into `dst`. Raw (stored) blocks are copied
// verbatim (Issue 40). Returns false when the decoded length does not match
// `uncompressed_size` or the input is implausibly small (Issue #112).
static bool DecompressRle(unsigned char *dst,
const unsigned long uncompressed_size,
const unsigned char *src, unsigned long src_size) {
if (uncompressed_size == src_size) {
// Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
return true;
}
// Workaround for issue #112.
// TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
if (src_size <= 2) {
return false;
}
std::vector<unsigned char> tmpBuf(uncompressed_size);
int ret = rleUncompress(static_cast<int>(src_size),
static_cast<int>(uncompressed_size),
reinterpret_cast<const signed char *>(src),
reinterpret_cast<char *>(&tmpBuf.at(0)));
if (ret != static_cast<int>(uncompressed_size)) {
return false;
}
//
// Apply EXR-specific? postprocess. Grabbed from OpenEXR's
// ImfRleCompressor.cpp
//
// Predictor.
{
// Invert the biased delta encoding applied by CompressRle.
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
// Re-interleave the two halves of tmpBuf back into dst.
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + uncompressed_size;
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
#if TINYEXR_USE_PIZ
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
// Per-channel bookkeeping used by the PIZ codec.
// start/end bracket this channel's region in a shared 16-bit work buffer;
// nx/ny are the channel's sample dimensions, ys its y sampling.
// NOTE(review): `size` is presumably the number of 16-bit words per sample
// (1 for HALF, 2 for FLOAT/UINT) — confirm against usage below.
struct PIZChannelData {
unsigned short *start;
unsigned short *end;
int nx;
int ny;
int ys;
int size;
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
// 14-bit Haar wavelet encode (non-modulo basis).
// Stores the rounded-down average of a and b in l and their signed
// difference in h, treating the 16-bit inputs as signed values.
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  const short sa = static_cast<short>(a);
  const short sb = static_cast<short>(b);
  l = static_cast<unsigned short>(static_cast<short>((sa + sb) >> 1));
  h = static_cast<unsigned short>(static_cast<short>(sa - sb));
}
// 14-bit Haar wavelet decode: inverse of wenc14.
// Reconstructs a and b from the average l and difference h, rounding so
// that wdec14(wenc14(a, b)) returns the original pair exactly.
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const int m = static_cast<short>(l);
  const int d = static_cast<short>(h);
  // Undo the floor division of the encoder: d's low bit restores the
  // half lost when the average was truncated.
  const int ai = m + (d & 1) + (d >> 1);
  a = static_cast<unsigned short>(static_cast<short>(ai));
  b = static_cast<unsigned short>(static_cast<short>(ai - d));
}
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
// Parameters for the modulo-arithmetic (full 16-bit) wavelet basis.
const int NBITS = 16;                   // bits per sample
const int A_OFFSET = 1 << (NBITS - 1);  // bias added to `a` before averaging
const int M_OFFSET = 1 << (NBITS - 1);  // bias added to the mean when the difference is negative
const int MOD_MASK = (1 << NBITS) - 1;  // wraps intermediate results to 16 bits
// 16-bit Haar wavelet encode (modulo-arithmetic basis): works on full
// 16-bit data, trading some compressibility for range.
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  const int ao = (a + A_OFFSET) & MOD_MASK;
  int mean = (ao + b) >> 1;
  int diff = ao - b;
  if (diff < 0) {
    mean = (mean + M_OFFSET) & MOD_MASK;
  }
  diff &= MOD_MASK;
  l = static_cast<unsigned short>(mean);
  h = static_cast<unsigned short>(diff);
}
// 16-bit Haar wavelet decode: inverse of wenc16 under modulo arithmetic.
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const int mean = l;
  const int diff = h;
  const int bb = (mean - (diff >> 1)) & MOD_MASK;
  b = static_cast<unsigned short>(bb);
  a = static_cast<unsigned short>((diff + bb - A_OFFSET) & MOD_MASK);
}
//
// 2D Wavelet encoding:
//
// In-place 2D Haar wavelet transform of an nx-by-ny array stored with
// arbitrary x/y strides (ox/oy, in unsigned shorts).  Uses the exact
// 14-bit basis when all values fit in 14 bits, otherwise the
// modulo-arithmetic 16-bit basis.
static void wav2Encode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));  // non-modulo basis only works for < 2^14
  int n = (nx > ny) ? ny : nx;  // level count bounded by min(nx, ny)
  int p = 1;                    // == 1 << level
  int p2 = 2;                   // == 1 << (level+1)
  //
  // Hierarchical loop on smaller dimension n
  //
  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    // Strides for the current level: 1 step and 2 steps in each axis.
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;
    //
    // Y loop
    //
    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      //
      // X loop
      //
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;
        //
        // 2D wavelet encoding: transform a 2x2 quad horizontally,
        // then vertically.
        //
        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }
      //
      // Encode (1D) odd column (still in Y loop)
      //
      if (nx & p) {
        unsigned short *p10 = px + oy1;
        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);
        *px = i00;
      }
    }
    //
    // Encode (1D) odd line (must loop in X)
    //
    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);
        *px = i00;
      }
    }
    //
    // Next level
    //
    p = p2;
    p2 <<= 1;
  }
}
//
// 2D Wavelet decoding:
//
// Inverse of wav2Encode: in-place 2D Haar wavelet reconstruction.
// First finds the coarsest level (largest power of two <= min(nx, ny)),
// then walks the levels from coarse to fine, mirroring the encoder.
static void wav2Decode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));  // must match the basis chosen by the encoder
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;
  //
  // Search max level
  //
  while (p <= n) p <<= 1;
  p >>= 1;
  p2 = p;
  p >>= 1;
  //
  // Hierarchical loop on smaller dimension n
  //
  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;
    //
    // Y loop
    //
    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      //
      // X loop
      //
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;
        //
        // 2D wavelet decoding: vertical pass first, then horizontal —
        // the reverse order of the encoder.
        //
        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }
      //
      // Decode (1D) odd column (still in Y loop)
      //
      if (nx & p) {
        unsigned short *p10 = px + oy1;
        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);
        *px = i00;
      }
    }
    //
    // Decode (1D) odd line (must loop in X)
    //
    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);
        *px = i00;
      }
    }
    //
    // Next level
    //
    p2 = p;
    p >>= 1;
  }
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16;  // literal (value) bit length
const int HUF_DECBITS = 14;  // decoding bit size (>= 8)
const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1;  // encoding table size (+1 for run-length pseudo-symbol)
const int HUF_DECSIZE = 1 << HUF_DECBITS;        // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;
// One primary decoding-table entry.  Short codes (len <= HUF_DECBITS)
// store their length and symbol directly; long codes share a slot and
// keep their candidate symbols in the heap-allocated array p (lit = count).
struct HufDec {  // short code long code
                 //-------------------------------
  unsigned int len : 8;   // code length   0
  unsigned int lit : 24;  // lit           p size
  unsigned int *p;        // 0             lits
};
// A packed Huffman entry keeps the bit length in the 6 low bits and the
// code value itself in the remaining high bits (see hufCanonicalCodeTable).
inline long long hufLength(long long code) { return code & 0x3f; }
inline long long hufCode(long long code) { return code >> 6; }
// Append the low nBits of `bits` to the output stream.  `c` is the bit
// accumulator, `lc` the number of pending bits in it; whole bytes are
// flushed to `out` as they become available.
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
                       char *&out) {
  c = (c << nBits) | bits;
  lc += nBits;
  while (lc >= 8) {
    lc -= 8;
    *out++ = static_cast<char>(c >> lc);
  }
}
// Read nBits from the input stream and return them as the low bits of
// the result.  `c`/`lc` form the bit accumulator shared across calls;
// `in` is advanced one byte at a time as the accumulator refills.
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
  // Refill a byte at a time until enough bits are buffered.
  for (; lc < nBits; lc += 8) {
    const unsigned char byte = *reinterpret_cast<const unsigned char *>(in++);
    c = (c << 8) | byte;
  }
  lc -= nBits;
  return (c >> lc) & ((1 << nBits) - 1);
}
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
// Build a canonical Huffman code table in place: on entry hcode[i] holds
// the code length of symbol i; on exit it holds length | (code << 6),
// the packed form read by hufLength()/hufCode().
static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
  long long n[59];  // per length (0..58): first a count, then the next free code
  //
  // For each i from 0 through 58, count the
  // number of different codes of length i, and
  // store the count in n[i].
  //
  for (int i = 0; i <= 58; ++i) n[i] = 0;
  for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;
  //
  // For each i from 58 through 1, compute the
  // numerically lowest code with length i, and
  // store that code in n[i].
  //
  long long c = 0;
  for (int i = 58; i > 0; --i) {
    long long nc = ((c + n[i]) >> 1);
    n[i] = c;
    c = nc;
  }
  //
  // hcode[i] contains the length, l, of the
  // code for symbol i. Assign the next available
  // code of length l to the symbol and store both
  // l and the code in hcode[i].
  //
  for (int i = 0; i < HUF_ENCSIZE; ++i) {
    int l = static_cast<int>(hcode[i]);
    if (l > 0) hcode[i] = l | (n[l]++ << 6);
  }
}
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
// Comparator over pointers into the frequency array; ">" makes the
// std:: heap functions keep the SMALLEST frequency on top (min-heap).
struct FHeapCompare {
  bool operator()(long long *a, long long *b) { return *a > *b; }
};
// Turn a symbol-frequency histogram into a packed canonical Huffman
// encoding table (see hufCanonicalCodeTable for the output format).
// Also appends a pseudo-symbol at *iM used as the run-length code.
static void hufBuildEncTable(
    long long *frq,  // io: input frequencies [HUF_ENCSIZE], output table
    int *im,         // o: min frq index
    int *iM)         // o: max frq index
{
  //
  // This function assumes that when it is called, array frq
  // indicates the frequency of all possible symbols in the data
  // that are to be Huffman-encoded. (frq[i] contains the number
  // of occurrences of symbol i in the data.)
  //
  // The loop below does three things:
  //
  // 1) Finds the minimum and maximum indices that point
  // to non-zero entries in frq:
  //
  // frq[im] != 0, and frq[i] == 0 for all i < im
  // frq[iM] != 0, and frq[i] == 0 for all i > iM
  //
  // 2) Fills array fHeap with pointers to all non-zero
  // entries in frq.
  //
  // 3) Initializes array hlink such that hlink[i] == i
  // for all array entries.
  //
  std::vector<int> hlink(HUF_ENCSIZE);
  std::vector<long long *> fHeap(HUF_ENCSIZE);
  *im = 0;
  while (!frq[*im]) (*im)++;
  int nf = 0;
  for (int i = *im; i < HUF_ENCSIZE; i++) {
    hlink[i] = i;
    if (frq[i]) {
      fHeap[nf] = &frq[i];
      nf++;
      *iM = i;
    }
  }
  //
  // Add a pseudo-symbol, with a frequency count of 1, to frq;
  // adjust the fHeap and hlink array accordingly. Function
  // hufEncode() uses the pseudo-symbol for run-length encoding.
  //
  (*iM)++;
  frq[*iM] = 1;
  fHeap[nf] = &frq[*iM];
  nf++;
  //
  // Build an array, scode, such that scode[i] contains the number
  // of bits assigned to symbol i. Conceptually this is done by
  // constructing a tree whose leaves are the symbols with non-zero
  // frequency:
  //
  // Make a heap that contains all symbols with a non-zero frequency,
  // with the least frequent symbol on top.
  //
  // Repeat until only one symbol is left on the heap:
  //
  // Take the two least frequent symbols off the top of the heap.
  // Create a new node that has first two nodes as children, and
  // whose frequency is the sum of the frequencies of the first
  // two nodes. Put the new node back into the heap.
  //
  // The last node left on the heap is the root of the tree. For each
  // leaf node, the distance between the root and the leaf is the length
  // of the code for the corresponding symbol.
  //
  // The loop below doesn't actually build the tree; instead we compute
  // the distances of the leaves from the root on the fly. When a new
  // node is added to the heap, then that node's descendants are linked
  // into a single linear list that starts at the new node, and the code
  // lengths of the descendants (that is, their distance from the root
  // of the tree) are incremented by one.
  //
  std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
  std::vector<long long> scode(HUF_ENCSIZE);
  memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);
  while (nf > 1) {
    //
    // Find the indices, mm and m, of the two smallest non-zero frq
    // values in fHeap, add the smallest frq to the second-smallest
    // frq, and remove the smallest frq value from fHeap.
    //
    int mm = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    --nf;
    int m = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    frq[m] += frq[mm];
    std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    //
    // The entries in scode are linked into lists with the
    // entries in hlink serving as "next" pointers and with
    // the end of a list marked by hlink[j] == j.
    //
    // Traverse the lists that start at scode[m] and scode[mm].
    // For each element visited, increment the length of the
    // corresponding code by one bit. (If we visit scode[j]
    // during the traversal, then the code for symbol j becomes
    // one bit longer.)
    //
    // Merge the lists that start at scode[m] and scode[mm]
    // into a single list that starts at scode[m].
    //
    //
    // Add a bit to all codes in the first list.
    //
    for (int j = m;; j = hlink[j]) {
      scode[j]++;
      assert(scode[j] <= 58);
      if (hlink[j] == j) {
        //
        // Merge the two lists.
        //
        hlink[j] = mm;
        break;
      }
    }
    //
    // Add a bit to all codes in the second list
    //
    for (int j = mm;; j = hlink[j]) {
      scode[j]++;
      assert(scode[j] <= 58);
      if (hlink[j] == j) break;
    }
  }
  //
  // Build a canonical Huffman code table, replacing the code
  // lengths in scode with (code, code length) pairs. Copy the
  // code table from scode into frq.
  //
  hufCanonicalCodeTable(scode.data());
  memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
const int SHORT_ZEROCODE_RUN = 59;  // codes 59..62 encode runs of 2..5 zeroes
const int LONG_ZEROCODE_RUN = 63;   // followed by 8 bits holding (runLength - 6)
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;  // == 6
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;  // == 261, max run in one long code
// Pack the code lengths of hcode[im..iM] into *pcode, 6 bits each, with
// runs of zero lengths compressed per the table above.  Advances *pcode
// past the packed data.
static void hufPackEncTable(
    const long long *hcode,  // i : encoding table [HUF_ENCSIZE]
    int im,                  // i : min hcode index
    int iM,                  // i : max hcode index
    char **pcode)            // o: ptr to packed table (updated)
{
  char *p = *pcode;
  long long c = 0;
  int lc = 0;
  for (; im <= iM; im++) {
    int l = hufLength(hcode[im]);
    if (l == 0) {
      // Count how many consecutive zero lengths follow (bounded so the
      // run fits in one long-run code).
      int zerun = 1;
      while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
        if (hufLength(hcode[im + 1]) > 0) break;
        im++;
        zerun++;
      }
      if (zerun >= 2) {
        if (zerun >= SHORTEST_LONG_RUN) {
          outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
          outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
        } else {
          outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
        }
        continue;
      }
      // A single zero falls through and is written literally below.
    }
    outputBits(6, l, c, lc, p);
  }
  // Flush the final partial byte, left-aligned.
  if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));
  *pcode = p;
}
//
// Unpack an encoding table packed by hufPackEncTable():
//
// Unpack a code table written by hufPackEncTable() into hcode and
// rebuild the canonical codes.  Returns false if the packed data is
// truncated or a zero-run would overrun [im, iM].
static bool hufUnpackEncTable(
    const char **pcode,  // io: ptr to packed table (updated)
    int ni,              // i : input size (in bytes)
    int im,              // i : min hcode index
    int iM,              // i : max hcode index
    long long *hcode)    // o: encoding table [HUF_ENCSIZE]
{
  memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);
  const char *p = *pcode;
  long long c = 0;
  int lc = 0;
  for (; im <= iM; im++) {
    if (p - *pcode >= ni) {
      return false;
    }
    long long l = hcode[im] = getBits(6, c, lc, p);  // code length
    if (l == (long long)LONG_ZEROCODE_RUN) {
      if (p - *pcode > ni) {
        return false;
      }
      int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;
      if (im + zerun > iM + 1) {
        return false;
      }
      while (zerun--) hcode[im++] = 0;
      im--;
    } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      // Short zero-run: codes 59..62 stand for runs of 2..5 zeroes.
      int zerun = l - SHORT_ZEROCODE_RUN + 2;
      if (im + zerun > iM + 1) {
        return false;
      }
      while (zerun--) hcode[im++] = 0;
      im--;
    }
  }
  *pcode = const_cast<char *>(p);
  // Lengths are now known; derive the actual canonical codes.
  hufCanonicalCodeTable(hcode);
  return true;
}
//
// DECODING TABLE BUILDING
//
//
// Clear a newly allocated decoding table so that it contains only zeroes.
//
// Reset every entry of a freshly allocated decoding table to
// "no code, no long-code list".
static void hufClearDecTable(HufDec *hdecod)  // io: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
  HufDec *const end = hdecod + HUF_DECSIZE;
  for (HufDec *e = hdecod; e != end; ++e) {
    e->len = 0;
    e->lit = 0;
    e->p = NULL;
  }
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
// unfrequent;
// - decoding tables are used by hufDecode();
//
// Populate the decoding hash table from the packed encoding table.
// Short codes fill every primary slot they can prefix; long codes append
// their symbol index to the secondary list of the slot addressed by
// their top HUF_DECBITS bits.  Returns false on an inconsistent table.
static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,                  // i : min index in hcode
                             int iM,                  // i : max index in hcode
                             HufDec *hdecod)  // o: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //
  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);
    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //
      // invalidTableEntry();
      return false;
    }
    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //
      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //
        // invalidTableEntry();
        return false;
      }
      pl->lit++;
      // Grow the secondary array by one (simple realloc-by-copy; long
      // codes are rare so this is acceptable).
      if (pl->p) {
        unsigned int *p = pl->p;
        pl->p = new unsigned int[pl->lit];
        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];
        delete[] p;
      } else {
        pl->p = new unsigned int[1];
      }
      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //
      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //
          // invalidTableEntry();
          return false;
        }
        pl->len = l;
        pl->lit = im;
      }
    }
  }
  return true;
}
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
// Release the secondary (long-code) arrays of a decoding table built by
// hufBuildDecTable(); primary entries own no memory.
static void hufFreeDecTable(HufDec *hdecod)  // io: Decoding table
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    delete[] hdecod[i].p;  // delete[] on NULL is a no-op
    hdecod[i].p = NULL;
  }
}
//
// ENCODING
//
// Emit one packed Huffman entry: hufCode(code), hufLength(code) bits long.
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  outputBits(hufLength(code), hufCode(code), c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of (runCount + 1) instances of the symbol sCode.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
  //
  if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
    outputCode(sCode, c, lc, out);
    outputCode(runCode, c, lc, out);
    outputBits(8, runCount, c, lc, out);
  } else {
    // `>=` is intentional: runCount counts repeats AFTER the first
    // occurrence, so runCount + 1 copies are written in total.
    while (runCount-- >= 0) outputCode(sCode, c, lc, out);
  }
}
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
// Huffman-encode `ni` 16-bit values, folding up to 255 consecutive
// repeats of a value into one run (emitted via the rlc pseudo-symbol).
static int hufEncode            // return: output size (in bits)
    (const long long *hcode,    // i : encoding table
     const unsigned short *in,  // i : uncompressed input buffer
     const int ni,              // i : number of input values (16-bit units)
     int rlc,                   // i : rl code
     char *out)                 // o: compressed output buffer
{
  char *outStart = out;
  long long c = 0;  // bits not yet written to out
  int lc = 0;       // number of valid bits in c (LSB)
  int s = in[0];
  int cs = 0;  // repeats of s counted so far (beyond the first occurrence)
  //
  // Loop on input values
  //
  for (int i = 1; i < ni; i++) {
    //
    // Count same values or send code
    //
    if (s == in[i] && cs < 255) {
      cs++;
    } else {
      sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
      cs = 0;
    }
    s = in[i];
  }
  //
  // Send remaining code
  //
  sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
  // Flush the final partial byte, left-aligned.
  if (lc) *out = (c << (8 - lc)) & 0xff;
  return (out - outStart) * 8 + lc;
}
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
// Pull one byte from the input stream into the bit accumulator `c`;
// `lc` tracks the number of valid bits currently held in `c`.
#define getChar(c, lc, in)                   \
  {                                          \
    c = (c << 8) | *(unsigned char *)(in++); \
    lc += 8;                                 \
  }
#if 0
#define getCode(po, rlc, c, lc, in, out, ob, oe) \
{ \
if (po == rlc) { \
if (lc < 8) getChar(c, lc, in); \
\
lc -= 8; \
\
unsigned char cs = (c >> lc); \
\
if (out + cs > oe) return false; \
\
/* TinyEXR issue 78 */ \
unsigned short s = out[-1]; \
\
while (cs-- > 0) *out++ = s; \
} else if (out < oe) { \
*out++ = po; \
} else { \
return false; \
} \
}
#else
// Emit one decoded symbol into the output buffer, or expand a run:
// when `po` equals the run-length code `rlc`, the next 8 input bits give
// a repeat count and the PREVIOUS output value is replicated that many
// times.  Returns false on input underrun or output overrun.
static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
                    const char *in_end, unsigned short *&out,
                    const unsigned short *ob, const unsigned short *oe) {
  (void)ob;
  if (po == rlc) {
    if (lc < 8) {
      /* TinyEXR issue 78 */
      /* TinyEXR issue 160. in + 1 -> in */
      if (in >= in_end) {
        return false;
      }
      getChar(c, lc, in);
    }
    lc -= 8;
    unsigned char cs = (c >> lc);
    if (out + cs > oe) return false;
    // Bounds check for safety
    // Issue 100: a run cannot be the very first output symbol.
    if ((out - 1) < ob) return false;
    unsigned short s = out[-1];
    while (cs-- > 0) *out++ = s;
  } else if (out < oe) {
    *out++ = po;
  } else {
    return false;
  }
  return true;
}
#endif
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
// Decode `ni` bits of Huffman data into exactly `no` output values.
// Short codes (<= HUF_DECBITS bits) resolve with one table lookup; longer
// codes search the slot's secondary symbol list.  Returns false on any
// malformed input or if the decoded count does not equal `no`.
static bool hufDecode(const long long *hcode,  // i : encoding table
                      const HufDec *hdecod,    // i : decoding table
                      const char *in,          // i : compressed input buffer
                      int ni,                  // i : input size (in bits)
                      int rlc,                 // i : run-length code
                      int no,  // i : expected output size (in 16-bit values)
                      unsigned short *out)  // o: uncompressed output buffer
{
  long long c = 0;
  int lc = 0;
  unsigned short *outb = out;          // begin
  unsigned short *oe = out + no;       // end
  const char *ie = in + (ni + 7) / 8;  // input byte size
  //
  // Loop on input bytes
  //
  while (in < ie) {
    getChar(c, lc, in);
    //
    // Access decoding table
    //
    while (lc >= HUF_DECBITS) {
      const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
      if (pl.len) {
        //
        // Get short code
        //
        lc -= pl.len;
        if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
          return false;
        }
      } else {
        if (!pl.p) {
          return false;
        }
        // invalidCode(); // wrong code
        //
        // Search long code
        //
        int j;
        for (j = 0; j < pl.lit; j++) {
          int l = hufLength(hcode[pl.p[j]]);
          while (lc < l && in < ie)  // get more bits
            getChar(c, lc, in);
          if (lc >= l) {
            if (hufCode(hcode[pl.p[j]]) ==
                ((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
              //
              // Found : get long code
              //
              lc -= l;
              if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
                return false;
              }
              break;
            }
          }
        }
        if (j == pl.lit) {
          return false;
          // invalidCode(); // Not found
        }
      }
    }
  }
  //
  // Get remaining (short) codes: drop the padding bits of the final
  // partial byte, then drain what is left of the accumulator.
  //
  int i = (8 - ni) & 7;
  c >>= i;
  lc -= i;
  while (lc > 0) {
    const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
    if (pl.len) {
      lc -= pl.len;
      if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
        return false;
      }
    } else {
      return false;
      // invalidCode(); // wrong (long) code
    }
  }
  if (out - outb != no) {
    return false;
  }
  // notEnoughData ();
  return true;
}
// Build a histogram of the 16-bit input symbols; freq must hold
// HUF_ENCSIZE entries and is fully re-initialized here.
static void countFrequencies(std::vector<long long> &freq,
                             const unsigned short data[/*n*/], int n) {
  for (int i = 0; i < HUF_ENCSIZE; ++i) {
    freq[i] = 0;
  }
  for (int i = 0; i < n; ++i) {
    freq[data[i]] += 1;
  }
}
/* Serialize i into buf as 4 little-endian bytes. */
static void writeUInt(char buf[4], unsigned int i) {
  unsigned char *b = (unsigned char *)buf;
  b[0] = (unsigned char)(i & 0xffu);
  b[1] = (unsigned char)((i >> 8) & 0xffu);
  b[2] = (unsigned char)((i >> 16) & 0xffu);
  b[3] = (unsigned char)((i >> 24) & 0xffu);
}
/* Deserialize 4 little-endian bytes from buf into an unsigned int.
 *
 * Fix: the previous code shifted the promoted (signed) int `b[3] << 24`,
 * which overflows into the sign bit for bytes >= 0x80 — undefined
 * behavior.  Widening each byte to unsigned int before shifting is
 * well-defined and yields the same value on conforming platforms. */
static unsigned int readUInt(const char buf[4]) {
  const unsigned char *b = (const unsigned char *)buf;
  return (unsigned int)b[0] | ((unsigned int)b[1] << 8) |
         ((unsigned int)b[2] << 16) | ((unsigned int)b[3] << 24);
}
//
// EXTERNAL INTERFACE
//
// Huffman-compress nRaw 16-bit values into `compressed`.
// Output layout: 20-byte header (im, iM, tableLength, nBits, reserved),
// the packed code table, then the bitstream.  The pseudo-symbol index iM
// doubles as the run-length code.  Returns total bytes written (0 for
// empty input).  The caller must size `compressed` generously.
static int hufCompress(const unsigned short raw[], int nRaw,
                       char compressed[]) {
  if (nRaw == 0) return 0;
  std::vector<long long> freq(HUF_ENCSIZE);
  countFrequencies(freq, raw, nRaw);
  int im = 0;
  int iM = 0;
  hufBuildEncTable(freq.data(), &im, &iM);
  char *tableStart = compressed + 20;
  char *tableEnd = tableStart;
  hufPackEncTable(freq.data(), im, iM, &tableEnd);
  int tableLength = tableEnd - tableStart;
  char *dataStart = tableEnd;
  int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart);
  int data_length = (nBits + 7) / 8;
  writeUInt(compressed, im);
  writeUInt(compressed + 4, iM);
  writeUInt(compressed + 8, tableLength);
  writeUInt(compressed + 12, nBits);
  writeUInt(compressed + 16, 0);  // room for future extensions
  return dataStart + data_length - compressed;
}
// Huffman-uncompress `nCompressed` bytes into raw (which the caller has
// already sized to the expected number of 16-bit values).
//
// Fix: the return values of hufUnpackEncTable(), hufBuildDecTable() and
// hufDecode() were previously ignored, so corrupt input could silently
// produce garbage output; they are now checked, and the decoding table
// is always freed before returning once it may hold allocations.
static bool hufUncompress(const char compressed[], int nCompressed,
                          std::vector<unsigned short> *raw) {
  if (nCompressed == 0) {
    if (raw->size() != 0) return false;
    return false;
  }
  int im = readUInt(compressed);
  int iM = readUInt(compressed + 4);
  // int tableLength = readUInt (compressed + 8);
  int nBits = readUInt(compressed + 12);
  if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;
  const char *ptr = compressed + 20;
  //
  // Fast decoder needs at least 2x64-bits of compressed data, and
  // needs to be run-able on this platform. Otherwise, fall back
  // to the original decoder
  //
  {
    std::vector<long long> freq(HUF_ENCSIZE);
    std::vector<HufDec> hdec(HUF_DECSIZE);
    hufClearDecTable(&hdec.at(0));
    if (!hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                           &freq.at(0))) {
      return false;  // table itself is malformed; hdec holds no allocations yet
    }
    if (nBits > 8 * (nCompressed - (ptr - compressed))) {
      return false;  // declared bit count exceeds the available data
    }
    bool ok = hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0));
    if (ok) {
      ok = hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM,
                     static_cast<int>(raw->size()), raw->data());
    }
    // Free the long-code lists regardless of success.
    hufFreeDecTable(&hdec.at(0));
    return ok;
  }
}
//
// Functions to compress the range of values in the pixel data
//
const int USHORT_RANGE = (1 << 16);           // number of distinct 16-bit values
const int BITMAP_SIZE = (USHORT_RANGE >> 3);  // one bit per possible value
// Mark every 16-bit value that occurs in data[] with one bit in bitmap,
// and report the first and last non-zero bitmap bytes.
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
                           unsigned char bitmap[BITMAP_SIZE],
                           unsigned short &minNonZero,
                           unsigned short &maxNonZero) {
  for (int i = 0; i < BITMAP_SIZE; ++i) {
    bitmap[i] = 0;
  }
  for (int i = 0; i < nData; ++i) {
    bitmap[data[i] >> 3] |= (1 << (data[i] & 7));
  }
  // Zero is never stored explicitly in the bitmap; the data is assumed
  // to always contain zeroes.
  bitmap[0] &= ~1;
  minNonZero = BITMAP_SIZE - 1;
  maxNonZero = 0;
  for (int i = 0; i < BITMAP_SIZE; ++i) {
    if (bitmap[i] != 0) {
      if (minNonZero > i) minNonZero = i;
      if (maxNonZero < i) maxNonZero = i;
    }
  }
}
// Map each value whose bit is set in the bitmap (plus value 0, always)
// to a compact increasing index; absent values map to 0.  Returns the
// highest index handed out, i.e. the number of ones in the bitmap
// minus 1 — this is the `maxValue` passed to the wavelet transform.
static unsigned short forwardLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int next = 0;
  for (int i = 0; i < USHORT_RANGE; ++i) {
    const bool present = (i == 0) || (((bitmap[i >> 3] >> (i & 7)) & 1) != 0);
    if (present) {
      lut[i] = static_cast<unsigned short>(next);
      ++next;
    } else {
      lut[i] = 0;
    }
  }
  return static_cast<unsigned short>(next - 1);
}
// Inverse of forwardLutFromBitmap: lut[k] is the k-th value whose bit is
// set in the bitmap (value 0 always first); trailing slots are zeroed.
// Returns the largest k with a meaningful entry, i.e. the number of
// ones in the bitmap minus 1.
static unsigned short reverseLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int k = 0;
  for (int i = 0; i < USHORT_RANGE; ++i) {
    const bool present = (i == 0) || (((bitmap[i >> 3] >> (i & 7)) & 1) != 0);
    if (present) {
      lut[k] = static_cast<unsigned short>(i);
      ++k;
    }
  }
  const int n = k - 1;
  while (k < USHORT_RANGE) {
    lut[k] = 0;
    ++k;
  }
  return static_cast<unsigned short>(n);
}
// Remap every element of data[] in place through the lookup table.
static void applyLut(const unsigned short lut[USHORT_RANGE],
                     unsigned short data[/*nData*/], int nData) {
  for (int i = nData; i-- > 0;) {
    data[i] = lut[data[i]];
  }
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
#ifdef _MSC_VER
#pragma warning(pop)
#endif
// PIZ-compress `inSize` bytes of scanline-interleaved pixel data.
// Pipeline: deinterleave into per-channel planes -> bitmap of used
// values -> forward LUT -> per-channel 2D wavelet encode -> Huffman
// encode.  Falls back to a raw copy when compression would expand the
// data (issue 40).  Little-endian only.
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
                        const unsigned char *inPtr, size_t inSize,
                        const std::vector<ChannelInfo> &channelInfo,
                        int data_width, int num_lines) {
  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero;
  unsigned short maxNonZero;
#if !TINYEXR_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif
  // Assume `inSize` is multiple of 2 or 4.
  std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));
  std::vector<PIZChannelData> channelData(channelInfo.size());
  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
  // Carve tmpBuffer into one contiguous plane per channel.
  for (size_t c = 0; c < channelData.size(); c++) {
    PIZChannelData &cd = channelData[c];
    cd.start = tmpBufferEnd;
    cd.end = cd.start;
    cd.nx = data_width;
    cd.ny = num_lines;
    // cd.ys = c.channel().ySampling;
    size_t pixelSize = sizeof(int);  // UINT and FLOAT
    if (channelInfo[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }
    cd.size = static_cast<int>(pixelSize / sizeof(short));
    tmpBufferEnd += cd.nx * cd.ny * cd.size;
  }
  // Deinterleave: input is per-scanline, channel-major within a line.
  const unsigned char *ptr = inPtr;
  for (int y = 0; y < num_lines; ++y) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];
      // if (modp (y, cd.ys) != 0)
      //   continue;
      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(cd.end, ptr, n * sizeof(unsigned short));
      ptr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }
  bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()),
                 bitmap.data(), minNonZero, maxNonZero);
  std::vector<unsigned short> lut(USHORT_RANGE);
  unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
  applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));
  //
  // Store range compression info in _outBuffer
  //
  char *buf = reinterpret_cast<char *>(outPtr);
  memcpy(buf, &minNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  memcpy(buf, &maxNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  if (minNonZero <= maxNonZero) {
    // Only the non-zero byte range of the bitmap is stored.
    memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
           maxNonZero - minNonZero + 1);
    buf += maxNonZero - minNonZero + 1;
  }
  //
  // Apply wavelet encoding
  //
  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];
    for (int j = 0; j < cd.size; ++j) {
      wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }
  //
  // Apply Huffman encoding; append the result to _outBuffer
  //
  // length header(4byte), then huff data. Initialize length header with zero,
  // then later fill it by `length`.
  char *lengthPtr = buf;
  int zero = 0;
  memcpy(buf, &zero, sizeof(int));
  buf += sizeof(int);
  int length =
      hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
  memcpy(lengthPtr, &length, sizeof(int));
  (*outSize) = static_cast<unsigned int>(
      (reinterpret_cast<unsigned char *>(buf) - outPtr) +
      static_cast<unsigned int>(length));
  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if ((*outSize) >= inSize) {
    (*outSize) = static_cast<unsigned int>(inSize);
    memcpy(outPtr, inPtr, inSize);
  }
  return true;
}
// PIZ-decompress `inLen` bytes from inPtr into tmpBufSizeInBytes of
// scanline-interleaved output at outPtr.  Inverse of CompressPiz:
// Huffman decode -> per-channel 2D wavelet decode -> reverse LUT ->
// re-interleave into scanlines.  Little-endian only.
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
                          size_t tmpBufSizeInBytes, size_t inLen, int num_channels,
                          const EXRChannelInfo *channels, int data_width,
                          int num_lines) {
  if (inLen == tmpBufSizeInBytes) {
    // Data is not compressed(Issue 40).
    memcpy(outPtr, inPtr, inLen);
    return true;
  }
  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero;
  unsigned short maxNonZero;
#if !TINYEXR_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif
  memset(bitmap.data(), 0, BITMAP_SIZE);
  const unsigned char *ptr = inPtr;
  // Header: first/last non-zero bytes of the stored bitmap range.
  // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
  tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr));
  // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
  tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2));
  ptr += 4;
  if (maxNonZero >= BITMAP_SIZE) {
    return false;
  }
  if (minNonZero <= maxNonZero) {
    memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
           maxNonZero - minNonZero + 1);
    ptr += maxNonZero - minNonZero + 1;
  }
  std::vector<unsigned short> lut(USHORT_RANGE);
  memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE);
  unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data());
  //
  // Huffman decoding
  //
  int length;
  // length = *(reinterpret_cast<const int *>(ptr));
  tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr));
  ptr += sizeof(int);
  if (size_t((ptr - inPtr) + length) > inLen) {
    return false;
  }
  std::vector<unsigned short> tmpBuffer(tmpBufSizeInBytes / sizeof(unsigned short));
  // NOTE(review): the result of hufUncompress is ignored here, so a
  // malformed stream can silently yield garbage output — consider
  // propagating the failure to the caller.
  hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer);
  //
  // Wavelet decoding
  //
  std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));
  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
  // Carve tmpBuffer into one contiguous plane per channel, mirroring
  // the layout produced by CompressPiz.
  for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
    const EXRChannelInfo &chan = channels[i];
    size_t pixelSize = sizeof(int);  // UINT and FLOAT
    if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }
    channelData[i].start = tmpBufferEnd;
    channelData[i].end = channelData[i].start;
    channelData[i].nx = data_width;
    channelData[i].ny = num_lines;
    // channelData[i].ys = 1;
    channelData[i].size = static_cast<int>(pixelSize / sizeof(short));
    tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
  }
  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];
    for (int j = 0; j < cd.size; ++j) {
      wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }
  //
  // Expand the pixel data to their original range
  //
  applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSizeInBytes / sizeof(unsigned short)));
  // Re-interleave the planes back into per-scanline, channel-major order.
  for (int y = 0; y < num_lines; y++) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];
      // if (modp (y, cd.ys) != 0)
      //   continue;
      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
      outPtr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }
  return true;
}
#endif // TINYEXR_USE_PIZ
#if TINYEXR_USE_ZFP
// Parameters controlling ZFP (de)compression. `type` selects which of the
// other fields is meaningful (rate / precision / tolerance).
struct ZFPCompressionParam {
  double rate;
  unsigned int precision;
  unsigned int __pad0;
  double tolerance;
  int type;  // TINYEXR_ZFP_COMPRESSIONTYPE_*
  unsigned int __pad1;
  // Default: fixed-rate compression at 2 bits per value.
  ZFPCompressionParam()
      : rate(2.0),
        precision(0),
        tolerance(0.0),
        type(TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {}
};
// Look up the ZFP compression parameters stored as EXR header attributes.
// Requires `zfpCompressionType`, then one of rate/precision/tolerance
// depending on the type. Returns false (with a message in `err`) when the
// required attribute is missing or malformed.
static bool FindZFPCompressionParam(ZFPCompressionParam *param,
                                    const EXRAttribute *attributes,
                                    int num_attributes, std::string *err) {
  bool foundType = false;
  for (int i = 0; i < num_attributes; i++) {
    if ((strcmp(attributes[i].name, "zfpCompressionType") == 0)) {
      if (attributes[i].size == 1) {
        param->type = static_cast<int>(attributes[i].value[0]);
        foundType = true;
        break;
      } else {
        if (err) {
          (*err) +=
              "zfpCompressionType attribute must be uchar(1 byte) type.\n";
        }
        return false;
      }
    }
  }
  if (!foundType) {
    if (err) {
      (*err) += "`zfpCompressionType` attribute not found.\n";
    }
    return false;
  }
  if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
          (attributes[i].size == 8)) {
        param->rate = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
    if (err) {
      (*err) += "`zfpCompressionRate` attribute not found.\n";
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
          (attributes[i].size == 4)) {
        // Bug fix: the precision attribute was previously stored into
        // `param->rate`, leaving `param->precision` at its default.
        param->precision = static_cast<unsigned int>(
            *(reinterpret_cast<int *>(attributes[i].value)));
        return true;
      }
    }
    if (err) {
      (*err) += "`zfpCompressionPrecision` attribute not found.\n";
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
          (attributes[i].size == 8)) {
        param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
    if (err) {
      (*err) += "`zfpCompressionTolerance` attribute not found.\n";
    }
  } else {
    if (err) {
      (*err) += "Unknown value specified for `zfpCompressionType`.\n";
    }
  }
  return false;
}
// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
size_t num_channels, const unsigned char *src,
unsigned long src_size,
const ZFPCompressionParam ¶m) {
size_t uncompressed_size =
size_t(dst_width) * size_t(dst_num_lines) * num_channels;
if (uncompressed_size == src_size) {
// Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
}
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((dst_width % 4) == 0);
assert((dst_num_lines % 4) == 0);
if ((size_t(dst_width) & 3U) || (size_t(dst_num_lines) & 3U)) {
return false;
}
field =
zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
zfp_type_float, static_cast<unsigned int>(dst_width),
static_cast<unsigned int>(dst_num_lines) *
static_cast<unsigned int>(num_channels));
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
/* write random access */ 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
std::vector<unsigned char> buf(buf_size);
memcpy(&buf.at(0), src, src_size);
bitstream *stream = stream_open(&buf.at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_stream_rewind(zfp);
size_t image_size = size_t(dst_width) * size_t(dst_num_lines);
for (size_t c = 0; c < size_t(num_channels); c++) {
// decompress 4x4 pixel block.
for (size_t y = 0; y < size_t(dst_num_lines); y += 4) {
for (size_t x = 0; x < size_t(dst_width); x += 4) {
float fblock[16];
zfp_decode_block_float_2(zfp, fblock);
for (size_t j = 0; j < 4; j++) {
for (size_t i = 0; i < 4; i++) {
dst[c * image_size + ((y + j) * size_t(dst_width) + (x + i))] =
fblock[j * 4 + i];
}
}
}
}
}
zfp_field_free(field);
zfp_stream_close(zfp);
stream_close(stream);
return true;
}
// Assume pixel format is FLOAT for all channels.
static bool CompressZfp(std::vector<unsigned char> *outBuf,
unsigned int *outSize, const float *inPtr, int width,
int num_lines, int num_channels,
const ZFPCompressionParam ¶m) {
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((width % 4) == 0);
assert((num_lines % 4) == 0);
if ((size_t(width) & 3U) || (size_t(num_lines) & 3U)) {
return false;
}
// create input array.
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
zfp_type_float, static_cast<unsigned int>(width),
static_cast<unsigned int>(num_lines * num_channels));
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
outBuf->resize(buf_size);
bitstream *stream = stream_open(&outBuf->at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_field_free(field);
size_t image_size = size_t(width) * size_t(num_lines);
for (size_t c = 0; c < size_t(num_channels); c++) {
// compress 4x4 pixel block.
for (size_t y = 0; y < size_t(num_lines); y += 4) {
for (size_t x = 0; x < size_t(width); x += 4) {
float fblock[16];
for (size_t j = 0; j < 4; j++) {
for (size_t i = 0; i < 4; i++) {
fblock[j * 4 + i] =
inPtr[c * image_size + ((y + j) * size_t(width) + (x + i))];
}
}
zfp_encode_block_float_2(zfp, fblock);
}
}
}
zfp_stream_flush(zfp);
(*outSize) = static_cast<unsigned int>(zfp_stream_compressed_size(zfp));
zfp_stream_close(zfp);
return true;
}
#endif
//
// -----------------------------------------------------------------
//
// heuristics
#define TINYEXR_DIMENSION_THRESHOLD (1024 * 8192)
// TODO(syoyo): Refactor function arguments.
// Decode one compressed scanline block (or one tile) into the per-channel
// output images `out_images`, converting each channel to the requested pixel
// type (HALF may be promoted to FLOAT). `x_stride` is the row pitch of the
// destination images; `width`/`num_lines` describe the decoded block itself.
// Returns false on malformed or truncated input.
static bool DecodePixelData(/* out */ unsigned char **out_images,
                            const int *requested_pixel_types,
                            const unsigned char *data_ptr, size_t data_len,
                            int compression_type, int line_order, int width,
                            int height, int x_stride, int y, int line_no,
                            int num_lines, size_t pixel_data_size,
                            size_t num_attributes,
                            const EXRAttribute *attributes, size_t num_channels,
                            const EXRChannelInfo *channels,
                            const std::vector<size_t> &channel_offset_list) {
  if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {  // PIZ
#if TINYEXR_USE_PIZ
    if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
      // Invalid input #90
      return false;
    }
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(
        static_cast<size_t>(width * num_lines) * pixel_data_size));
    size_t tmpBufLen = outBuf.size();
    bool ret = tinyexr::DecompressPiz(
        reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
        data_len, static_cast<int>(num_channels), channels, width, num_lines);
    if (!ret) {
      return false;
    }
    // For PIZ_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            FP16 hf;
            // hf.u = line_ptr[u];
            // use `cpy` to avoid unaligned memory access when compiler's
            // optimization is on.
            tinyexr::cpy2(&(hf.u), line_ptr + u);
            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += static_cast<size_t>(
                             (height - 1 - (line_no + static_cast<int>(v)))) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else {  // HALF -> FLOAT
              FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              size_t offset = 0;
              if (line_order == 0) {
                offset = (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                offset = static_cast<size_t>(
                             (height - 1 - (line_no + static_cast<int>(v)))) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              image += offset;
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(&val);
            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += static_cast<size_t>(
                           (height - 1 - (line_no + static_cast<int>(v)))) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          // Bug fix: the decompressed buffer is laid out with row pitch
          // `width` (see DecompressPiz / the HALF and UINT branches above);
          // indexing it with `x_stride` read out of range for edge tiles.
          const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
              v * pixel_data_size * static_cast<size_t>(width) +
              channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += static_cast<size_t>(
                           (height - 1 - (line_no + static_cast<int>(v)))) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
      }
    }
#else
    assert(0 && "PIZ is disabled in this build");
    return false;
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
             compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);
    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    // Reject an empty destination (consistent with the RLE path below);
    // an assert alone disappears in release builds.
    if (dstLen == 0) {
      return false;
    }
    if (!tinyexr::DecompressZip(
            reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr,
            static_cast<unsigned long>(data_len))) {
      return false;
    }
    // For ZIP_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * static_cast<size_t>(pixel_data_size) *
                             static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            tinyexr::FP16 hf;
            // hf.u = line_ptr[u];
            tinyexr::cpy2(&(hf.u), line_ptr + u);
            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else {  // HALF -> FLOAT
              tinyexr::FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              size_t offset = 0;
              if (line_order == 0) {
                offset = (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                offset = (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              image += offset;
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(&val);
            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);
    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    if (dstLen == 0) {
      return false;
    }
    if (!tinyexr::DecompressRle(
            reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr,
            static_cast<unsigned long>(data_len))) {
      return false;
    }
    // For RLE_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * static_cast<size_t>(pixel_data_size) *
                             static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            tinyexr::FP16 hf;
            // hf.u = line_ptr[u];
            tinyexr::cpy2(&(hf.u), line_ptr + u);
            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else {  // HALF -> FLOAT
              tinyexr::FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(&val);
            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
    tinyexr::ZFPCompressionParam zfp_compression_param;
    std::string e;
    if (!tinyexr::FindZFPCompressionParam(&zfp_compression_param, attributes,
                                          int(num_attributes), &e)) {
      // This code path should not be reachable.
      assert(0);
      return false;
    }
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);
    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    if (dstLen == 0) {
      return false;
    }
    // Propagate a ZFP decode failure instead of silently reading garbage.
    if (!tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)),
                                width, num_lines, num_channels, data_ptr,
                                static_cast<unsigned long>(data_len),
                                zfp_compression_param)) {
      return false;
    }
    // For ZFP_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
#else
    (void)attributes;
    (void)num_attributes;
    (void)num_channels;
    assert(0);
    return false;
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
    for (size_t c = 0; c < num_channels; c++) {
      for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
        if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
          const unsigned short *line_ptr =
              reinterpret_cast<const unsigned short *>(
                  data_ptr + v * pixel_data_size * size_t(width) +
                  channel_offset_list[c] * static_cast<size_t>(width));
          if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
            unsigned short *outLine =
                reinterpret_cast<unsigned short *>(out_images[c]);
            if (line_order == 0) {
              outLine += (size_t(y) + v) * size_t(x_stride);
            } else {
              outLine +=
                  (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
            }
            for (int u = 0; u < width; u++) {
              tinyexr::FP16 hf;
              // hf.u = line_ptr[u];
              tinyexr::cpy2(&(hf.u), line_ptr + u);
              tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
              outLine[u] = hf.u;
            }
          } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
            float *outLine = reinterpret_cast<float *>(out_images[c]);
            if (line_order == 0) {
              outLine += (size_t(y) + v) * size_t(x_stride);
            } else {
              outLine +=
                  (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
            }
            if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
                (data_ptr + data_len)) {
              // Insufficient data size
              return false;
            }
            for (int u = 0; u < width; u++) {
              tinyexr::FP16 hf;
              // address may not be aliged. use byte-wise copy for safety.#76
              // hf.u = line_ptr[u];
              tinyexr::cpy2(&(hf.u), line_ptr + u);
              tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
              tinyexr::FP32 f32 = half_to_float(hf);
              outLine[u] = f32.f;
            }
          } else {
            assert(0);
            return false;
          }
        } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
          const float *line_ptr = reinterpret_cast<const float *>(
              data_ptr + v * pixel_data_size * size_t(width) +
              channel_offset_list[c] * static_cast<size_t>(width));
          float *outLine = reinterpret_cast<float *>(out_images[c]);
          if (line_order == 0) {
            outLine += (size_t(y) + v) * size_t(x_stride);
          } else {
            outLine +=
                (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
          }
          if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
              (data_ptr + data_len)) {
            // Insufficient data size
            return false;
          }
          for (int u = 0; u < width; u++) {
            float val;
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
            outLine[u] = val;
          }
        } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
          const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
              data_ptr + v * pixel_data_size * size_t(width) +
              channel_offset_list[c] * static_cast<size_t>(width));
          unsigned int *outLine =
              reinterpret_cast<unsigned int *>(out_images[c]);
          if (line_order == 0) {
            outLine += (size_t(y) + v) * size_t(x_stride);
          } else {
            outLine +=
                (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
          }
          for (int u = 0; u < width; u++) {
            if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
                (data_ptr + data_len)) {
              // Corrupted data?
              return false;
            }
            unsigned int val;
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
            outLine[u] = val;
          }
        }
      }
    }
  }
  return true;
}
// Decode a single tile at (tile_offset_x, tile_offset_y) of the current
// (sub)level, writing its actual extent to *width/*height (edge tiles may be
// smaller than tile_size_x/y). Delegates the real work to DecodePixelData
// with the tile size as the destination stride.
static bool DecodeTiledPixelData(
    unsigned char **out_images, int *width, int *height,
    const int *requested_pixel_types, const unsigned char *data_ptr,
    size_t data_len, int compression_type, int line_order, int data_width,
    int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
    int tile_size_y, size_t pixel_data_size, size_t num_attributes,
    const EXRAttribute *attributes, size_t num_channels,
    const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  // Here, data_width and data_height are the dimensions of the current (sub)level.
  if (tile_size_x * tile_offset_x > data_width ||
      tile_size_y * tile_offset_y > data_height) {
    return false;
  }
  // Clamp the tile extent at the right/bottom border of the level.
  const int remain_x = data_width - (tile_offset_x * tile_size_x);
  const int remain_y = data_height - (tile_offset_y * tile_size_y);
  (*width) = (remain_x < tile_size_x) ? remain_x : tile_size_x;
  (*height) = (remain_y < tile_size_y) ? remain_y : tile_size_y;
  // Image size = tile size.
  return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
                         compression_type, line_order, (*width), tile_size_y,
                         /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
                         (*height), pixel_data_size, num_attributes, attributes,
                         num_channels, channels, channel_offset_list);
}
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
int *pixel_data_size, size_t *channel_offset,
int num_channels,
const EXRChannelInfo *channels) {
channel_offset_list->resize(static_cast<size_t>(num_channels));
(*pixel_data_size) = 0;
(*channel_offset) = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
(*channel_offset_list)[c] = (*channel_offset);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
(*pixel_data_size) += sizeof(unsigned short);
(*channel_offset) += sizeof(unsigned short);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
(*pixel_data_size) += sizeof(float);
(*channel_offset) += sizeof(float);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
(*pixel_data_size) += sizeof(unsigned int);
(*channel_offset) += sizeof(unsigned int);
} else {
// ???
return false;
}
}
return true;
}
// Allocate one destination buffer per channel, sized data_width x data_height
// elements of the requested pixel type (HALF channels promoted to FLOAT when
// requested). Returns NULL if the channel-pointer table cannot be allocated;
// the caller owns the table and every per-channel buffer.
static unsigned char **AllocateImage(int num_channels,
                                     const EXRChannelInfo *channels,
                                     const int *requested_pixel_types,
                                     int data_width, int data_height) {
  unsigned char **images =
      reinterpret_cast<unsigned char **>(static_cast<float **>(
          malloc(sizeof(float *) * static_cast<size_t>(num_channels))));
  if (images == NULL) {
    // Out of memory: previously this would have been dereferenced below.
    return NULL;
  }
  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    size_t data_len =
        static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      // pixel_data_size += sizeof(unsigned short);
      // channel_offset += sizeof(unsigned short);
      // Alloc internal image for half type.
      if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        images[c] =
            reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
                malloc(sizeof(unsigned short) * data_len)));
      } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        images[c] = reinterpret_cast<unsigned char *>(
            static_cast<float *>(malloc(sizeof(float) * data_len)));
      } else {
        assert(0);
        // Bug fix: with NDEBUG the assert vanishes and images[c] was left
        // uninitialized; make the failure deterministic instead.
        images[c] = NULL;
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      // pixel_data_size += sizeof(float);
      // channel_offset += sizeof(float);
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<float *>(malloc(sizeof(float) * data_len)));
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      // pixel_data_size += sizeof(unsigned int);
      // channel_offset += sizeof(unsigned int);
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
    } else {
      assert(0);
      images[c] = NULL;  // see note above: avoid an uninitialized pointer
    }
  }
  return images;
}
#ifdef _WIN32
// Convert a UTF-8 string to a wide (UTF-16) string using the Win32 API:
// first query the required length, then perform the actual conversion.
static inline std::wstring UTF8ToWchar(const std::string &str) {
  const int n_wchars =
      MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), NULL, 0);
  std::wstring result(n_wchars, 0);
  MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), &result[0],
                      (int)result.size());
  return result;
}
#endif
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
const EXRVersion *version, std::string *err,
const unsigned char *buf, size_t size) {
const char *marker = reinterpret_cast<const char *>(&buf[0]);
if (empty_header) {
(*empty_header) = false;
}
if (version->multipart) {
if (size > 0 && marker[0] == '\0') {
// End of header list.
if (empty_header) {
(*empty_header) = true;
}
return TINYEXR_SUCCESS;
}
}
// According to the spec, the header of every OpenEXR file must contain at
// least the following attributes:
//
// channels chlist
// compression compression
// dataWindow box2i
// displayWindow box2i
// lineOrder lineOrder
// pixelAspectRatio float
// screenWindowCenter v2f
// screenWindowWidth float
bool has_channels = false;
bool has_compression = false;
bool has_data_window = false;
bool has_display_window = false;
bool has_line_order = false;
bool has_pixel_aspect_ratio = false;
bool has_screen_window_center = false;
bool has_screen_window_width = false;
bool has_name = false;
bool has_type = false;
info->name.clear();
info->type.clear();
info->data_window.min_x = 0;
info->data_window.min_y = 0;
info->data_window.max_x = 0;
info->data_window.max_y = 0;
info->line_order = 0; // @fixme
info->display_window.min_x = 0;
info->display_window.min_y = 0;
info->display_window.max_x = 0;
info->display_window.max_y = 0;
info->screen_window_center[0] = 0.0f;
info->screen_window_center[1] = 0.0f;
info->screen_window_width = -1.0f;
info->pixel_aspect_ratio = -1.0f;
info->tiled = 0;
info->tile_size_x = -1;
info->tile_size_y = -1;
info->tile_level_mode = -1;
info->tile_rounding_mode = -1;
info->attributes.clear();
// Read attributes
size_t orig_size = size;
for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
if (0 == size) {
if (err) {
(*err) += "Insufficient data size for attributes.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
if (err) {
(*err) += "Failed to read attribute.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
// For a multipart file, the version field 9th bit is 0.
if ((version->tiled || version->multipart || version->non_image) && attr_name.compare("tiles") == 0) {
unsigned int x_size, y_size;
unsigned char tile_mode;
assert(data.size() == 9);
memcpy(&x_size, &data.at(0), sizeof(int));
memcpy(&y_size, &data.at(4), sizeof(int));
tile_mode = data[8];
tinyexr::swap4(&x_size);
tinyexr::swap4(&y_size);
if (x_size > static_cast<unsigned int>(std::numeric_limits<int>::max()) ||
y_size > static_cast<unsigned int>(std::numeric_limits<int>::max())) {
if (err) {
(*err) = "Tile sizes were invalid.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
info->tile_size_x = static_cast<int>(x_size);
info->tile_size_y = static_cast<int>(y_size);
// mode = levelMode + roundingMode * 16
info->tile_level_mode = tile_mode & 0x3;
info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
info->tiled = 1;
} else if (attr_name.compare("compression") == 0) {
bool ok = false;
if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
ok = true;
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
ok = true;
#else
if (err) {
(*err) = "PIZ compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
ok = true;
#else
if (err) {
(*err) = "ZFP compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (!ok) {
if (err) {
(*err) = "Unknown compression type.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
info->compression_type = static_cast<int>(data[0]);
has_compression = true;
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!ReadChannelInfo(info->channels, data)) {
if (err) {
(*err) += "Failed to parse channel info.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
if (info->channels.size() < 1) {
if (err) {
(*err) += "# of channels is zero.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
has_channels = true;
} else if (attr_name.compare("dataWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->data_window.min_x, &data.at(0), sizeof(int));
memcpy(&info->data_window.min_y, &data.at(4), sizeof(int));
memcpy(&info->data_window.max_x, &data.at(8), sizeof(int));
memcpy(&info->data_window.max_y, &data.at(12), sizeof(int));
tinyexr::swap4(&info->data_window.min_x);
tinyexr::swap4(&info->data_window.min_y);
tinyexr::swap4(&info->data_window.max_x);
tinyexr::swap4(&info->data_window.max_y);
has_data_window = true;
}
} else if (attr_name.compare("displayWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->display_window.min_x, &data.at(0), sizeof(int));
memcpy(&info->display_window.min_y, &data.at(4), sizeof(int));
memcpy(&info->display_window.max_x, &data.at(8), sizeof(int));
memcpy(&info->display_window.max_y, &data.at(12), sizeof(int));
tinyexr::swap4(&info->display_window.min_x);
tinyexr::swap4(&info->display_window.min_y);
tinyexr::swap4(&info->display_window.max_x);
tinyexr::swap4(&info->display_window.max_y);
has_display_window = true;
}
} else if (attr_name.compare("lineOrder") == 0) {
if (data.size() >= 1) {
info->line_order = static_cast<int>(data[0]);
has_line_order = true;
}
} else if (attr_name.compare("pixelAspectRatio") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
tinyexr::swap4(&info->pixel_aspect_ratio);
has_pixel_aspect_ratio = true;
}
} else if (attr_name.compare("screenWindowCenter") == 0) {
if (data.size() >= 8) {
memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
tinyexr::swap4(&info->screen_window_center[0]);
tinyexr::swap4(&info->screen_window_center[1]);
has_screen_window_center = true;
}
} else if (attr_name.compare("screenWindowWidth") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
tinyexr::swap4(&info->screen_window_width);
has_screen_window_width = true;
}
} else if (attr_name.compare("chunkCount") == 0) {
if (data.size() >= sizeof(int)) {
memcpy(&info->chunk_count, &data.at(0), sizeof(int));
tinyexr::swap4(&info->chunk_count);
}
} else if (attr_name.compare("name") == 0) {
if (!data.empty() && data[0]) {
data.push_back(0);
size_t len = strlen(reinterpret_cast<const char*>(&data[0]));
info->name.resize(len);
info->name.assign(reinterpret_cast<const char*>(&data[0]), len);
has_name = true;
}
} else if (attr_name.compare("type") == 0) {
if (!data.empty() && data[0]) {
data.push_back(0);
size_t len = strlen(reinterpret_cast<const char*>(&data[0]));
info->type.resize(len);
info->type.assign(reinterpret_cast<const char*>(&data[0]), len);
has_type = true;
}
} else {
// Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES)
if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
EXRAttribute attrib;
#ifdef _MSC_VER
strncpy_s(attrib.name, attr_name.c_str(), 255);
strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
strncpy(attrib.name, attr_name.c_str(), 255);
strncpy(attrib.type, attr_type.c_str(), 255);
#endif
attrib.name[255] = '\0';
attrib.type[255] = '\0';
attrib.size = static_cast<int>(data.size());
attrib.value = static_cast<unsigned char *>(malloc(data.size()));
memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
data.size());
info->attributes.push_back(attrib);
}
}
}
// Check if required attributes exist
{
std::stringstream ss_err;
if (!has_compression) {
ss_err << "\"compression\" attribute not found in the header."
<< std::endl;
}
if (!has_channels) {
ss_err << "\"channels\" attribute not found in the header." << std::endl;
}
if (!has_line_order) {
ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
}
if (!has_display_window) {
ss_err << "\"displayWindow\" attribute not found in the header."
<< std::endl;
}
if (!has_data_window) {
ss_err << "\"dataWindow\" attribute not found in the header or invalid."
<< std::endl;
}
if (!has_pixel_aspect_ratio) {
ss_err << "\"pixelAspectRatio\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_width) {
ss_err << "\"screenWindowWidth\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_center) {
ss_err << "\"screenWindowCenter\" attribute not found in the header."
<< std::endl;
}
if (version->multipart || version->non_image) {
if (!has_name) {
ss_err << "\"name\" attribute not found in the header."
<< std::endl;
}
if (!has_type) {
ss_err << "\"type\" attribute not found in the header."
<< std::endl;
}
}
if (!(ss_err.str().empty())) {
if (err) {
(*err) += ss_err.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
}
info->header_len = static_cast<unsigned int>(orig_size - size);
return TINYEXR_SUCCESS;
}
// C++ HeaderInfo to C EXRHeader conversion.
//
// Copies all parsed header fields from the internal `HeaderInfo` into the
// public C `EXRHeader`. `channels`, `pixel_types`, `requested_pixel_types`
// and `custom_attributes` are malloc'd here (released via FreeEXRHeader);
// custom attribute `value` pointers are shared with `info`, not duplicated.
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
  exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
  exr_header->screen_window_center[0] = info.screen_window_center[0];
  exr_header->screen_window_center[1] = info.screen_window_center[1];
  exr_header->screen_window_width = info.screen_window_width;
  exr_header->chunk_count = info.chunk_count;
  exr_header->display_window.min_x = info.display_window.min_x;
  exr_header->display_window.min_y = info.display_window.min_y;
  exr_header->display_window.max_x = info.display_window.max_x;
  exr_header->display_window.max_y = info.display_window.max_y;
  exr_header->data_window.min_x = info.data_window.min_x;
  exr_header->data_window.min_y = info.data_window.min_y;
  exr_header->data_window.max_x = info.data_window.max_x;
  exr_header->data_window.max_y = info.data_window.max_y;
  exr_header->line_order = info.line_order;
  exr_header->compression_type = info.compression_type;
  exr_header->tiled = info.tiled;
  exr_header->tile_size_x = info.tile_size_x;
  exr_header->tile_size_y = info.tile_size_y;
  exr_header->tile_level_mode = info.tile_level_mode;
  exr_header->tile_rounding_mode = info.tile_rounding_mode;
  EXRSetNameAttr(exr_header, info.name.c_str());
  // The part "type" attribute, when present, must agree with the tiled flag;
  // deep types additionally mark the header as non-image data.
  if (!info.type.empty()) {
    if (info.type == "scanlineimage") {
      assert(!exr_header->tiled);
    } else if (info.type == "tiledimage") {
      assert(exr_header->tiled);
    } else if (info.type == "deeptile") {
      exr_header->non_image = 1;
      assert(exr_header->tiled);
    } else if (info.type == "deepscanline") {
      exr_header->non_image = 1;
      assert(!exr_header->tiled);
    } else {
      assert(false);
    }
  }
  exr_header->num_channels = static_cast<int>(info.channels.size());
  exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
      sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
    strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
    strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
    // manually add '\0' for safety.
    exr_header->channels[c].name[255] = '\0';
    exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
    exr_header->channels[c].p_linear = info.channels[c].p_linear;
    exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
    exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
  }
  exr_header->pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->pixel_types[c] = info.channels[c].pixel_type;
  }
  // Initially fill with values of `pixel_types`
  exr_header->requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
  }
  exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());
  if (exr_header->num_custom_attributes > 0) {
    // TODO(syoyo): Report warning when # of attributes exceeds
    // `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
    if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
      exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
    }
    exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
        sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));
    // Copy only up to the clamped count. Iterating over
    // info.attributes.size() would write past the malloc'd array if the
    // source ever held more than TINYEXR_MAX_CUSTOM_ATTRIBUTES entries.
    const size_t num_attribs =
        static_cast<size_t>(exr_header->num_custom_attributes);
    for (size_t i = 0; i < num_attribs; i++) {
      memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
             256);
      memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
             256);
      exr_header->custom_attributes[i].size = info.attributes[i].size;
      // Just copy pointer
      exr_header->custom_attributes[i].value = info.attributes[i].value;
    }
  } else {
    exr_header->custom_attributes = NULL;
  }
  exr_header->header_len = info.header_len;
}
// Chunk (tile / scanline block) offset table for one EXR part.
// `offsets` is indexed as [level][tile_y][tile_x]; scanline images use a
// single level containing one row of per-block offsets.
struct OffsetData {
  OffsetData() : num_x_levels(0), num_y_levels(0) {}
  std::vector<std::vector<std::vector <tinyexr::tinyexr_uint64> > > offsets;
  int num_x_levels;  // number of resolution levels along x
  int num_y_levels;  // number of resolution levels along y
};
// Map a (level_x, level_y) pair to a linear index into the per-level offset
// tables, according to the tile level mode.
int LevelIndex(int lx, int ly, int tile_level_mode, int num_x_levels) {
  if (tile_level_mode == TINYEXR_TILE_ONE_LEVEL) {
    return 0;
  }
  if (tile_level_mode == TINYEXR_TILE_MIPMAP_LEVELS) {
    return lx;  // mipmap mode indexes by lx only
  }
  if (tile_level_mode == TINYEXR_TILE_RIPMAP_LEVELS) {
    return lx + ly * num_x_levels;  // row-major over the level grid
  }
  assert(false);
  return 0;
}
// Size (width or height) of mip/rip level `level`: the top-level size halved
// `level` times, rounded per `tile_rounding_mode`, and clamped to >= 1.
static int LevelSize(int toplevel_size, int level, int tile_rounding_mode) {
  assert(level >= 0);
  const int divisor = (int)(1u << (unsigned)level);
  int result = toplevel_size / divisor;
  if (tile_rounding_mode == TINYEXR_TILE_ROUND_UP &&
      result * divisor < toplevel_size) {
    result += 1;  // round up when the division truncated
  }
  return std::max(result, 1);
}
// Decode all tiles of one (level_x, level_y) resolution level into
// `exr_image->tiles`. Tile chunk offsets come from `offset_data`;
// `head`/`size` describe the whole EXR memory image. Depending on build
// flags the tile loop runs on a C++11 thread pool, under OpenMP, or
// serially. Returns TINYEXR_SUCCESS or TINYEXR_ERROR_INVALID_DATA,
// appending details to *err when provided.
static int DecodeTiledLevel(EXRImage* exr_image, const EXRHeader* exr_header,
                            const OffsetData& offset_data,
                            const std::vector<size_t>& channel_offset_list,
                            int pixel_data_size,
                            const unsigned char* head, const size_t size,
                            std::string* err) {
  int num_channels = exr_header->num_channels;
  int level_index = LevelIndex(exr_image->level_x, exr_image->level_y, exr_header->tile_level_mode, offset_data.num_x_levels);
  int num_y_tiles = (int)offset_data.offsets[level_index].size();
  assert(num_y_tiles);
  int num_x_tiles = (int)offset_data.offsets[level_index][0].size();
  assert(num_x_tiles);
  int num_tiles = num_x_tiles * num_y_tiles;
  int err_code = TINYEXR_SUCCESS;
  // Error bits OR'd into `error_flag` by the (possibly concurrent) workers.
  enum {
    EF_SUCCESS = 0,
    EF_INVALID_DATA = 1,
    EF_INSUFFICIENT_DATA = 2,
    EF_FAILED_TO_DECODE = 4
  };
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  std::atomic<unsigned> error_flag(EF_SUCCESS);
#else
  unsigned error_flag(EF_SUCCESS);
#endif
  // Although the spec says : "...the data window is subdivided into an array of smaller rectangles...",
  // the IlmImf library allows the dimensions of the tile to be larger (or equal) than the dimensions of the data window.
#if 0
  if ((exr_header->tile_size_x > exr_image->width || exr_header->tile_size_y > exr_image->height) &&
      exr_image->level_x == 0 && exr_image->level_y == 0) {
    if (err) {
      (*err) += "Failed to decode tile data.\n";
    }
    err_code = TINYEXR_ERROR_INVALID_DATA;
  }
#endif
  exr_image->tiles = static_cast<EXRTile*>(
      calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  // Worker pool: each thread claims the next tile index from an atomic
  // counter until all tiles are consumed.
  std::vector<std::thread> workers;
  std::atomic<int> tile_count(0);
  int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
  if (num_threads > int(num_tiles)) {
    num_threads = int(num_tiles);
  }
  for (int t = 0; t < num_threads; t++) {
    workers.emplace_back(std::thread([&]()
      {
        int tile_idx = 0;
        while ((tile_idx = tile_count++) < num_tiles) {
#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
  for (int tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
#endif
    // Allocate memory for each tile.
    exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
        num_channels, exr_header->channels,
        exr_header->requested_pixel_types, exr_header->tile_size_x,
        exr_header->tile_size_y);
    int x_tile = tile_idx % num_x_tiles;
    int y_tile = tile_idx / num_x_tiles;
    // Tile chunk layout:
    // 16 byte: tile coordinates
    // 4 byte : data size
    // ~      : data(uncompressed or compressed)
    tinyexr::tinyexr_uint64 offset = offset_data.offsets[level_index][y_tile][x_tile];
    if (offset + sizeof(int) * 5 > size) {
      // Insufficient data size.
      error_flag |= EF_INSUFFICIENT_DATA;
      continue;
    }
    size_t data_size =
        size_t(size - (offset + sizeof(int) * 5));
    const unsigned char* data_ptr =
        reinterpret_cast<const unsigned char*>(head + offset);
    int tile_coordinates[4];
    memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
    tinyexr::swap4(&tile_coordinates[0]);
    tinyexr::swap4(&tile_coordinates[1]);
    tinyexr::swap4(&tile_coordinates[2]);
    tinyexr::swap4(&tile_coordinates[3]);
    // The tile's stored level must match the level being decoded.
    if (tile_coordinates[2] != exr_image->level_x) {
      // Invalid data.
      error_flag |= EF_INVALID_DATA;
      continue;
    }
    if (tile_coordinates[3] != exr_image->level_y) {
      // Invalid data.
      error_flag |= EF_INVALID_DATA;
      continue;
    }
    int data_len;
    memcpy(&data_len, data_ptr + 16,
           sizeof(int));  // 16 = sizeof(tile_coordinates)
    tinyexr::swap4(&data_len);
    if (data_len < 2 || size_t(data_len) > data_size) {
      // Insufficient data size.
      error_flag |= EF_INSUFFICIENT_DATA;
      continue;
    }
    // Move to data addr: 20 = 16 + 4;
    data_ptr += 20;
    bool ret = tinyexr::DecodeTiledPixelData(
        exr_image->tiles[tile_idx].images,
        &(exr_image->tiles[tile_idx].width),
        &(exr_image->tiles[tile_idx].height),
        exr_header->requested_pixel_types, data_ptr,
        static_cast<size_t>(data_len), exr_header->compression_type,
        exr_header->line_order,
        exr_image->width, exr_image->height,
        tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x,
        exr_header->tile_size_y, static_cast<size_t>(pixel_data_size),
        static_cast<size_t>(exr_header->num_custom_attributes),
        exr_header->custom_attributes,
        static_cast<size_t>(exr_header->num_channels),
        exr_header->channels, channel_offset_list);
    if (!ret) {
      // Failed to decode tile data.
      error_flag |= EF_FAILED_TO_DECODE;
    }
    exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
    exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
    exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
    exr_image->tiles[tile_idx].level_y = tile_coordinates[3];
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
        }
      }));
  }  // num_thread loop
  for (auto& t : workers) {
    t.join();
  }
#else
  }  // parallel for
#endif
  // Even in the event of an error, the reserved memory may be freed.
  exr_image->num_channels = num_channels;
  exr_image->num_tiles = static_cast<int>(num_tiles);
  if (error_flag) err_code = TINYEXR_ERROR_INVALID_DATA;
  if (err) {
    if (error_flag & EF_INSUFFICIENT_DATA) {
      (*err) += "Insufficient data length.\n";
    }
    if (error_flag & EF_FAILED_TO_DECODE) {
      (*err) += "Failed to decode tile data.\n";
    }
  }
  return err_code;
}
// Decode every chunk (tile or scanline block) of the EXR memory image into
// `exr_image`, using the chunk offsets in `offset_data`. Tiled images
// produce a chain of per-level EXRImages linked via `next_level`; scanline
// images decode directly into `exr_image->images`. Returns TINYEXR_SUCCESS
// or a TINYEXR_ERROR_* code, appending details to *err when provided.
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
                       const OffsetData& offset_data,
                       const unsigned char *head, const size_t size,
                       std::string *err) {
  int num_channels = exr_header->num_channels;
  // The number of scanlines stored per chunk depends on the compression.
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
#if TINYEXR_USE_ZFP
    // ZFP parameters come from custom attributes; reject the file if absent.
    tinyexr::ZFPCompressionParam zfp_compression_param;
    if (!FindZFPCompressionParam(&zfp_compression_param,
                                 exr_header->custom_attributes,
                                 int(exr_header->num_custom_attributes), err)) {
      return TINYEXR_ERROR_INVALID_HEADER;
    }
#endif
  }
  if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
      exr_header->data_window.max_y < exr_header->data_window.min_y) {
    if (err) {
      (*err) += "Invalid data window.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
  int data_width =
      exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  int data_height =
      exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
  // Do not allow too large data_width and data_height. header invalid?
  {
    if ((data_width > TINYEXR_DIMENSION_THRESHOLD) || (data_height > TINYEXR_DIMENSION_THRESHOLD)) {
      if (err) {
        std::stringstream ss;
        ss << "data_with or data_height too large. data_width: " << data_width
           << ", "
           << "data_height = " << data_height << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (exr_header->tiled) {
      if ((exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) || (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD)) {
        if (err) {
          std::stringstream ss;
          ss << "tile with or tile height too large. tile width: " << exr_header->tile_size_x
             << ", "
             << "tile height = " << exr_header->tile_size_y << std::endl;
          (*err) += ss.str();
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
    }
  }
  const std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
  size_t num_blocks = offsets.size();
  // Per-channel byte offsets within one pixel, shared by all decoders below.
  std::vector<size_t> channel_offset_list;
  int pixel_data_size = 0;
  size_t channel_offset = 0;
  if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
                                     &channel_offset, num_channels,
                                     exr_header->channels)) {
    if (err) {
      (*err) += "Failed to compute channel layout.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  std::atomic<bool> invalid_data(false);
#else
  bool invalid_data(false);
#endif
  if (exr_header->tiled) {
    // value check
    if (exr_header->tile_size_x < 0) {
      if (err) {
        std::stringstream ss;
        ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }
    if (exr_header->tile_size_y < 0) {
      if (err) {
        std::stringstream ss;
        ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }
    if (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) {
      // ONE_LEVEL / MIPMAP: one image per level, chained via next_level.
      EXRImage* level_image = NULL;
      for (int level = 0; level < offset_data.num_x_levels; ++level) {
        if (!level_image) {
          level_image = exr_image;
        } else {
          level_image->next_level = new EXRImage;
          InitEXRImage(level_image->next_level);
          level_image = level_image->next_level;
        }
        level_image->width =
            LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level, exr_header->tile_rounding_mode);
        level_image->height =
            LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level, exr_header->tile_rounding_mode);
        level_image->level_x = level;
        level_image->level_y = level;
        int ret = DecodeTiledLevel(level_image, exr_header,
                                   offset_data,
                                   channel_offset_list,
                                   pixel_data_size,
                                   head, size,
                                   err);
        if (ret != TINYEXR_SUCCESS) return ret;
      }
    } else {
      // RIPMAP: one image per (level_x, level_y) pair, chained via next_level.
      EXRImage* level_image = NULL;
      for (int level_y = 0; level_y < offset_data.num_y_levels; ++level_y)
        for (int level_x = 0; level_x < offset_data.num_x_levels; ++level_x) {
          if (!level_image) {
            level_image = exr_image;
          } else {
            level_image->next_level = new EXRImage;
            InitEXRImage(level_image->next_level);
            level_image = level_image->next_level;
          }
          level_image->width =
              LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level_x, exr_header->tile_rounding_mode);
          level_image->height =
              LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level_y, exr_header->tile_rounding_mode);
          level_image->level_x = level_x;
          level_image->level_y = level_y;
          int ret = DecodeTiledLevel(level_image, exr_header,
                                     offset_data,
                                     channel_offset_list,
                                     pixel_data_size,
                                     head, size,
                                     err);
          if (ret != TINYEXR_SUCCESS) return ret;
        }
    }
  } else { // scanline format
    // Don't allow too large image(256GB * pixel_data_size or more). Workaround
    // for #104.
    size_t total_data_len =
        size_t(data_width) * size_t(data_height) * size_t(num_channels);
    const bool total_data_len_overflown =
        sizeof(void *) == 8 ? (total_data_len >= 0x4000000000) : false;
    if ((total_data_len == 0) || total_data_len_overflown) {
      if (err) {
        std::stringstream ss;
        ss << "Image data size is zero or too large: width = " << data_width
           << ", height = " << data_height << ", channels = " << num_channels
           << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    exr_image->images = tinyexr::AllocateImage(
        num_channels, exr_header->channels, exr_header->requested_pixel_types,
        data_width, data_height);
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
    // Worker pool: each thread claims the next scanline block index from an
    // atomic counter until all blocks are consumed.
    std::vector<std::thread> workers;
    std::atomic<int> y_count(0);
    int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
    if (num_threads > int(num_blocks)) {
      num_threads = int(num_blocks);
    }
    for (int t = 0; t < num_threads; t++) {
      workers.emplace_back(std::thread([&]() {
        int y = 0;
        while ((y = y_count++) < int(num_blocks)) {
#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
    for (int y = 0; y < static_cast<int>(num_blocks); y++) {
#endif
      size_t y_idx = static_cast<size_t>(y);
      if (offsets[y_idx] + sizeof(int) * 2 > size) {
        invalid_data = true;
      } else {
        // Scanline chunk layout:
        // 4 byte: scan line
        // 4 byte: data size
        // ~     : pixel data(uncompressed or compressed)
        size_t data_size =
            size_t(size - (offsets[y_idx] + sizeof(int) * 2));
        const unsigned char *data_ptr =
            reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
        int line_no;
        memcpy(&line_no, data_ptr, sizeof(int));
        int data_len;
        memcpy(&data_len, data_ptr + 4, sizeof(int));
        tinyexr::swap4(&line_no);
        tinyexr::swap4(&data_len);
        if (size_t(data_len) > data_size) {
          invalid_data = true;
        } else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
          // Too large value. Assume this is invalid
          // 2**20 = 1048576 = heuristic value.
          invalid_data = true;
        } else if (data_len == 0) {
          // TODO(syoyo): May be ok to raise the threshold for example
          // `data_len < 4`
          invalid_data = true;
        } else {
          // line_no may be negative.
          int end_line_no = (std::min)(line_no + num_scanline_blocks,
                                       (exr_header->data_window.max_y + 1));
          int num_lines = end_line_no - line_no;
          if (num_lines <= 0) {
            invalid_data = true;
          } else {
            // Move to data addr: 8 = 4 + 4;
            data_ptr += 8;
            // Adjust line_no with data_window.bmin.y
            // overflow check
            tinyexr_int64 lno =
                static_cast<tinyexr_int64>(line_no) -
                static_cast<tinyexr_int64>(exr_header->data_window.min_y);
            if (lno > std::numeric_limits<int>::max()) {
              line_no = -1; // invalid
            } else if (lno < -std::numeric_limits<int>::max()) {
              line_no = -1; // invalid
            } else {
              line_no -= exr_header->data_window.min_y;
            }
            if (line_no < 0) {
              invalid_data = true;
            } else {
              if (!tinyexr::DecodePixelData(
                      exr_image->images, exr_header->requested_pixel_types,
                      data_ptr, static_cast<size_t>(data_len),
                      exr_header->compression_type, exr_header->line_order,
                      data_width, data_height, data_width, y, line_no,
                      num_lines, static_cast<size_t>(pixel_data_size),
                      static_cast<size_t>(
                          exr_header->num_custom_attributes),
                      exr_header->custom_attributes,
                      static_cast<size_t>(exr_header->num_channels),
                      exr_header->channels, channel_offset_list)) {
                invalid_data = true;
              }
            }
          }
        }
      }
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
        }
      }));
    }
    for (auto &t : workers) {
      t.join();
    }
#else
    } // omp parallel
#endif
  }
  if (invalid_data) {
    if (err) {
      (*err) += "Invalid data found when decoding pixels.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
  // Overwrite `pixel_type` with `requested_pixel_type`.
  {
    for (int c = 0; c < exr_header->num_channels; c++) {
      exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
    }
  }
  {
    exr_image->num_channels = num_channels;
    exr_image->width = data_width;
    exr_image->height = data_height;
  }
  return TINYEXR_SUCCESS;
}
// Reconstruct the scanline offset table by walking the chunk headers, for
// files whose stored offset table is missing or corrupt.
//
// offsets : output table (must already be sized to `n`).
// n       : number of scanline blocks expected.
// head    : start of the EXR memory image.
// marker  : read position of the first chunk header.
// size    : total size of the EXR memory image in bytes.
//
// Returns false as soon as a chunk header would read past the end of data.
static bool ReconstructLineOffsets(
    std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
    const unsigned char *head, const unsigned char *marker, const size_t size) {
  assert(head < marker);
  assert(offsets->size() == n);
  for (size_t i = 0; i < n; i++) {
    size_t offset = static_cast<size_t>(marker - head);
    // Each chunk header is 8 bytes: 4-byte scanline y + 4-byte data length.
    // Offset should not exceed whole EXR file/data size.
    if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
      return false;
    }
    int y;
    unsigned int data_len;
    memcpy(&y, marker, sizeof(int));
    memcpy(&data_len, marker + 4, sizeof(unsigned int));
    // Byte-swap to host order BEFORE validating; otherwise on big-endian
    // hosts the sanity check below would test the raw (wrong-order) value.
    tinyexr::swap4(&y);
    tinyexr::swap4(&data_len);
    if (data_len >= size) {
      return false;
    }
    (*offsets)[i] = offset;
    marker += data_len + 8;  // 8 = 4 bytes(y) + 4 bytes(data_len)
  }
  return true;
}
// For x > 0, returns floor(log2(x)), i.e. the position of the highest set
// bit. Returns 0 for x <= 1.
static int FloorLog2(unsigned x) {
  int result = 0;
  for (; x > 1; x >>= 1u) {
    ++result;
  }
  return result;
}
// For x > 0, returns ceil(log2(x)). Returns 0 for x <= 1.
static int CeilLog2(unsigned x) {
  int floor_log = 0;
  int round_up = 0;
  for (; x > 1; x >>= 1u) {
    if (x & 1u) {
      // A set bit was shifted out, so x was not a power of two: round up.
      round_up = 1;
    }
    ++floor_log;
  }
  return floor_log + round_up;
}
// Rounded log2 of x, using floor for TINYEXR_TILE_ROUND_DOWN and ceil
// otherwise.
static int RoundLog2(int x, int tile_rounding_mode) {
  const unsigned ux = static_cast<unsigned>(x);
  if (tile_rounding_mode == TINYEXR_TILE_ROUND_DOWN) {
    return FloorLog2(ux);
  }
  return CeilLog2(ux);
}
// Number of resolution levels along x implied by the data window size and
// the tile level mode.
static int CalculateNumXLevels(const EXRHeader* exr_header) {
  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
      return 1;
    case TINYEXR_TILE_MIPMAP_LEVELS: {
      // Mipmaps shrink both axes together: level count follows the larger axis.
      const int w = exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
      const int h = exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
      return RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;
    }
    case TINYEXR_TILE_RIPMAP_LEVELS: {
      // Ripmaps shrink x independently: level count follows the width.
      const int w = exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
      return RoundLog2(w, exr_header->tile_rounding_mode) + 1;
    }
    default:
      assert(false);
  }
  return 0;
}
// Number of resolution levels along y implied by the data window size and
// the tile level mode.
static int CalculateNumYLevels(const EXRHeader* exr_header) {
  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
      return 1;
    case TINYEXR_TILE_MIPMAP_LEVELS: {
      // Mipmaps shrink both axes together: level count follows the larger axis.
      const int w = exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
      const int h = exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
      return RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;
    }
    case TINYEXR_TILE_RIPMAP_LEVELS: {
      // Ripmaps shrink y independently: level count follows the height.
      const int h = exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
      return RoundLog2(h, exr_header->tile_rounding_mode) + 1;
    }
    default:
      assert(false);
  }
  return 0;
}
static void CalculateNumTiles(std::vector<int>& numTiles,
int toplevel_size,
int size,
int tile_rounding_mode) {
for (unsigned i = 0; i < numTiles.size(); i++) {
int l = LevelSize(toplevel_size, i, tile_rounding_mode);
assert(l <= std::numeric_limits<int>::max() - size + 1);
numTiles[i] = (l + size - 1) / size;
}
}
// Fill per-level tile counts along x and y for the header's data window.
static void PrecalculateTileInfo(std::vector<int>& num_x_tiles,
                                 std::vector<int>& num_y_tiles,
                                 const EXRHeader* exr_header) {
  const int data_width =
      exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  const int data_height =
      exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
  num_x_tiles.resize(CalculateNumXLevels(exr_header));
  num_y_tiles.resize(CalculateNumYLevels(exr_header));
  CalculateNumTiles(num_x_tiles, data_width, exr_header->tile_size_x,
                    exr_header->tile_rounding_mode);
  CalculateNumTiles(num_y_tiles, data_height, exr_header->tile_size_y,
                    exr_header->tile_rounding_mode);
}
// Shape the offset table for a scanline (non-tiled) image: a single level
// holding one row of `num_blocks` chunk offsets.
static void InitSingleResolutionOffsets(OffsetData& offset_data, size_t num_blocks) {
  offset_data.num_x_levels = 1;
  offset_data.num_y_levels = 1;
  offset_data.offsets.resize(1);
  offset_data.offsets[0].resize(1);
  offset_data.offsets[0][0].resize(num_blocks);
}
// Return sum of tile blocks.
// Shape offset_data.offsets as [level][tile_y][tile_x] according to the tile
// level mode, and return the total number of tile blocks allocated.
static int InitTileOffsets(OffsetData& offset_data,
                           const EXRHeader* exr_header,
                           const std::vector<int>& num_x_tiles,
                           const std::vector<int>& num_y_tiles) {
  int total_blocks = 0;
  offset_data.num_x_levels = static_cast<int>(num_x_tiles.size());
  offset_data.num_y_levels = static_cast<int>(num_y_tiles.size());
  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
    case TINYEXR_TILE_MIPMAP_LEVELS: {
      // Mipmaps (and the single-level case) use one table per level index.
      assert(offset_data.num_x_levels == offset_data.num_y_levels);
      offset_data.offsets.resize(offset_data.num_x_levels);
      for (size_t l = 0; l < offset_data.offsets.size(); ++l) {
        offset_data.offsets[l].resize(num_y_tiles[l]);
        for (size_t dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
          offset_data.offsets[l][dy].resize(num_x_tiles[l]);
          total_blocks += num_x_tiles[l];
        }
      }
      break;
    }
    case TINYEXR_TILE_RIPMAP_LEVELS: {
      // Ripmaps store a full grid of (x level, y level) combinations.
      offset_data.offsets.resize(static_cast<size_t>(offset_data.num_x_levels) *
                                 static_cast<size_t>(offset_data.num_y_levels));
      for (int ly = 0; ly < offset_data.num_y_levels; ++ly) {
        for (int lx = 0; lx < offset_data.num_x_levels; ++lx) {
          const int level = ly * offset_data.num_x_levels + lx;
          offset_data.offsets[level].resize(num_y_tiles[ly]);
          for (size_t dy = 0; dy < offset_data.offsets[level].size(); ++dy) {
            offset_data.offsets[level][dy].resize(num_x_tiles[lx]);
            total_blocks += num_x_tiles[lx];
          }
        }
      }
      break;
    }
    default:
      assert(false);
  }
  return total_blocks;
}
static bool IsAnyOffsetsAreInvalid(const OffsetData& offset_data) {
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l)
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy)
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx)
if (reinterpret_cast<const tinyexr::tinyexr_int64&>(offset_data.offsets[l][dy][dx]) <= 0)
return true;
return false;
}
// Returns true when (dx, dy) is an in-range tile coordinate for level
// (lx, ly) under the header's tile level mode, checked against the actual
// shape of the offset tables.
static bool isValidTile(const EXRHeader* exr_header,
                        const OffsetData& offset_data,
                        int dx, int dy, int lx, int ly) {
  if (lx < 0 || ly < 0 || dx < 0 || dy < 0) {
    return false;
  }
  const size_t sdx = static_cast<size_t>(dx);
  const size_t sdy = static_cast<size_t>(dy);
  const int num_x_levels = offset_data.num_x_levels;
  const int num_y_levels = offset_data.num_y_levels;
  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
      // Single level: only (0, 0) is a valid level pair.
      return lx == 0 && ly == 0 &&
             offset_data.offsets.size() > 0 &&
             offset_data.offsets[0].size() > sdy &&
             offset_data.offsets[0][sdy].size() > sdx;
    case TINYEXR_TILE_MIPMAP_LEVELS:
      // Mipmap levels are indexed by lx alone.
      return lx < num_x_levels && ly < num_y_levels &&
             offset_data.offsets.size() > static_cast<size_t>(lx) &&
             offset_data.offsets[static_cast<size_t>(lx)].size() > sdy &&
             offset_data.offsets[static_cast<size_t>(lx)][sdy].size() > sdx;
    case TINYEXR_TILE_RIPMAP_LEVELS: {
      // Ripmap levels are indexed row-major by the (lx, ly) grid position.
      const size_t idx = static_cast<size_t>(lx) +
                         static_cast<size_t>(ly) * static_cast<size_t>(num_x_levels);
      return lx < num_x_levels && ly < num_y_levels &&
             offset_data.offsets.size() > idx &&
             offset_data.offsets[idx].size() > sdy &&
             offset_data.offsets[idx][sdy].size() > sdx;
    }
    default:
      return false;
  }
  return false;
}
// Rebuild the tile offset table by walking the tile chunk headers
// sequentially, for files whose stored offset table is missing or corrupt.
// Stops at the first chunk whose coordinates fail isValidTile().
// NOTE(review): `size` is accepted but ignored, so none of the reads below
// are bounds-checked against `head + size`; a truncated or hostile file can
// drive `marker` past the end of the buffer before isValidTile() bails out —
// consider validating each header read against the remaining size.
static void ReconstructTileOffsets(OffsetData& offset_data,
                                   const EXRHeader* exr_header,
                                   const unsigned char* head, const unsigned char* marker, const size_t /*size*/,
                                   bool isMultiPartFile,
                                   bool isDeep) {
  int numXLevels = offset_data.num_x_levels;
  for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
    for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
      for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
        tinyexr::tinyexr_uint64 tileOffset = marker - head;
        if (isMultiPartFile) {
          // Multi-part chunks are prefixed with a 4-byte part number; skip it.
          //int partNumber;
          marker += sizeof(int);
        }
        // Tile chunk header: tile x, tile y, level x, level y (4 bytes each).
        int tileX;
        memcpy(&tileX, marker, sizeof(int));
        tinyexr::swap4(&tileX);
        marker += sizeof(int);
        int tileY;
        memcpy(&tileY, marker, sizeof(int));
        tinyexr::swap4(&tileY);
        marker += sizeof(int);
        int levelX;
        memcpy(&levelX, marker, sizeof(int));
        tinyexr::swap4(&levelX);
        marker += sizeof(int);
        int levelY;
        memcpy(&levelY, marker, sizeof(int));
        tinyexr::swap4(&levelY);
        marker += sizeof(int);
        if (isDeep) {
          // Deep tiles: skip the packed offset table and packed sample data.
          tinyexr::tinyexr_int64 packed_offset_table_size;
          memcpy(&packed_offset_table_size, marker, sizeof(tinyexr::tinyexr_int64));
          tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_offset_table_size));
          marker += sizeof(tinyexr::tinyexr_int64);
          tinyexr::tinyexr_int64 packed_sample_size;
          memcpy(&packed_sample_size, marker, sizeof(tinyexr::tinyexr_int64));
          tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_sample_size));
          marker += sizeof(tinyexr::tinyexr_int64);
          // next Int64 is unpacked sample size - skip that too
          marker += packed_offset_table_size + packed_sample_size + 8;
        } else {
          // Flat tiles: a 4-byte data size followed by the (compressed) data.
          int dataSize;
          memcpy(&dataSize, marker, sizeof(int));
          tinyexr::swap4(&dataSize);
          marker += sizeof(int);
          marker += dataSize;
        }
        // Abort on the first chunk whose coordinates don't fit the table.
        if (!isValidTile(exr_header, offset_data,
                         tileX, tileY, levelX, levelY))
          return;
        int level_idx = LevelIndex(levelX, levelY, exr_header->tile_level_mode, numXLevels);
        offset_data.offsets[level_idx][tileY][tileX] = tileOffset;
      }
    }
  }
}
// Reads the tile offset table (one uint64 per tile block) from `marker`,
// advancing `marker` past the table on success. offset_data.offsets must
// already be sized (see InitTileOffsets). Returns TINYEXR_SUCCESS, or
// TINYEXR_ERROR_INVALID_DATA on truncated input / out-of-range offsets.
static int ReadOffsets(OffsetData& offset_data,
                       const unsigned char* head,
                       const unsigned char*& marker,
                       const size_t size,
                       const char** err) {
  for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
    for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
      for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
        tinyexr::tinyexr_uint64 offset;
        // NOTE(review): `>=` also rejects a read ending exactly at the buffer
        // end; conservative but harmless, since chunk data always follows the
        // offset table in a valid file.
        if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
          tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
        memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
        tinyexr::swap8(&offset);  // convert stored byte order to host order
        // An offset pointing at or beyond the end of the buffer is corrupt.
        if (offset >= size) {
          tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
        marker += sizeof(tinyexr::tinyexr_uint64); // = 8
        offset_data.offsets[l][dy][dx] = offset;
      }
    }
  }
  return TINYEXR_SUCCESS;
}
// Decodes the pixel data of an EXR file: validates the data window,
// reads (or reconstructs) the chunk offset table, then decompresses all
// chunks via DecodeChunk(). `head` points at the start of the file buffer,
// `marker` at the start of the offset table (just past the header).
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
                          const unsigned char *head,
                          const unsigned char *marker, const size_t size,
                          const char **err) {
  if (exr_image == NULL || exr_header == NULL || head == NULL ||
      marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  // Scanlines per chunk depends on the compression scheme
  // (ZIP: 16, PIZ: 32, ZFP: 16, others: 1).
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }
  // Reject inverted windows and widths whose +1 below would overflow int.
  if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
      exr_header->data_window.max_x - exr_header->data_window.min_x ==
          std::numeric_limits<int>::max()) {
    // Issue 63
    tinyexr::SetErrorMessage("Invalid data width value", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  int data_width =
      exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  if (exr_header->data_window.max_y < exr_header->data_window.min_y ||
      exr_header->data_window.max_y - exr_header->data_window.min_y ==
          std::numeric_limits<int>::max()) {
    tinyexr::SetErrorMessage("Invalid data height value", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  int data_height =
      exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
  // Do not allow too large data_width and data_height. header invalid?
  {
    if (data_width > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("data width too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (data_height > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("data height too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
  }
  if (exr_header->tiled) {
    if (exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("tile width too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("tile height too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
  }
  // Read offset tables.
  OffsetData offset_data;
  size_t num_blocks = 0;
  // For a multi-resolution image, the size of the offset table will be calculated from the other attributes of the header.
  // If chunk_count > 0 then chunk_count must be equal to the calculated tile count.
  if (exr_header->tiled) {
    {
      std::vector<int> num_x_tiles, num_y_tiles;
      PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_header);
      num_blocks = InitTileOffsets(offset_data, exr_header, num_x_tiles, num_y_tiles);
      if (exr_header->chunk_count > 0) {
        if (exr_header->chunk_count != static_cast<int>(num_blocks)) {
          tinyexr::SetErrorMessage("Invalid offset table size.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
      }
    }
    int ret = ReadOffsets(offset_data, head, marker, size, err);
    if (ret != TINYEXR_SUCCESS) return ret;
    // If the stored table has zero/invalid entries, try to rebuild it by
    // scanning the chunk stream (incomplete files can still be readable).
    if (IsAnyOffsetsAreInvalid(offset_data)) {
      ReconstructTileOffsets(offset_data, exr_header,
                             head, marker, size,
                             exr_header->multipart, exr_header->non_image);
    }
  } else if (exr_header->chunk_count > 0) {
    // Use `chunkCount` attribute.
    num_blocks = static_cast<size_t>(exr_header->chunk_count);
    InitSingleResolutionOffsets(offset_data, num_blocks);
  } else {
    // No chunkCount attribute: derive the block count from the image height
    // (rounding up to cover a partial final block).
    num_blocks = static_cast<size_t>(data_height) /
                 static_cast<size_t>(num_scanline_blocks);
    if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
        static_cast<size_t>(data_height)) {
      num_blocks++;
    }
    InitSingleResolutionOffsets(offset_data, num_blocks);
  }
  if (!exr_header->tiled) {
    // Scanline image: read the flat offset table in place.
    std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
    for (size_t y = 0; y < num_blocks; y++) {
      tinyexr::tinyexr_uint64 offset;
      // Issue #81
      if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
        tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
      memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
      tinyexr::swap8(&offset);
      if (offset >= size) {
        tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
      marker += sizeof(tinyexr::tinyexr_uint64); // = 8
      offsets[y] = offset;
    }
    // If line offsets are invalid, we try to reconstruct it.
    // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
    for (size_t y = 0; y < num_blocks; y++) {
      // NOTE: offsets are unsigned, so `<= 0` is effectively `== 0` (unset).
      if (offsets[y] <= 0) {
        // TODO(syoyo) Report as warning?
        // if (err) {
        // stringstream ss;
        // ss << "Incomplete lineOffsets." << std::endl;
        // (*err) += ss.str();
        //}
        bool ret =
            ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
        if (ret) {
          // OK
          break;
        } else {
          tinyexr::SetErrorMessage(
              "Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
      }
    }
  }
  {
    std::string e;
    int ret = DecodeChunk(exr_image, exr_header, offset_data, head, size, &e);
    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }
      // Release any partially-decoded image data on failure.
#if 1
      FreeEXRImage(exr_image);
#else
      // release memory(if exists)
      if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
        for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
          if (exr_image->images[c]) {
            free(exr_image->images[c]);
            exr_image->images[c] = NULL;
          }
        }
        free(exr_image->images);
        exr_image->images = NULL;
      }
#endif
    }
    return ret;
  }
}
// Collects the unique layer prefixes over all channel names into
// `layer_names`, preserving first-seen order. A channel named
// "diffuse.R" contributes layer "diffuse"; channels without a proper
// "<layer>.<channel>" shape (no dot, leading dot, trailing dot) are skipped.
static void GetLayers(const EXRHeader &exr_header,
                      std::vector<std::string> &layer_names) {
  layer_names.clear();
  for (int ci = 0; ci < exr_header.num_channels; ci++) {
    std::string name(exr_header.channels[ci].name);
    const size_t dot = name.find_last_of('.');
    // Require a dot that is neither the first nor the last character.
    if (dot == std::string::npos || dot == 0 || dot + 1 >= name.size()) {
      continue;
    }
    name.erase(dot);  // keep only the layer prefix
    const bool already_seen =
        std::find(layer_names.begin(), layer_names.end(), name) !=
        layer_names.end();
    if (!already_seen) {
      layer_names.push_back(name);
    }
  }
}
// Pairs a channel's index in EXRHeader::channels with its (possibly
// layer-prefix-stripped) name. Built by ChannelsInLayer().
struct LayerChannel {
  explicit LayerChannel(size_t i, std::string n) : index(i), name(n) {}
  size_t index;      // index into EXRHeader::channels
  std::string name;  // channel name, e.g. "R" (layer prefix removed when matched)
};
// Collects into `channels` the channels belonging to `layer_name`.
// With an empty layer name, every channel is returned with its name reduced
// to the part after the last '.'. Otherwise only channels whose full name
// contains "<layer_name>." are kept; the prefix is stripped only when it
// starts the name. NOTE(review): a mid-string match keeps the full channel
// name unmodified — confirm this is intended rather than requiring pos == 0.
static void ChannelsInLayer(const EXRHeader &exr_header,
                            const std::string &layer_name,
                            std::vector<LayerChannel> &channels) {
  channels.clear();
  for (int c = 0; c < exr_header.num_channels; c++) {
    std::string ch_name(exr_header.channels[c].name);
    if (layer_name.empty()) {
      // No layer filter: strip any layer prefix from the channel name.
      const size_t pos = ch_name.find_last_of('.');
      if (pos != std::string::npos && pos < ch_name.size()) {
        ch_name = ch_name.substr(pos + 1);
      }
    } else {
      const size_t pos = ch_name.find(layer_name + '.');
      if (pos == std::string::npos) continue;
      if (pos == 0) {
        // Channel belongs to the requested layer: drop "<layer>." prefix.
        ch_name = ch_name.substr(layer_name.size() + 1);
      }
    }
    LayerChannel ch(size_t(c), ch_name);
    channels.push_back(ch);
  }
}
} // namespace tinyexr
int EXRLayers(const char *filename, const char **layer_names[], int *num_layers,
const char **err) {
EXRVersion exr_version;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage("Invalid EXR header.", err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
std::vector<std::string> layer_vec;
tinyexr::GetLayers(exr_header, layer_vec);
(*num_layers) = int(layer_vec.size());
(*layer_names) = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size())));
for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) {
#ifdef _MSC_VER
(*layer_names)[c] = _strdup(layer_vec[c].c_str());
#else
(*layer_names)[c] = strdup(layer_vec[c].c_str());
#endif
}
FreeEXRHeader(&exr_header);
return TINYEXR_SUCCESS;
}
#if 0
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
const char **err) {
return LoadEXRWithLayer(out_rgba, width, height, filename,
/* layername */ NULL, err);
}
#endif
// Loads an EXR file as a linear RGBA float buffer, selecting channels from
// the named layer (NULL layername = default/unprefixed layer). On success
// `*out_rgba` receives a malloc'ed width*height*4 float array (caller frees)
// and `*width`/`*height` the image dimensions. Single-channel images are
// replicated into all four components. HALF channels are read as FLOAT.
int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
                     const char *filename, const char *layername,
                     const char **err) {
  if (out_rgba == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;
  InitEXRHeader(&exr_header);
  InitEXRImage(&exr_image);
  {
    int ret = ParseEXRVersionFromFile(&exr_version, filename);
    if (ret != TINYEXR_SUCCESS) {
      std::stringstream ss;
      ss << "Failed to open EXR file or read version info from EXR file. code("
         << ret << ")";
      tinyexr::SetErrorMessage(ss.str(), err);
      return ret;
    }
    if (exr_version.multipart || exr_version.non_image) {
      tinyexr::SetErrorMessage(
          "Loading multipart or DeepImage is not supported in LoadEXR() API",
          err);
      return TINYEXR_ERROR_INVALID_DATA; // @fixme.
    }
  }
  {
    int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);
      return ret;
    }
  }
  // Read HALF channel as FLOAT.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }
  // TODO: Probably limit loading to layers (channels) selected by layer index
  {
    int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);
      return ret;
    }
  }
  // RGBA channel indices into the decoded image (-1 = not present).
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  std::vector<std::string> layer_names;
  tinyexr::GetLayers(exr_header, layer_names);
  std::vector<tinyexr::LayerChannel> channels;
  tinyexr::ChannelsInLayer(
      exr_header, layername == NULL ? "" : std::string(layername), channels);
  if (channels.size() < 1) {
    tinyexr::SetErrorMessage("Layer Not Found", err);
    FreeEXRHeader(&exr_header);
    FreeEXRImage(&exr_image);
    return TINYEXR_ERROR_LAYER_NOT_FOUND;
  }
  // Map up to the first four channels onto R/G/B/A by name.
  size_t ch_count = channels.size() < 4 ? channels.size() : 4;
  for (size_t c = 0; c < ch_count; c++) {
    const tinyexr::LayerChannel &ch = channels[c];
    if (ch.name == "R") {
      idxR = int(ch.index);
    } else if (ch.name == "G") {
      idxG = int(ch.index);
    } else if (ch.name == "B") {
      idxB = int(ch.index);
    } else if (ch.name == "A") {
      idxA = int(ch.index);
    }
  }
  if (channels.size() == 1) {
    int chIdx = int(channels.front().index);
    // Grayscale channel only.
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if (exr_header.tiled) {
      // Copy tile-by-tile, replicating the single channel into RGBA.
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii = exr_image.tiles[it].offset_x *
                               static_cast<int>(exr_header.tile_size_x) +
                           i;
            const int jj = exr_image.tiles[it].offset_y *
                               static_cast<int>(exr_header.tile_size_y) +
                           j;
            const int idx = ii + jj * static_cast<int>(exr_image.width);
            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
            (*out_rgba)[4 * idx + 3] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
          }
        }
      }
    } else {
      // Scanline image: straight per-pixel replication.
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        const float val =
            reinterpret_cast<float **>(exr_image.images)[chIdx][i];
        (*out_rgba)[4 * i + 0] = val;
        (*out_rgba)[4 * i + 1] = val;
        (*out_rgba)[4 * i + 2] = val;
        (*out_rgba)[4 * i + 3] = val;
      }
    }
  } else {
    // Assume RGB(A)
    if (idxR == -1) {
      tinyexr::SetErrorMessage("R channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (idxG == -1) {
      tinyexr::SetErrorMessage("G channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (idxB == -1) {
      tinyexr::SetErrorMessage("B channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if (exr_header.tiled) {
      // Copy tile-by-tile; alpha defaults to 1.0 when the layer has no A.
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;
            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[idxR][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[idxG][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[idxB][srcIdx];
            if (idxA != -1) {
              (*out_rgba)[4 * idx + 3] =
                  reinterpret_cast<float **>(src)[idxA][srcIdx];
            } else {
              (*out_rgba)[4 * idx + 3] = 1.0;
            }
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        (*out_rgba)[4 * i + 0] =
            reinterpret_cast<float **>(exr_image.images)[idxR][i];
        (*out_rgba)[4 * i + 1] =
            reinterpret_cast<float **>(exr_image.images)[idxG][i];
        (*out_rgba)[4 * i + 2] =
            reinterpret_cast<float **>(exr_image.images)[idxB][i];
        if (idxA != -1) {
          (*out_rgba)[4 * i + 3] =
              reinterpret_cast<float **>(exr_image.images)[idxA][i];
        } else {
          (*out_rgba)[4 * i + 3] = 1.0;
        }
      }
    }
  }
  (*width) = exr_image.width;
  (*height) = exr_image.height;
  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);
  return TINYEXR_SUCCESS;
}
int IsEXR(const char *filename) {
EXRVersion exr_version;
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
return TINYEXR_SUCCESS;
}
// Parses an EXR header from an in-memory buffer (which must start with the
// magic number + version bytes) into `exr_header`. Returns TINYEXR_SUCCESS
// or an error code, setting `*err` on failure.
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
                             const unsigned char *memory, size_t size,
                             const char **err) {
  if (memory == NULL || exr_header == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument. `memory` or `exr_header` argument is null in "
        "ParseEXRHeaderFromMemory()",
        err);
    // Invalid argument
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  // Header attributes begin right after the version block.
  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;
  tinyexr::HeaderInfo info;
  info.clear();
  std::string err_str;
  int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);
  if (ret != TINYEXR_SUCCESS) {
    if (err && !err_str.empty()) {
      tinyexr::SetErrorMessage(err_str, err);
    }
  }
  // NOTE(review): ConvertHeader runs even when parsing failed, so
  // `exr_header` is filled with whatever was parsed so far — confirm callers
  // always check the return code before trusting the header.
  ConvertHeader(exr_header, info);
  exr_header->multipart = version->multipart ? 1 : 0;
  exr_header->non_image = version->non_image ? 1 : 0;
  return ret;
}
#if 0
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err) {
if (out_rgba == NULL || memory == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
if (ret != TINYEXR_SUCCESS) {
std::stringstream ss;
ss << "Failed to parse EXR version. code(" << ret << ")";
tinyexr::SetErrorMessage(ss.str(), err);
return ret;
}
ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
InitEXRImage(&exr_image);
ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
for (int c = 0; c < exr_header.num_channels; c++) {
if (strcmp(exr_header.channels[c].name, "R") == 0) {
idxR = c;
} else if (strcmp(exr_header.channels[c].name, "G") == 0) {
idxG = c;
} else if (strcmp(exr_header.channels[c].name, "B") == 0) {
idxB = c;
} else if (strcmp(exr_header.channels[c].name, "A") == 0) {
idxA = c;
}
}
// TODO(syoyo): Refactor removing same code as used in LoadEXR().
if (exr_header.num_channels == 1) {
// Grayscale channel only.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[0][srcIdx];
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
}
} else {
// TODO(syoyo): Support non RGBA image.
if (idxR == -1) {
tinyexr::SetErrorMessage("R channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
tinyexr::SetErrorMessage("G channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
tinyexr::SetErrorMessage("B channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++)
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
(*out_rgba)[4 * idx + 3] = 1.0;
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
#endif
// Reads the whole file into a memory buffer and defers decoding to
// LoadEXRImageFromMemory(). `exr_header` must already be parsed.
// BUGFIX: the "file size too short" path previously returned without
// closing `fp` (handle leak), and a short fread() was only caught by
// assert(), which is a no-op under NDEBUG.
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
                         const char *filename, const char **err) {
  if (exr_image == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    // TODO(syoyo): return wfopen_s erro code
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
  // Compute size. A negative ftell() result (error) also fails the
  // minimum-size test below.
  fseek(fp, 0, SEEK_END);
  long file_length = ftell(fp);
  fseek(fp, 0, SEEK_SET);
  if (file_length < 16) {
    fclose(fp);  // was leaked here before the fix
    tinyexr::SetErrorMessage("File size too short " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  size_t filesize = static_cast<size_t>(file_length);
  std::vector<unsigned char> buf(filesize); // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    fclose(fp);
    if (ret != filesize) {
      // Short read: report explicitly instead of relying on assert().
      tinyexr::SetErrorMessage("Cannot read file " + std::string(filename),
                               err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }
  return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
                                err);
}
// Decodes an EXR image from an in-memory buffer. `exr_header` must already
// be parsed (header_len set); decoding starts just past the header.
// BUGFIX: validates that the header (plus the 8-byte magic + version prefix)
// actually fits inside `memory` before computing `marker`; a corrupt
// header_len previously produced an out-of-bounds pointer.
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
                           const unsigned char *memory, const size_t size,
                           const char **err) {
  if (exr_image == NULL || memory == NULL ||
      (size < tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  if (exr_header->header_len == 0) {
    tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  if (size < static_cast<size_t>(exr_header->header_len) + 8) {
    tinyexr::SetErrorMessage("Insufficient data size for EXR header.", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  const unsigned char *head = memory;
  const unsigned char *marker = reinterpret_cast<const unsigned char *>(
      memory + exr_header->header_len +
      8); // +8 for magic number + version header.
  return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
                                 err);
}
namespace tinyexr
{
// out_data must be allocated initially with the block-header size
// of the current image(-part) type
// Converts one block of image rows (a scanline block, or one tile) from the
// caller's channel-planar `images` into the EXR on-disk channel-interleaved
// layout, applying per-channel pixel-type conversion (HALF<->FLOAT, UINT)
// and byte swapping, then compresses the result with `compression_type`
// and appends the compressed bytes to `out_data`.
// out_data must be allocated initially with the block-header size
// of the current image(-part) type. Returns false on an unknown
// compression type.
static bool EncodePixelData(/* out */ std::vector<unsigned char>& out_data,
                            const unsigned char* const* images,
                            int compression_type,
                            int /*line_order*/,
                            int width, // for tiled : tile.width
                            int /*height*/, // for tiled : header.tile_size_y
                            int x_stride, // for tiled : header.tile_size_x
                            int line_no, // for tiled : 0
                            int num_lines, // for tiled : tile.height
                            size_t pixel_data_size,
                            const std::vector<ChannelInfo>& channels,
                            const std::vector<size_t>& channel_offset_list,
                            const void* compression_param = 0) // zfp compression param
{
  // Staging buffer holding the uncompressed, channel-interleaved block.
  size_t buf_size = static_cast<size_t>(width) *
                    static_cast<size_t>(num_lines) *
                    static_cast<size_t>(pixel_data_size);
  //int last2bit = (buf_size & 3);
  // buf_size must be multiple of four
  //if(last2bit) buf_size += 4 - last2bit;
  std::vector<unsigned char> buf(buf_size);
  size_t start_y = static_cast<size_t>(line_no);
  // For each channel, convert stored pixel_type -> requested_pixel_type and
  // write rows at that channel's interleaved offset within each scanline.
  for (size_t c = 0; c < channels.size(); c++) {
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        // HALF input promoted to FLOAT output.
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          float *line_ptr = reinterpret_cast<float *>(&buf.at(
              static_cast<size_t>(pixel_data_size * y * width) +
              channel_offset_list[c] *
                  static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            tinyexr::FP16 h16;
            h16.u = reinterpret_cast<const unsigned short * const *>(
                images)[c][(y + start_y) * x_stride + x];
            tinyexr::FP32 f32 = half_to_float(h16);
            tinyexr::swap4(&f32.f);
            // line_ptr[x] = f32.f;
            tinyexr::cpy4(line_ptr + x, &(f32.f));  // unaligned-safe store
          }
        }
      } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
        // HALF passthrough (byte-swapped only).
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &buf.at(static_cast<size_t>(pixel_data_size * y *
                                          width) +
                      channel_offset_list[c] *
                          static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            unsigned short val = reinterpret_cast<const unsigned short * const *>(
                images)[c][(y + start_y) * x_stride + x];
            tinyexr::swap2(&val);
            // line_ptr[x] = val;
            tinyexr::cpy2(line_ptr + x, &val);
          }
        }
      } else {
        assert(0);  // HALF can only be written as HALF or FLOAT
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
        // FLOAT input demoted to HALF output.
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &buf.at(static_cast<size_t>(pixel_data_size * y *
                                          width) +
                      channel_offset_list[c] *
                          static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            tinyexr::FP32 f32;
            f32.f = reinterpret_cast<const float * const *>(
                images)[c][(y + start_y) * x_stride + x];
            tinyexr::FP16 h16;
            h16 = float_to_half_full(f32);
            tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));
            // line_ptr[x] = h16.u;
            tinyexr::cpy2(line_ptr + x, &(h16.u));
          }
        }
      } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        // FLOAT passthrough (byte-swapped only).
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          float *line_ptr = reinterpret_cast<float *>(&buf.at(
              static_cast<size_t>(pixel_data_size * y * width) +
              channel_offset_list[c] *
                  static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            float val = reinterpret_cast<const float * const *>(
                images)[c][(y + start_y) * x_stride + x];
            tinyexr::swap4(&val);
            // line_ptr[x] = val;
            tinyexr::cpy4(line_ptr + x, &val);
          }
        }
      } else {
        assert(0);  // FLOAT can only be written as HALF or FLOAT
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      // UINT passthrough (byte-swapped only).
      for (int y = 0; y < num_lines; y++) {
        // Assume increasing Y
        unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
            static_cast<size_t>(pixel_data_size * y * width) +
            channel_offset_list[c] * static_cast<size_t>(width)));
        for (int x = 0; x < width; x++) {
          unsigned int val = reinterpret_cast<const unsigned int * const *>(
              images)[c][(y + start_y) * x_stride + x];
          tinyexr::swap4(&val);
          // line_ptr[x] = val;
          tinyexr::cpy4(line_ptr + x, &val);
        }
      }
    }
  }
  // Compress the staged block and append to out_data.
  if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
    // 4 byte: scan line
    // 4 byte: data size
    // ~ : pixel data(uncompressed)
    out_data.insert(out_data.end(), buf.begin(), buf.end());
  } else if ((compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
             (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
    std::vector<unsigned char> block(mz_compressBound(
        static_cast<unsigned long>(buf.size())));
#else
    std::vector<unsigned char> block(
        compressBound(static_cast<uLong>(buf.size())));
#endif
    tinyexr::tinyexr_uint64 outSize = block.size();
    tinyexr::CompressZip(&block.at(0), outSize,
                         reinterpret_cast<const unsigned char *>(&buf.at(0)),
                         static_cast<unsigned long>(buf.size()));
    // 4 byte: scan line
    // 4 byte: data size
    // ~ : pixel data(compressed)
    unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
    // (buf.size() * 3) / 2 would be enough.
    std::vector<unsigned char> block((buf.size() * 3) / 2);
    tinyexr::tinyexr_uint64 outSize = block.size();
    tinyexr::CompressRle(&block.at(0), outSize,
                         reinterpret_cast<const unsigned char *>(&buf.at(0)),
                         static_cast<unsigned long>(buf.size()));
    // 4 byte: scan line
    // 4 byte: data size
    // ~ : pixel data(compressed)
    unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
    unsigned int bufLen =
        8192 + static_cast<unsigned int>(
                   2 * static_cast<unsigned int>(
                           buf.size())); // @fixme { compute good bound. }
    std::vector<unsigned char> block(bufLen);
    unsigned int outSize = static_cast<unsigned int>(block.size());
    CompressPiz(&block.at(0), &outSize,
                reinterpret_cast<const unsigned char *>(&buf.at(0)),
                buf.size(), channels, width, num_lines);
    // 4 byte: scan line
    // 4 byte: data size
    // ~ : pixel data(compressed)
    unsigned int data_len = outSize;
    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
#else
    assert(0);  // PIZ support compiled out
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
    const ZFPCompressionParam* zfp_compression_param = reinterpret_cast<const ZFPCompressionParam*>(compression_param);
    std::vector<unsigned char> block;
    unsigned int outSize;
    tinyexr::CompressZfp(
        &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
        width, num_lines, static_cast<int>(channels.size()), *zfp_compression_param);
    // 4 byte: scan line
    // 4 byte: data size
    // ~ : pixel data(compressed)
    unsigned int data_len = outSize;
    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
#else
    (void)compression_param;
    assert(0);  // ZFP support compiled out
#endif
  } else {
    assert(0);
    return false;  // unknown compression type
  }
  return true;
}
// Encode every tile of a single (mip/rip) level image into per-tile chunks.
//
// Each chunk written to data_list[start_index + i] is laid out as:
//   5 x int header: tileX, tileY, levelX, levelY, data_len (each byte-swapped
//   to the file's little-endian order), followed by the pixel data produced
//   by EncodePixelData() (compressed per exr_header->compression_type).
//
// Tiles are encoded in parallel: with a std::thread pool when C++11 threads
// are available, otherwise with an (optional) OpenMP parallel-for. Note the
// loop body between the #if/#else/#endif markers below is shared by both
// paths — do not restructure the braces without tracking both branches.
//
// Returns TINYEXR_SUCCESS, or TINYEXR_ERROR_INVALID_DATA (appending text to
// *err when err is non-NULL) if any tile fails to encode.
static int EncodeTiledLevel(const EXRImage* level_image, const EXRHeader* exr_header,
                            const std::vector<tinyexr::ChannelInfo>& channels,
                            std::vector<std::vector<unsigned char> >& data_list,
                            size_t start_index, // for data_list
                            int num_x_tiles, int num_y_tiles,
                            const std::vector<size_t>& channel_offset_list,
                            int pixel_data_size,
                            const void* compression_param, // must be set if zfp compression is enabled
                            std::string* err) {
  int num_tiles = num_x_tiles * num_y_tiles;
  assert(num_tiles == level_image->num_tiles);
  // NOTE(review): this rejects a top (0,0) level whose dimensions are smaller
  // than one nominal tile — presumably a guard against malformed headers;
  // confirm this is intended for legitimately tiny tiled images.
  if ((exr_header->tile_size_x > level_image->width || exr_header->tile_size_y > level_image->height) &&
      level_image->level_x == 0 && level_image->level_y == 0) {
    if (err) {
      (*err) += "Failed to encode tile data.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
  // Shared failure flag; atomic under the threaded path so workers can set it
  // concurrently.
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  std::atomic<bool> invalid_data(false);
#else
  bool invalid_data(false);
#endif
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  std::vector<std::thread> workers;
  std::atomic<int> tile_count(0);  // next tile index to claim (work stealing)
  int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
  if (num_threads > int(num_tiles)) {
    num_threads = int(num_tiles);
  }
  for (int t = 0; t < num_threads; t++) {
    workers.emplace_back(std::thread([&]() {
      int i = 0;
      while ((i = tile_count++) < num_tiles) {
#else
  // Use signed int since some OpenMP compiler doesn't allow unsigned type for
  // `parallel for`
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < num_tiles; i++) {
#endif
    // --- per-tile body (shared by the thread and OpenMP/serial paths) ---
    size_t tile_idx = static_cast<size_t>(i);
    size_t data_idx = tile_idx + start_index;
    // Tiles are stored in row-major order within the level.
    int x_tile = i % num_x_tiles;
    int y_tile = i / num_x_tiles;
    EXRTile& tile = level_image->tiles[tile_idx];
    const unsigned char* const* images =
        static_cast<const unsigned char* const*>(tile.images);
    // Reserve room for the 5-int tile header; EncodePixelData appends the
    // compressed payload after it.
    data_list[data_idx].resize(5*sizeof(int));
    size_t data_header_size = data_list[data_idx].size();
    bool ret = EncodePixelData(data_list[data_idx],
                               images,
                               exr_header->compression_type,
                               0, // increasing y
                               tile.width,
                               exr_header->tile_size_y,
                               exr_header->tile_size_x,
                               0,
                               tile.height,
                               pixel_data_size,
                               channels,
                               channel_offset_list,
                               compression_param);
    if (!ret) {
      invalid_data = true;
      continue;
    }
    assert(data_list[data_idx].size() > data_header_size);
    int data_len = static_cast<int>(data_list[data_idx].size() - data_header_size);
    //tileX, tileY, levelX, levelY // pixel_data_size(int)
    memcpy(&data_list[data_idx][0], &x_tile, sizeof(int));
    memcpy(&data_list[data_idx][4], &y_tile, sizeof(int));
    memcpy(&data_list[data_idx][8], &level_image->level_x, sizeof(int));
    memcpy(&data_list[data_idx][12], &level_image->level_y, sizeof(int));
    memcpy(&data_list[data_idx][16], &data_len, sizeof(int));
    // Byte-swap the header fields to the file's little-endian order.
    swap4(reinterpret_cast<int*>(&data_list[data_idx][0]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][4]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][8]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][12]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][16]));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
      }
    }));
  }
  for (auto &t : workers) {
    t.join();
  }
#else
  }  // omp parallel
#endif
  if (invalid_data) {
    if (err) {
      (*err) += "Failed to encode tile data.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
  return TINYEXR_SUCCESS;
}
// Number of scanlines stored per chunk for the given compression scheme
// (per the OpenEXR spec: ZIP and ZFP pack 16 lines, PIZ packs 32,
// everything else — NONE, RLE, ZIPS — packs a single line).
static int NumScanlines(int compression_type) {
  switch (compression_type) {
    case TINYEXR_COMPRESSIONTYPE_ZIP:
    case TINYEXR_COMPRESSIONTYPE_ZFP:
      return 16;
    case TINYEXR_COMPRESSIONTYPE_PIZ:
      return 32;
    default:
      return 1;
  }
}
// Encode all chunks (scanline blocks or tiles, possibly over several mip/rip
// levels) of one part into data_list, and fill offset_data with the absolute,
// byte-swapped file offset of every chunk.
//
// chunk_offset is the absolute file position where this part's first chunk
// will be written; on return total_size is the position just past the last
// chunk, so consecutive parts can be chained. For multi-part files each chunk
// is preceded by a 4-byte part number, accounted for via `doffset`.
//
// Returns TINYEXR_SUCCESS or TINYEXR_ERROR_INVALID_DATA (error text appended
// to *err when provided).
static int EncodeChunk(const EXRImage* exr_image, const EXRHeader* exr_header,
                       const std::vector<ChannelInfo>& channels,
                       int num_blocks,
                       tinyexr_uint64 chunk_offset, // starting offset of current chunk
                       bool is_multipart,
                       OffsetData& offset_data, // output block offsets, must be initialized
                       std::vector<std::vector<unsigned char> >& data_list, // output
                       tinyexr_uint64& total_size, // output: ending offset of current chunk
                       std::string* err) {
  int num_scanlines = NumScanlines(exr_header->compression_type);
  data_list.resize(num_blocks);
  // Byte offset of each channel within one interleaved pixel, plus the total
  // pixel size, derived from the *requested* (output) pixel types.
  std::vector<size_t> channel_offset_list(
      static_cast<size_t>(exr_header->num_channels));
  int pixel_data_size = 0;
  {
    size_t channel_offset = 0;
    for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
      channel_offset_list[c] = channel_offset;
      if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
        pixel_data_size += sizeof(unsigned short);
        channel_offset += sizeof(unsigned short);
      } else if (channels[c].requested_pixel_type ==
                 TINYEXR_PIXELTYPE_FLOAT) {
        pixel_data_size += sizeof(float);
        channel_offset += sizeof(float);
      } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_UINT) {
        pixel_data_size += sizeof(unsigned int);
        channel_offset += sizeof(unsigned int);
      } else {
        assert(0);
      }
    }
  }
  const void* compression_param = 0;
#if TINYEXR_USE_ZFP
  tinyexr::ZFPCompressionParam zfp_compression_param;
  // Use ZFP compression parameter from custom attributes(if such a parameter
  // exists)
  {
    std::string e;
    bool ret = tinyexr::FindZFPCompressionParam(
        &zfp_compression_param, exr_header->custom_attributes,
        exr_header->num_custom_attributes, &e);
    if (!ret) {
      // Use predefined compression parameter.
      zfp_compression_param.type = 0;
      zfp_compression_param.rate = 2;
    }
    compression_param = &zfp_compression_param;
  }
#endif
  tinyexr_uint64 offset = chunk_offset;
  // Extra bytes per chunk: a 4-byte part number precedes every chunk in a
  // multi-part file.
  tinyexr_uint64 doffset = is_multipart ? 4u : 0u;
  if (exr_image->tiles) {
    // ---- tiled part: walk the chain of level images ----
    const EXRImage* level_image = exr_image;
    size_t block_idx = 0;
    tinyexr::tinyexr_uint64 block_data_size = 0;  // accumulated but not otherwise used
    int num_levels = (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ?
        offset_data.num_x_levels : (offset_data.num_x_levels * offset_data.num_y_levels);
    for (int level_index = 0; level_index < num_levels; ++level_index) {
      if (!level_image) {
        if (err) {
          (*err) += "Invalid number of tiled levels for EncodeChunk\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      // The level chain must be ordered exactly as the offset table expects.
      int level_index_from_image = LevelIndex(level_image->level_x, level_image->level_y,
                                              exr_header->tile_level_mode, offset_data.num_x_levels);
      if (level_index_from_image != level_index) {
        if (err) {
          (*err) += "Incorrect level ordering in tiled image\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      int num_y_tiles = (int)offset_data.offsets[level_index].size();
      assert(num_y_tiles);
      int num_x_tiles = (int)offset_data.offsets[level_index][0].size();
      assert(num_x_tiles);
      std::string e;
      int ret = EncodeTiledLevel(level_image,
                                 exr_header,
                                 channels,
                                 data_list,
                                 block_idx,
                                 num_x_tiles,
                                 num_y_tiles,
                                 channel_offset_list,
                                 pixel_data_size,
                                 compression_param,
                                 &e);
      if (ret != TINYEXR_SUCCESS) {
        if (!e.empty() && err) {
          (*err) += e;
        }
        return ret;
      }
      // Record the (byte-swapped) absolute offset of each tile chunk and
      // advance past its encoded size (+ optional part-number prefix).
      for (size_t j = 0; j < static_cast<size_t>(num_y_tiles); ++j)
        for (size_t i = 0; i < static_cast<size_t>(num_x_tiles); ++i) {
          offset_data.offsets[level_index][j][i] = offset;
          swap8(reinterpret_cast<tinyexr_uint64*>(&offset_data.offsets[level_index][j][i]));
          offset += data_list[block_idx].size() + doffset;
          block_data_size += data_list[block_idx].size();
          ++block_idx;
        }
      level_image = level_image->next_level;
    }
    assert(static_cast<int>(block_idx) == num_blocks);
    total_size = offset;
  } else { // scanlines
    std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
    std::atomic<bool> invalid_data(false);
    std::vector<std::thread> workers;
    std::atomic<int> block_count(0);  // next block index to claim
    int num_threads = std::min(std::max(1, int(std::thread::hardware_concurrency())), num_blocks);
    for (int t = 0; t < num_threads; t++) {
      workers.emplace_back(std::thread([&]() {
        int i = 0;
        while ((i = block_count++) < num_blocks) {
#else
    bool invalid_data(false);
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
    for (int i = 0; i < num_blocks; i++) {
#endif
      // --- per-block body (shared by the thread and OpenMP/serial paths) ---
      int start_y = num_scanlines * i;
      // Last block may be shorter than num_scanlines.
      int end_Y = (std::min)(num_scanlines * (i + 1), exr_image->height);
      int num_lines = end_Y - start_y;
      const unsigned char* const* images =
          static_cast<const unsigned char* const*>(exr_image->images);
      // Reserve room for the 2-int chunk header (y coordinate, data size).
      data_list[i].resize(2*sizeof(int));
      size_t data_header_size = data_list[i].size();
      bool ret = EncodePixelData(data_list[i],
                                 images,
                                 exr_header->compression_type,
                                 0, // increasing y
                                 exr_image->width,
                                 exr_image->height,
                                 exr_image->width,
                                 start_y,
                                 num_lines,
                                 pixel_data_size,
                                 channels,
                                 channel_offset_list,
                                 compression_param);
      if (!ret) {
        invalid_data = true;
        continue; // "break" cannot be used with OpenMP
      }
      assert(data_list[i].size() > data_header_size);
      int data_len = static_cast<int>(data_list[i].size() - data_header_size);
      memcpy(&data_list[i][0], &start_y, sizeof(int));
      memcpy(&data_list[i][4], &data_len, sizeof(int));
      swap4(reinterpret_cast<int*>(&data_list[i][0]));
      swap4(reinterpret_cast<int*>(&data_list[i][4]));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
        }
      }));
    }
    for (auto &t : workers) {
      t.join();
    }
#else
    }  // omp parallel
#endif
    if (invalid_data) {
      if (err) {
        (*err) += "Failed to encode scanline data.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    // Offsets are sequential once all block sizes are known.
    for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
      offsets[i] = offset;
      tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
      offset += data_list[i].size() + doffset;
    }
    total_size = static_cast<size_t>(offset);
  }
  return TINYEXR_SUCCESS;
}
// can save a single or multi-part image (no deep* formats)
//
// Serializes num_parts images/headers into a freshly malloc()ed buffer
// (*memory_out, owned by the caller, released with free()). Returns the
// buffer size in bytes, or 0 on failure (error text in *err when provided).
//
// Layout written: magic, version flags, per-part attribute headers,
// chunk-offset tables, then the chunk data (each chunk prefixed with a
// 4-byte part number for multi-part files).
//
// Fixes vs. previous revision:
//  - ZFP pixel-type validation referenced the undeclared `exr_header`
//    instead of `exr_headers[i]` (compile error when TINYEXR_USE_ZFP is on).
//  - screenWindowCenter swaps used a garbled identifier; restored
//    `&center[0]` / `&center[1]`.
//  - malloc() result is now checked before writing into it.
static size_t SaveEXRNPartImageToMemory(const EXRImage* exr_images,
                                        const EXRHeader** exr_headers,
                                        unsigned int num_parts,
                                        unsigned char** memory_out, const char** err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      memory_out == NULL) {
    SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
                    err);
    return 0;
  }
  {
    // Validate compression settings of every part up front.
    for (unsigned int i = 0; i < num_parts; ++i) {
      if (exr_headers[i]->compression_type < 0) {
        SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
                        err);
        return 0;
      }
#if !TINYEXR_USE_PIZ
      if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
        SetErrorMessage("PIZ compression is not supported in this build",
                        err);
        return 0;
      }
#endif
#if !TINYEXR_USE_ZFP
      if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
        SetErrorMessage("ZFP compression is not supported in this build",
                        err);
        return 0;
      }
#else
      // ZFP operates on float data only.
      for (int c = 0; c < exr_headers[i]->num_channels; ++c) {
        if (exr_headers[i]->requested_pixel_types[c] != TINYEXR_PIXELTYPE_FLOAT) {
          SetErrorMessage("Pixel type must be FLOAT for ZFP compression",
                          err);
          return 0;
        }
      }
#endif
    }
  }
  std::vector<unsigned char> memory;
  // Header
  {
    const char header[] = { 0x76, 0x2f, 0x31, 0x01 };  // OpenEXR magic number
    memory.insert(memory.end(), header, header + 4);
  }
  // Version
  // using value from the first header
  int long_name = exr_headers[0]->long_name;
  {
    char marker[] = { 2, 0, 0, 0 };
    /* @todo
    if (exr_header->non_image) {
      marker[1] |= 0x8;
    }
    */
    // tiled
    if (num_parts == 1 && exr_images[0].tiles) {
      marker[1] |= 0x2;
    }
    // long_name
    if (long_name) {
      marker[1] |= 0x4;
    }
    // multipart
    if (num_parts > 1) {
      marker[1] |= 0x10;
    }
    memory.insert(memory.end(), marker, marker + 4);
  }
  // Count chunks per part and set up the (still-empty) offset tables.
  int total_chunk_count = 0;
  std::vector<int> chunk_count(num_parts);
  std::vector<OffsetData> offset_data(num_parts);
  for (unsigned int i = 0; i < num_parts; ++i) {
    if (!exr_images[i].tiles) {
      int num_scanlines = NumScanlines(exr_headers[i]->compression_type);
      chunk_count[i] =
          (exr_images[i].height + num_scanlines - 1) / num_scanlines;
      InitSingleResolutionOffsets(offset_data[i], chunk_count[i]);
      total_chunk_count += chunk_count[i];
    } else {
      {
        std::vector<int> num_x_tiles, num_y_tiles;
        PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
        chunk_count[i] =
            InitTileOffsets(offset_data[i], exr_headers[i], num_x_tiles, num_y_tiles);
        total_chunk_count += chunk_count[i];
      }
    }
  }
  // Write attributes to memory buffer.
  std::vector< std::vector<tinyexr::ChannelInfo> > channels(num_parts);
  {
    std::set<std::string> partnames;  // used to enforce unique part names
    for (unsigned int i = 0; i < num_parts; ++i) {
      //channels
      {
        std::vector<unsigned char> data;
        for (int c = 0; c < exr_headers[i]->num_channels; c++) {
          tinyexr::ChannelInfo info;
          info.p_linear = 0;
          info.pixel_type = exr_headers[i]->pixel_types[c];
          info.requested_pixel_type = exr_headers[i]->requested_pixel_types[c];
          info.x_sampling = 1;
          info.y_sampling = 1;
          info.name = std::string(exr_headers[i]->channels[c].name);
          channels[i].push_back(info);
        }
        tinyexr::WriteChannelInfo(data, channels[i]);
        tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
                                        static_cast<int>(data.size()));
      }
      {
        int comp = exr_headers[i]->compression_type;
        swap4(&comp);
        WriteAttributeToMemory(
            &memory, "compression", "compression",
            reinterpret_cast<const unsigned char*>(&comp), 1);
      }
      {
        int data[4] = { 0, 0, exr_images[i].width - 1, exr_images[i].height - 1 };
        swap4(&data[0]);
        swap4(&data[1]);
        swap4(&data[2]);
        swap4(&data[3]);
        WriteAttributeToMemory(
            &memory, "dataWindow", "box2i",
            reinterpret_cast<const unsigned char*>(data), sizeof(int) * 4);
        int data0[4] = { 0, 0, exr_images[0].width - 1, exr_images[0].height - 1 };
        swap4(&data0[0]);
        swap4(&data0[1]);
        swap4(&data0[2]);
        swap4(&data0[3]);
        // Note: must be the same across parts (currently, using value from the first header)
        WriteAttributeToMemory(
            &memory, "displayWindow", "box2i",
            reinterpret_cast<const unsigned char*>(data0), sizeof(int) * 4);
      }
      {
        unsigned char line_order = 0;  // @fixme { read line_order from EXRHeader }
        WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
                               &line_order, 1);
      }
      {
        // Note: must be the same across parts
        float aspectRatio = 1.0f;
        swap4(&aspectRatio);
        WriteAttributeToMemory(
            &memory, "pixelAspectRatio", "float",
            reinterpret_cast<const unsigned char*>(&aspectRatio), sizeof(float));
      }
      {
        float center[2] = { 0.0f, 0.0f };
        swap4(&center[0]);
        swap4(&center[1]);
        WriteAttributeToMemory(
            &memory, "screenWindowCenter", "v2f",
            reinterpret_cast<const unsigned char*>(center), 2 * sizeof(float));
      }
      {
        float w = 1.0f;
        swap4(&w);
        WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
                               reinterpret_cast<const unsigned char*>(&w),
                               sizeof(float));
      }
      if (exr_images[i].tiles) {
        // tiledesc: xSize (uint), ySize (uint), mode (1 byte: level mode in
        // low bits, rounding mode in bit 4).
        unsigned char tile_mode = static_cast<unsigned char>(exr_headers[i]->tile_level_mode & 0x3);
        if (exr_headers[i]->tile_rounding_mode) tile_mode |= (1u << 4u);
        //unsigned char data[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
        unsigned int datai[3] = { 0, 0, 0 };
        unsigned char* data = reinterpret_cast<unsigned char*>(&datai[0]);
        datai[0] = static_cast<unsigned int>(exr_headers[i]->tile_size_x);
        datai[1] = static_cast<unsigned int>(exr_headers[i]->tile_size_y);
        data[8] = tile_mode;
        swap4(reinterpret_cast<unsigned int*>(&data[0]));
        swap4(reinterpret_cast<unsigned int*>(&data[4]));
        WriteAttributeToMemory(
            &memory, "tiles", "tiledesc",
            reinterpret_cast<const unsigned char*>(data), 9);
      }
      // must be present for multi-part files - according to spec.
      if (num_parts > 1) {
        // name
        {
          size_t len = 0;
          if ((len = strlen(exr_headers[i]->name)) > 0) {
            partnames.emplace(exr_headers[i]->name);
            if (partnames.size() != i + 1) {
              SetErrorMessage("'name' attributes must be unique for a multi-part file", err);
              return 0;
            }
            WriteAttributeToMemory(
                &memory, "name", "string",
                reinterpret_cast<const unsigned char*>(exr_headers[i]->name),
                static_cast<int>(len));
          } else {
            SetErrorMessage("Invalid 'name' attribute for a multi-part file", err);
            return 0;
          }
        }
        // type
        {
          const char* type = "scanlineimage";
          if (exr_images[i].tiles) type = "tiledimage";
          WriteAttributeToMemory(
              &memory, "type", "string",
              reinterpret_cast<const unsigned char*>(type),
              static_cast<int>(strlen(type)));
        }
        // chunkCount
        {
          WriteAttributeToMemory(
              &memory, "chunkCount", "int",
              reinterpret_cast<const unsigned char*>(&chunk_count[i]),
              4);
        }
      }
      // Custom attributes
      if (exr_headers[i]->num_custom_attributes > 0) {
        for (int j = 0; j < exr_headers[i]->num_custom_attributes; j++) {
          tinyexr::WriteAttributeToMemory(
              &memory, exr_headers[i]->custom_attributes[j].name,
              exr_headers[i]->custom_attributes[j].type,
              reinterpret_cast<const unsigned char*>(
                  exr_headers[i]->custom_attributes[j].value),
              exr_headers[i]->custom_attributes[j].size);
        }
      }
      { // end of header
        memory.push_back(0);
      }
    }
  }
  if (num_parts > 1) {
    // end of header list
    memory.push_back(0);
  }
  // First chunk starts right after the headers and all offset tables.
  tinyexr_uint64 chunk_offset = memory.size() + size_t(total_chunk_count) * sizeof(tinyexr_uint64);
  tinyexr_uint64 total_size = 0;
  std::vector< std::vector< std::vector<unsigned char> > > data_lists(num_parts);
  for (unsigned int i = 0; i < num_parts; ++i) {
    std::string e;
    int ret = EncodeChunk(&exr_images[i], exr_headers[i],
                          channels[i],
                          chunk_count[i],
                          // starting offset of current chunk after part-number
                          chunk_offset,
                          num_parts > 1,
                          offset_data[i],  // output: block offsets, must be initialized
                          data_lists[i],   // output
                          total_size,      // output
                          &e);
    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }
      return 0;
    }
    chunk_offset = total_size;
  }
  // Allocating required memory
  if (total_size == 0) {  // something went wrong
    tinyexr::SetErrorMessage("Output memory size is zero", err);
    return 0;
  }
  (*memory_out) = static_cast<unsigned char*>(malloc(size_t(total_size)));
  if ((*memory_out) == NULL) {  // allocation failure was previously unchecked
    SetErrorMessage("Failed to allocate memory for EXR output", err);
    return 0;
  }
  // Writing header
  memcpy((*memory_out), &memory[0], memory.size());
  unsigned char* memory_ptr = *memory_out + memory.size();
  size_t sum = memory.size();  // running total of bytes written, for asserts
  // Writing offset data for chunks
  for (unsigned int i = 0; i < num_parts; ++i) {
    if (exr_images[i].tiles) {
      int num_levels = (exr_headers[i]->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ?
          offset_data[i].num_x_levels : (offset_data[i].num_x_levels * offset_data[i].num_y_levels);
      for (int level_index = 0; level_index < num_levels; ++level_index) {
        for (size_t j = 0; j < offset_data[i].offsets[level_index].size(); ++j) {
          size_t num_bytes = sizeof(tinyexr_uint64) * offset_data[i].offsets[level_index][j].size();
          sum += num_bytes;
          assert(sum <= total_size);
          memcpy(memory_ptr,
                 reinterpret_cast<unsigned char*>(&offset_data[i].offsets[level_index][j][0]),
                 num_bytes);
          memory_ptr += num_bytes;
        }
      }
    } else {
      size_t num_bytes = sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(chunk_count[i]);
      sum += num_bytes;
      assert(sum <= total_size);
      std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data[i].offsets[0][0];
      memcpy(memory_ptr, reinterpret_cast<unsigned char*>(&offsets[0]), num_bytes);
      memory_ptr += num_bytes;
    }
  }
  // Writing chunk data
  for (unsigned int i = 0; i < num_parts; ++i) {
    for (size_t j = 0; j < static_cast<size_t>(chunk_count[i]); ++j) {
      if (num_parts > 1) {
        // Multi-part chunks carry a 4-byte little-endian part number prefix.
        sum += 4;
        assert(sum <= total_size);
        unsigned int part_number = i;
        swap4(&part_number);
        memcpy(memory_ptr, &part_number, 4);
        memory_ptr += 4;
      }
      sum += data_lists[i][j].size();
      assert(sum <= total_size);
      memcpy(memory_ptr, &data_lists[i][j][0], data_lists[i][j].size());
      memory_ptr += data_lists[i][j].size();
    }
  }
  assert(sum == total_size);
  return total_size;  // OK
}
} // tinyexr
// Serialize a single-part EXR image into a malloc()ed buffer (*memory_out,
// caller frees). Returns the buffer size in bytes, or 0 on error (details in
// *err when provided). Thin wrapper over the N-part writer with one part.
size_t SaveEXRImageToMemory(const EXRImage* exr_image,
                            const EXRHeader* exr_header,
                            unsigned char** memory_out, const char** err) {
  const EXRHeader* headers[1] = { exr_header };
  return tinyexr::SaveEXRNPartImageToMemory(exr_image, headers, 1, memory_out,
                                            err);
}
// Serialize a single-part EXR image and write it to `filename`.
//
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (details in *err when
// provided). On Windows the filename is interpreted as UTF-8.
//
// Fix: the opened FILE* was leaked when in-memory serialization failed; it
// is now closed on that path as well.
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
                       const char *filename, const char **err) {
  if (exr_image == NULL || filename == NULL ||
      exr_header->compression_type < 0) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
#if !TINYEXR_USE_PIZ
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  }
#endif
#if !TINYEXR_USE_ZFP
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  }
#endif
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API)  // MSVC, MinGW GCC, or Clang
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "wb");
#endif
#else
  fp = fopen(filename, "wb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
  unsigned char *mem = NULL;
  size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
  if (mem_size == 0) {
    fclose(fp);  // was leaked on this error path before
    return TINYEXR_ERROR_SERIALZATION_FAILED;
  }
  size_t written_size = 0;
  if ((mem_size > 0) && mem) {
    written_size = fwrite(mem, 1, mem_size, fp);
  }
  free(mem);
  fclose(fp);
  if (written_size != mem_size) {
    tinyexr::SetErrorMessage("Cannot write a file", err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
  return TINYEXR_SUCCESS;
}
#if 0
// Serialize a multi-part EXR image (>= 2 parts) into a malloc()ed buffer
// (*memory_out, caller frees). Returns the buffer size in bytes, or 0 on
// error (details in *err when provided).
size_t SaveEXRMultipartImageToMemory(const EXRImage* exr_images,
                                     const EXRHeader** exr_headers,
                                     unsigned int num_parts,
                                     unsigned char** memory_out, const char** err) {
  // A multi-part file needs at least two parts and valid in/out pointers.
  const bool bad_args = (exr_images == NULL) || (exr_headers == NULL) ||
                        (num_parts < 2) || (memory_out == NULL);
  if (bad_args) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
                             err);
    return 0;
  }
  return tinyexr::SaveEXRNPartImageToMemory(exr_images, exr_headers, num_parts,
                                            memory_out, err);
}
// Serialize a multi-part EXR image (>= 2 parts) and write it to `filename`.
//
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (details in *err when
// provided). On Windows the filename is interpreted as UTF-8.
//
// Fix: the opened FILE* was leaked when in-memory serialization failed; it
// is now closed on that path as well.
int SaveEXRMultipartImageToFile(const EXRImage* exr_images,
                                const EXRHeader** exr_headers,
                                unsigned int num_parts,
                                const char* filename,
                                const char** err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts < 2) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRMultipartImageToFile",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API)  // MSVC, MinGW GCC, or Clang.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "wb");
#endif
#else
  fp = fopen(filename, "wb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
  unsigned char *mem = NULL;
  size_t mem_size = SaveEXRMultipartImageToMemory(exr_images, exr_headers, num_parts, &mem, err);
  if (mem_size == 0) {
    fclose(fp);  // was leaked on this error path before
    return TINYEXR_ERROR_SERIALZATION_FAILED;
  }
  size_t written_size = 0;
  if ((mem_size > 0) && mem) {
    written_size = fwrite(mem, 1, mem_size, fp);
  }
  free(mem);
  fclose(fp);
  if (written_size != mem_size) {
    tinyexr::SetErrorMessage("Cannot write a file", err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
  return TINYEXR_SUCCESS;
}
// Load a deep scanline EXR file (version 2.0 with the deep-data flag) into
// `deep_image`. Allocates image, offset_table and channel_names with
// malloc/strdup; the caller owns them.
//
// NOTE(review): this function lives inside a disabled (#if 0) region and has
// several latent issues flagged inline below (returning `false` from an
// int-returning function, decode always via DecompressZip, an empty
// allocation loop). Confirm/fix before re-enabling.
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
  if (deep_image == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
#ifdef _WIN32
  FILE *fp = NULL;
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API)  // MSVC, MinGW GCC, or Clang.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#endif
  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);
  if (filesize == 0) {
    fclose(fp);
    tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  // Slurp the whole file; all further parsing is done in memory.
  std::vector<char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    (void)ret;
  }
  fclose(fp);
  const char *head = &buf[0];    // base pointer (chunk offsets are relative to it)
  const char *marker = &buf[0];  // parse cursor
  // Header check.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};
    if (memcmp(marker, header, 4) != 0) {
      tinyexr::SetErrorMessage("Invalid magic number", err);
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }
  // Version, scanline.
  {
    // ver 2.0, scanline, deep bit on(0x800)
    // must be [2, 0, 0, 0]
    if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
      tinyexr::SetErrorMessage("Unsupported version or scanline", err);
      return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
    }
    marker += 4;
  }
  // Data window bounds (xMin, yMin, xMax, yMax) — -1 means "not seen yet".
  int dx = -1;
  int dy = -1;
  int dw = -1;
  int dh = -1;
  int num_scanline_blocks = 1;  // 16 for ZIP compression.
  int compression_type = -1;
  int num_channels = -1;
  std::vector<tinyexr::ChannelInfo> channels;
  // Read attributes
  size_t size = filesize - tinyexr::kEXRVersionSize;
  for (;;) {
    if (0 == size) {
      return TINYEXR_ERROR_INVALID_DATA;
    } else if (marker[0] == '\0') {
      // Empty attribute name terminates the header.
      marker++;
      size--;
      break;
    }
    std::string attr_name;
    std::string attr_type;
    std::vector<unsigned char> data;
    size_t marker_size;
    if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
                                marker, size)) {
      std::stringstream ss;
      ss << "Failed to parse attribute\n";
      tinyexr::SetErrorMessage(ss.str(), err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += marker_size;
    size -= marker_size;
    if (attr_name.compare("compression") == 0) {
      compression_type = data[0];
      if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
        std::stringstream ss;
        ss << "Unsupported compression type : " << compression_type;
        tinyexr::SetErrorMessage(ss.str(), err);
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }
      if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
        num_scanline_blocks = 16;
      }
    } else if (attr_name.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int
      if (!tinyexr::ReadChannelInfo(channels, data)) {
        tinyexr::SetErrorMessage("Failed to parse channel info", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
      num_channels = static_cast<int>(channels.size());
      if (num_channels < 1) {
        tinyexr::SetErrorMessage("Invalid channels format", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
    } else if (attr_name.compare("dataWindow") == 0) {
      memcpy(&dx, &data.at(0), sizeof(int));
      memcpy(&dy, &data.at(4), sizeof(int));
      memcpy(&dw, &data.at(8), sizeof(int));
      memcpy(&dh, &data.at(12), sizeof(int));
      tinyexr::swap4(&dx);
      tinyexr::swap4(&dy);
      tinyexr::swap4(&dw);
      tinyexr::swap4(&dh);
    } else if (attr_name.compare("displayWindow") == 0) {
      // Parsed but otherwise unused.
      int x;
      int y;
      int w;
      int h;
      memcpy(&x, &data.at(0), sizeof(int));
      memcpy(&y, &data.at(4), sizeof(int));
      memcpy(&w, &data.at(8), sizeof(int));
      memcpy(&h, &data.at(12), sizeof(int));
      tinyexr::swap4(&x);
      tinyexr::swap4(&y);
      tinyexr::swap4(&w);
      tinyexr::swap4(&h);
    }
  }
  assert(dx >= 0);
  assert(dy >= 0);
  assert(dw >= 0);
  assert(dh >= 0);
  assert(num_channels >= 1);
  int data_width = dw - dx + 1;
  int data_height = dh - dy + 1;
  // Read offset tables.
  int num_blocks = data_height / num_scanline_blocks;
  if (num_blocks * num_scanline_blocks < data_height) {
    num_blocks++;
  }
  std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));
  for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
    tinyexr::tinyexr_int64 offset;
    memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
    tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
    marker += sizeof(tinyexr::tinyexr_int64);  // = 8
    offsets[y] = offset;
  }
#if TINYEXR_USE_PIZ
  if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
  if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
    // OK
  } else {
    tinyexr::SetErrorMessage("Unsupported compression format", err);
    return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
  }
  deep_image->image = static_cast<float ***>(
      malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
  for (int c = 0; c < num_channels; c++) {
    deep_image->image[c] = static_cast<float **>(
        malloc(sizeof(float *) * static_cast<size_t>(data_height)));
    // NOTE(review): this inner loop is empty — rows are actually allocated
    // later, per scanline block. Confirm the loop can simply be removed.
    for (int y = 0; y < data_height; y++) {
    }
  }
  deep_image->offset_table = static_cast<int **>(
      malloc(sizeof(int *) * static_cast<size_t>(data_height)));
  for (int y = 0; y < data_height; y++) {
    deep_image->offset_table[y] = static_cast<int *>(
        malloc(sizeof(int) * static_cast<size_t>(data_width)));
  }
  // Decode each scanline block.
  for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
    const unsigned char *data_ptr =
        reinterpret_cast<const unsigned char *>(head + offsets[y]);
    // int: y coordinate
    // int64: packed size of pixel offset table
    // int64: packed size of sample data
    // int64: unpacked size of sample data
    // compressed pixel offset table
    // compressed sample data
    int line_no;
    tinyexr::tinyexr_int64 packedOffsetTableSize;
    tinyexr::tinyexr_int64 packedSampleDataSize;
    tinyexr::tinyexr_int64 unpackedSampleDataSize;
    memcpy(&line_no, data_ptr, sizeof(int));
    memcpy(&packedOffsetTableSize, data_ptr + 4,
           sizeof(tinyexr::tinyexr_int64));
    memcpy(&packedSampleDataSize, data_ptr + 12,
           sizeof(tinyexr::tinyexr_int64));
    memcpy(&unpackedSampleDataSize, data_ptr + 20,
           sizeof(tinyexr::tinyexr_int64));
    tinyexr::swap4(&line_no);
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));
    std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));
    // decode pixel offset table.
    // NOTE(review): decoding always uses DecompressZip regardless of the
    // declared compression_type (NONE/RLE/PIZ are accepted above but not
    // handled here) — confirm intent.
    {
      unsigned long dstLen =
          static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
      if (!tinyexr::DecompressZip(
              reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
              &dstLen, data_ptr + 28,
              static_cast<unsigned long>(packedOffsetTableSize))) {
        // NOTE(review): `return false` from an int function yields 0, which
        // equals TINYEXR_SUCCESS — failure is silently reported as success.
        // Should be a TINYEXR_ERROR_* code.
        return false;
      }
      assert(dstLen == pixelOffsetTable.size() * sizeof(int));
      for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
        deep_image->offset_table[y][i] = pixelOffsetTable[i];
      }
    }
    std::vector<unsigned char> sample_data(
        static_cast<size_t>(unpackedSampleDataSize));
    // decode sample data.
    {
      unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
      if (dstLen) {
        if (!tinyexr::DecompressZip(
                reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
                data_ptr + 28 + packedOffsetTableSize,
                static_cast<unsigned long>(packedSampleDataSize))) {
          // NOTE(review): same `return false` == TINYEXR_SUCCESS issue as above.
          return false;
        }
        assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
      }
    }
    // decode sample
    // Bytes per sample across all channels, and each channel's byte offset
    // within one sample.
    int sampleSize = -1;
    std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
    {
      int channel_offset = 0;
      for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
        channel_offset_list[i] = channel_offset;
        if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) {  // UINT
          channel_offset += 4;
        } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) {  // half
          channel_offset += 2;
        } else if (channels[i].pixel_type ==
                   TINYEXR_PIXELTYPE_FLOAT) {  // float
          channel_offset += 4;
        } else {
          assert(0);
        }
      }
      sampleSize = channel_offset;
    }
    assert(sampleSize >= 2);
    // The last pixel offset is the cumulative sample count for the line.
    assert(static_cast<size_t>(
               pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
               sampleSize) == sample_data.size());
    int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;
    //
    // Alloc memory
    //
    //
    // pixel data is stored as image[channels][pixel_samples]
    //
    {
      tinyexr::tinyexr_uint64 data_offset = 0;
      for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
        deep_image->image[c][y] = static_cast<float *>(
            malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));
        if (channels[c].pixel_type == 0) {  // UINT
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            unsigned int ui;
            unsigned int *src_ptr = reinterpret_cast<unsigned int *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(int)));
            tinyexr::cpy4(&ui, src_ptr);
            deep_image->image[c][y][x] = static_cast<float>(ui);  // @fixme
          }
          data_offset +=
              sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
        } else if (channels[c].pixel_type == 1) {  // half
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            tinyexr::FP16 f16;
            const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(short)));
            tinyexr::cpy2(&(f16.u), src_ptr);
            tinyexr::FP32 f32 = half_to_float(f16);
            deep_image->image[c][y][x] = f32.f;
          }
          data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
        } else {  // float
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            float f;
            const float *src_ptr = reinterpret_cast<float *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(float)));
            tinyexr::cpy4(&f, src_ptr);
            deep_image->image[c][y][x] = f;
          }
          data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
        }
      }
    }
  }  // y
  deep_image->width = data_width;
  deep_image->height = data_height;
  deep_image->channel_names = static_cast<const char **>(
      malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
    deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
    deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
  }
  deep_image->num_channels = num_channels;
  return TINYEXR_SUCCESS;
}
#endif
// Reset an EXRImage to a well-defined empty state. Safe to call on a
// freshly declared struct; a NULL argument is silently ignored.
void InitEXRImage(EXRImage *exr_image) {
  if (!exr_image) {
    return;
  }

  // Pixel storage (scanline buffers, tiles, and the mip/rip level chain)
  // starts out empty.
  exr_image->images = NULL;
  exr_image->tiles = NULL;
  exr_image->num_tiles = 0;
  exr_image->next_level = NULL;

  // Geometry and channel metadata.
  exr_image->width = 0;
  exr_image->height = 0;
  exr_image->num_channels = 0;
  exr_image->level_x = 0;
  exr_image->level_y = 0;
}
// Release an error message previously allocated by tinyexr::SetErrorMessage.
// NULL is accepted and ignored.
void FreeEXRErrorMessage(const char *msg) {
  if (msg == NULL) {
    return;
  }
  // The message was heap-allocated as a mutable buffer; drop const to free it.
  free(const_cast<char *>(msg));
}
// Zero-initialize an EXRHeader: all pointer members become NULL and all
// counts/flags become 0. A NULL argument is silently ignored.
void InitEXRHeader(EXRHeader *exr_header) {
  if (!exr_header) {
    return;
  }
  memset(exr_header, 0, sizeof(*exr_header));
}
// Free all heap allocations owned by an EXRHeader.
//
// Returns TINYEXR_SUCCESS, or TINYEXR_ERROR_INVALID_ARGUMENT when
// `exr_header` is NULL. Freed pointers are reset to NULL and the custom
// attribute count is zeroed, so calling this twice on the same header (or
// reusing the header afterwards) no longer double-frees.
int FreeEXRHeader(EXRHeader *exr_header) {
  if (exr_header == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // free(NULL) is a no-op, so no guards are needed.
  free(exr_header->channels);
  exr_header->channels = NULL;

  free(exr_header->pixel_types);
  exr_header->pixel_types = NULL;

  free(exr_header->requested_pixel_types);
  exr_header->requested_pixel_types = NULL;

  // Each custom attribute value is an individual heap allocation.
  for (int i = 0; i < exr_header->num_custom_attributes; i++) {
    free(exr_header->custom_attributes[i].value);
  }
  free(exr_header->custom_attributes);
  exr_header->custom_attributes = NULL;
  exr_header->num_custom_attributes = 0;

  // Clears the stored part name (memsets the fixed-size buffer).
  EXRSetNameAttr(exr_header, NULL);

  return TINYEXR_SUCCESS;
}
// Store `name` into the header's fixed-size name buffer.
//
// The buffer is always zeroed first, so the result is guaranteed
// NUL-terminated and passing name == NULL clears the attribute.
// Names longer than the buffer are silently truncated.
void EXRSetNameAttr(EXRHeader* exr_header, const char* name) {
  if (exr_header == NULL) {
    return;
  }
  // Use sizeof(...) instead of the magic constants 256/255 so the code
  // stays correct if the buffer size in EXRHeader ever changes.
  memset(exr_header->name, 0, sizeof(exr_header->name));
  if (name != NULL) {
    size_t len = std::min(strlen(name), sizeof(exr_header->name) - 1);
    if (len) {
      memcpy(exr_header->name, name, len);
    }
  }
}
int EXRNumLevels(const EXRImage* exr_image) {
if (exr_image == NULL) return 0;
if(exr_image->images) return 1; // scanlines
int levels = 1;
const EXRImage* level_image = exr_image;
while((level_image = level_image->next_level)) ++levels;
return levels;
}
// Free all pixel storage owned by an EXRImage (scanline buffers, tiles and
// the chain of lower mip/rip levels).
//
// Returns TINYEXR_SUCCESS, or TINYEXR_ERROR_INVALID_ARGUMENT for NULL input.
// Freed pointers are reset to NULL so a repeated call cannot double-free.
int FreeEXRImage(EXRImage *exr_image) {
  if (exr_image == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // Recursively release lower levels first. next_level is allocated with
  // `new` by the loader, hence `delete` (not free).
  if (exr_image->next_level) {
    FreeEXRImage(exr_image->next_level);
    delete exr_image->next_level;
    exr_image->next_level = NULL;
  }

  // Scanline storage: one buffer per channel. free(NULL) is a no-op.
  if (exr_image->images) {
    for (int i = 0; i < exr_image->num_channels; i++) {
      free(exr_image->images[i]);
    }
    free(exr_image->images);
    exr_image->images = NULL;
  }

  // Tiled storage: per-tile arrays of per-channel buffers.
  if (exr_image->tiles) {
    for (int tid = 0; tid < exr_image->num_tiles; tid++) {
      if (exr_image->tiles[tid].images) {
        for (int i = 0; i < exr_image->num_channels; i++) {
          free(exr_image->tiles[tid].images[i]);
        }
        free(exr_image->tiles[tid].images);
      }
    }
    free(exr_image->tiles);
    exr_image->tiles = NULL;
  }

  return TINYEXR_SUCCESS;
}
// Parse the EXR header of `filename` into `exr_header`.
//
// Reads the whole file into memory and defers to ParseEXRHeaderFromMemory.
// On failure an error message is stored via `err` (free with
// FreeEXRErrorMessage).
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
                           const char *filename, const char **err) {
  if (exr_header == NULL || exr_version == NULL || filename == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  // Compute the file size. ftell() returns -1 on failure; an empty or
  // unreadable file previously led to a zero-sized vector and undefined
  // behavior in `&buf[0]` / a throwing `buf.at(0)` below.
  fseek(fp, 0, SEEK_END);
  long pos = ftell(fp);
  fseek(fp, 0, SEEK_SET);
  if (pos < 1) {
    fclose(fp);
    tinyexr::SetErrorMessage("Empty or unreadable file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  size_t filesize = static_cast<size_t>(pos);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret = fread(&buf[0], 1, filesize, fp);
    fclose(fp);
    if (ret != filesize) {
      tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
                               err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
                                  err);
}
#if 0
// Parse all part headers of a multipart EXR from an in-memory buffer.
// Allocates *exr_headers (array of malloc'd EXRHeader*) and sets
// *num_headers. NOTE: this function is compiled out via #if 0 in this build.
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
                                      int *num_headers,
                                      const EXRVersion *exr_version,
                                      const unsigned char *memory, size_t size,
                                      const char **err) {
  if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
      exr_version == NULL) {
    // Invalid argument
    tinyexr::SetErrorMessage(
        "Invalid argument for ParseEXRMultipartHeaderFromMemory", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Data size too short", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Part headers start immediately after the fixed-size version block.
  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;

  std::vector<tinyexr::HeaderInfo> infos;

  // Parse headers until an empty header (a single '\0') terminates the list.
  for (;;) {
    tinyexr::HeaderInfo info;
    info.clear();

    std::string err_str;
    bool empty_header = false;
    int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
                             marker, marker_size);

    if (ret != TINYEXR_SUCCESS) {
      tinyexr::SetErrorMessage(err_str, err);
      return ret;
    }

    if (empty_header) {
      marker += 1;  // skip '\0'
      break;
    }

    // `chunkCount` must exist in the header.
    if (info.chunk_count == 0) {
      tinyexr::SetErrorMessage(
          "`chunkCount' attribute is not found in the header.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    infos.push_back(info);

    // move to next header.
    // NOTE(review): `size` is decremented here but `marker_size` — the bound
    // actually passed to ParseEXRHeader above — never is. This looks like a
    // latent bounds bug; confirm before re-enabling this #if 0 code.
    marker += info.header_len;
    size -= info.header_len;
  }

  // allocate memory for EXRHeader and create array of EXRHeader pointers.
  (*exr_headers) =
      static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));

  for (size_t i = 0; i < infos.size(); i++) {
    EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));
    memset(exr_header, 0, sizeof(EXRHeader));

    ConvertHeader(exr_header, infos[i]);

    // Propagate the multipart flag from the version block.
    exr_header->multipart = exr_version->multipart ? 1 : 0;

    (*exr_headers)[i] = exr_header;
  }

  (*num_headers) = static_cast<int>(infos.size());

  return TINYEXR_SUCCESS;
}
// File-based wrapper: slurps `filename` into memory and defers to
// ParseEXRMultipartHeaderFromMemory(). Compiled out via #if 0 in this build.
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
                                    const EXRVersion *exr_version,
                                    const char *filename, const char **err) {
  if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
      filename == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument for ParseEXRMultipartHeaderFromFile()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);

    // Short read: the file is truncated or unreadable.
    if (ret != filesize) {
      tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return ParseEXRMultipartHeaderFromMemory(
      exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}
#endif
// Parse the 8-byte EXR version block (magic number + version/flags) at the
// start of `memory` into `version`.
//
// Returns TINYEXR_SUCCESS, or an error code for NULL arguments, short input,
// a wrong magic number, or an unsupported version field.
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
                              size_t size) {
  if (version == NULL || memory == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *marker = memory;

  // Magic number check: 0x76 0x2f 0x31 0x01 ("v/1\x01").
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};

    if (memcmp(marker, header, 4) != 0) {
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }

  version->tiled = false;
  version->long_name = false;
  version->non_image = false;
  version->multipart = false;

  // Parse version header.
  {
    // Version field must be 2.
    if (marker[0] != 2) {
      return TINYEXR_ERROR_INVALID_EXR_VERSION;
    }
    // (A redundant `version == NULL` check used to sit here; it was dead
    // code since `version` was already dereferenced above.)
    version->version = 2;

    if (marker[1] & 0x2) {  // 9th bit: tiled format
      version->tiled = true;
    }
    if (marker[1] & 0x4) {  // 10th bit: long attribute names
      version->long_name = true;
    }
    if (marker[1] & 0x8) {  // 11th bit: non-image (deep image)
      version->non_image = true;
    }
    if (marker[1] & 0x10) {  // 12th bit: multipart
      version->multipart = true;
    }
  }

  return TINYEXR_SUCCESS;
}
// Read the first 8 bytes of `filename` and parse them as an EXR version
// block via ParseEXRVersionFromMemory.
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
  if (filename == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang.
  errno_t err = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (err != 0) {
    // TODO(syoyo): return wfopen_s erro code
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t file_size;
  // Compute size
  fseek(fp, 0, SEEK_END);
  file_size = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (file_size < tinyexr::kEXRVersionSize) {
    // BUGFIX: this early return previously leaked the FILE handle.
    fclose(fp);
    return TINYEXR_ERROR_INVALID_FILE;
  }

  unsigned char buf[tinyexr::kEXRVersionSize];
  size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
  fclose(fp);

  if (ret != tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_FILE;
  }

  return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}
#if 0
// Decode every part of a multipart EXR held in memory into `exr_images`
// (caller-provided array of num_parts images). Headers must already have
// been parsed into `exr_headers`. Compiled out via #if 0 in this build.
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
                                    const EXRHeader **exr_headers,
                                    unsigned int num_parts,
                                    const unsigned char *memory,
                                    const size_t size, const char **err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage(
        "Invalid argument for LoadEXRMultipartImageFromMemory()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // compute total header size.
  size_t total_header_size = 0;
  for (unsigned int i = 0; i < num_parts; i++) {
    if (exr_headers[i]->header_len == 0) {
      tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
      return TINYEXR_ERROR_INVALID_ARGUMENT;
    }

    total_header_size += exr_headers[i]->header_len;
  }

  const char *marker = reinterpret_cast<const char *>(
      memory + total_header_size + 4 +
      4);  // +8 for magic number and version header.

  marker += 1;  // Skip empty header.

  // NOTE 1:
  // In multipart image, There is 'part number' before chunk data.
  // 4 byte : part number
  // 4+     : chunk
  //
  // NOTE 2:
  // EXR spec says 'part number' is 'unsigned long' but actually this is
  // 'unsigned int(4 bytes)' in OpenEXR implementation...
  // http://www.openexr.com/openexrfilelayout.pdf

  // Load chunk offset table.
  std::vector<tinyexr::OffsetData> chunk_offset_table_list;
  chunk_offset_table_list.reserve(num_parts);
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    chunk_offset_table_list.resize(chunk_offset_table_list.size() + 1);
    tinyexr::OffsetData& offset_data = chunk_offset_table_list.back();
    if (!exr_headers[i]->tiled || exr_headers[i]->tile_level_mode == TINYEXR_TILE_ONE_LEVEL) {
      // Single-resolution part: one flat offset table of chunk_count entries.
      tinyexr::InitSingleResolutionOffsets(offset_data, exr_headers[i]->chunk_count);
      std::vector<tinyexr::tinyexr_uint64>& offset_table = offset_data.offsets[0][0];

      for (size_t c = 0; c < offset_table.size(); c++) {
        tinyexr::tinyexr_uint64 offset;
        memcpy(&offset, marker, 8);
        tinyexr::swap8(&offset);

        if (offset >= size) {
          tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
                                   err);
          return TINYEXR_ERROR_INVALID_DATA;
        }

        offset_table[c] = offset + 4;  // +4 to skip 'part number'
        marker += 8;
      }
    } else {
      // Tiled multi-level part: offset tables are indexed [level][dy][dx].
      {
        std::vector<int> num_x_tiles, num_y_tiles;
        tinyexr::PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
        int num_blocks = InitTileOffsets(offset_data, exr_headers[i], num_x_tiles, num_y_tiles);
        if (num_blocks != exr_headers[i]->chunk_count) {
          tinyexr::SetErrorMessage("Invalid offset table size.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
      }
      for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
        for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
          for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
            tinyexr::tinyexr_uint64 offset;
            memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
            tinyexr::swap8(&offset);
            if (offset >= size) {
              tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
                                       err);
              return TINYEXR_ERROR_INVALID_DATA;
            }
            offset_data.offsets[l][dy][dx] = offset + 4;  // +4 to skip 'part number'
            marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
          }
        }
      }
    }
  }

  // Decode image.
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    tinyexr::OffsetData &offset_data = chunk_offset_table_list[i];

    // First check 'part number' is identitical to 'i'
    for (unsigned int l = 0; l < offset_data.offsets.size(); ++l)
      for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy)
        for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
          const unsigned char *part_number_addr =
              memory + offset_data.offsets[l][dy][dx] - 4;  // -4 to move to 'part number' field.
          unsigned int part_no;
          memcpy(&part_no, part_number_addr, sizeof(unsigned int));  // 4
          tinyexr::swap4(&part_no);

          if (part_no != i) {
            tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.",
                                     err);
            return TINYEXR_ERROR_INVALID_DATA;
          }
        }

    std::string e;
    int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_data,
                                   memory, size, &e);

    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }
      return ret;
    }
  }

  return TINYEXR_SUCCESS;
}
// File-based wrapper: slurps `filename` into memory and defers to
// LoadEXRMultipartImageFromMemory(). Compiled out via #if 0 in this build.
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
                                  const EXRHeader **exr_headers,
                                  unsigned int num_parts, const char *filename,
                                  const char **err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0) {
    tinyexr::SetErrorMessage(
        "Invalid argument for LoadEXRMultipartImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret = fread(&buf[0], 1, filesize, fp);
    fclose(fp);

    // BUGFIX: a short read used to be ignored in release builds
    // (assert + (void)ret); treat it as an error instead, consistent with
    // the other file loaders in this file.
    if (ret != filesize) {
      tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
                                         &buf.at(0), filesize, err);
}
// Save an interleaved float image (1, 3, or 4 components) to an EXR file.
// Channels are written in (A)BGR order as most viewers expect. When
// save_as_fp16 > 0, pixels are stored as half floats. Compiled out via
// #if 0 in this build.
int SaveEXR(const float *data, int width, int height, int components,
            const int save_as_fp16, const char *outfilename, const char **err) {
  if ((components == 1) || components == 3 || components == 4) {
    // OK
  } else {
    std::stringstream ss;
    ss << "Unsupported component value : " << components << std::endl;

    tinyexr::SetErrorMessage(ss.str(), err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRHeader header;
  InitEXRHeader(&header);

  if ((width < 16) && (height < 16)) {
    // No compression for small image.
    header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE;
  } else {
    header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
  }

  EXRImage image;
  InitEXRImage(&image);

  image.num_channels = components;

  std::vector<float> images[4];

  if (components == 1) {
    images[0].resize(static_cast<size_t>(width * height));
    memcpy(images[0].data(), data, sizeof(float) * size_t(width * height));
  } else {
    images[0].resize(static_cast<size_t>(width * height));
    images[1].resize(static_cast<size_t>(width * height));
    images[2].resize(static_cast<size_t>(width * height));
    images[3].resize(static_cast<size_t>(width * height));

    // Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers
    for (size_t i = 0; i < static_cast<size_t>(width * height); i++) {
      images[0][i] = data[static_cast<size_t>(components) * i + 0];
      images[1][i] = data[static_cast<size_t>(components) * i + 1];
      images[2][i] = data[static_cast<size_t>(components) * i + 2];
      if (components == 4) {
        images[3][i] = data[static_cast<size_t>(components) * i + 3];
      }
    }
  }

  // Channel pointers in file order: (A)BGR.
  float *image_ptr[4] = {0, 0, 0, 0};
  if (components == 4) {
    image_ptr[0] = &(images[3].at(0));  // A
    image_ptr[1] = &(images[2].at(0));  // B
    image_ptr[2] = &(images[1].at(0));  // G
    image_ptr[3] = &(images[0].at(0));  // R
  } else if (components == 3) {
    image_ptr[0] = &(images[2].at(0));  // B
    image_ptr[1] = &(images[1].at(0));  // G
    image_ptr[2] = &(images[0].at(0));  // R
  } else if (components == 1) {
    image_ptr[0] = &(images[0].at(0));  // A
  }

  image.images = reinterpret_cast<unsigned char **>(image_ptr);
  image.width = width;
  image.height = height;

  header.num_channels = components;
  header.channels = static_cast<EXRChannelInfo *>(malloc(
      sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
  // Must be (A)BGR order, since most of EXR viewers expect this channel order.
  if (components == 4) {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "A", 255);
    strncpy_s(header.channels[1].name, "B", 255);
    strncpy_s(header.channels[2].name, "G", 255);
    strncpy_s(header.channels[3].name, "R", 255);
#else
    strncpy(header.channels[0].name, "A", 255);
    strncpy(header.channels[1].name, "B", 255);
    strncpy(header.channels[2].name, "G", 255);
    strncpy(header.channels[3].name, "R", 255);
#endif
    header.channels[0].name[strlen("A")] = '\0';
    header.channels[1].name[strlen("B")] = '\0';
    header.channels[2].name[strlen("G")] = '\0';
    header.channels[3].name[strlen("R")] = '\0';
  } else if (components == 3) {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "B", 255);
    strncpy_s(header.channels[1].name, "G", 255);
    strncpy_s(header.channels[2].name, "R", 255);
#else
    strncpy(header.channels[0].name, "B", 255);
    strncpy(header.channels[1].name, "G", 255);
    strncpy(header.channels[2].name, "R", 255);
#endif
    header.channels[0].name[strlen("B")] = '\0';
    header.channels[1].name[strlen("G")] = '\0';
    header.channels[2].name[strlen("R")] = '\0';
  } else {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "A", 255);
#else
    strncpy(header.channels[0].name, "A", 255);
#endif
    header.channels[0].name[strlen("A")] = '\0';
  }

  header.pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  header.requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  for (int i = 0; i < header.num_channels; i++) {
    header.pixel_types[i] =
        TINYEXR_PIXELTYPE_FLOAT;  // pixel type of input image

    if (save_as_fp16 > 0) {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_HALF;  // save with half(fp16) pixel format
    } else {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_FLOAT;  // save with float(fp32) pixel format(i.e.
                                    // no precision reduction)
    }
  }

  int ret = SaveEXRImageToFile(&image, &header, outfilename, err);

  // BUGFIX: free the header allocations on the failure path too — they were
  // previously leaked when SaveEXRImageToFile returned an error.
  free(header.channels);
  free(header.pixel_types);
  free(header.requested_pixel_types);

  return ret;
}
#endif
#ifdef __clang__
// zero-as-null-ppinter-constant
#pragma clang diagnostic pop
#endif
#endif // TINYEXR_IMPLEMENTATION_DEFINED
#endif // TINYEXR_IMPLEMENTATION
|
schedule.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/*
 * Run a target-teams parallel loop with `ordered schedule(dynamic)` over
 * [lb, ub) with the given stride, record a timestamp per iteration, and
 * verify the timestamps are monotonically increasing (i.e. the `ordered`
 * clause really serialized the region in iteration order).
 *
 * Returns 0 on success, 1 on failure (allocation failure or out-of-order
 * execution detected).
 */
int ordered_example(int lb, int ub, int stride)
{
    int i;
    /*
     * Number of iterations is ceil((ub-lb)/stride). The previous floor
     * division under-allocated `output` whenever (ub-lb) was not a
     * multiple of stride, causing out-of-bounds writes below.
     */
    int size = (ub - lb + stride - 1) / stride;
    double *output = (double*)malloc(size * sizeof(double));
    if (output == NULL) {
        printf("allocation failure\n");
        return 1;
    }
    #pragma omp target teams map(from:output[0:size])
    #pragma omp parallel for ordered schedule(dynamic)
    for (i=lb; i<ub; i+=stride) {
        #pragma omp ordered
        {
            /*
             * Requires device printf support, otherwise this may freeze.
             * The iteration indices below must print in ascending order
             * (0, 5, 10, ... for the default main() arguments).
             */
            printf(" %d\n", i);
            /* Timestamp each ordered region; monotonicity is checked below. */
            output[(i-lb)/stride] = omp_get_wtime();
        }
    }

    /* Verification: every later iteration must have a later timestamp. */
    for (int j = 0; j < size; j++) {
        for (int jj = j+1; jj < size; jj++) {
            if (output[j] > output[jj]) {
                printf("Fail to schedule in order.\n");
                free(output);
                return 1;
            }
        }
    }
    free(output);

    printf("test OK\n");
    return 0;
}
int main()
{
    /* Exercise the ordered-schedule example over [0, 100) with stride 5. */
    int rc = ordered_example(0, 100, 5);
    return rc;
}
|
parallel_variable_transfer_utility.h | /*
==============================================================================
KratosStructuralApplication
A library based on:
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi, Janosch Stascheit, Felix Nagel
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
janosch.stascheit@rub.de
nagel@sd.rub.de
- CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
- Ruhr-University Bochum, Institute for Structural Mechanics, Germany
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
/* *********************************************************
*
* Last Modified by: $Author: nelson $
* Date: $Date: 2008-12-09 15:23:36 $
* Revision: $Revision: 1.2 $
*
* ***********************************************************/
#if !defined(KRATOS_PARALLEL_VARIABLE_TRANSFER_UTILITY_INCLUDED )
#define KRATOS_PARALLEL_VARIABLE_TRANSFER_UTILITY_INCLUDED
//System includes
//External includes
#include "boost/smart_ptr.hpp"
//Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/variables.h"
#include "containers/array_1d.h"
#include "includes/element.h"
#include "integration/integration_point.h"
#include "geometries/geometry.h"
#include "linear_solvers/skyline_lu_factorization_solver.h"
#include "spaces/ublas_space.h"
// #include "spaces/parallel_ublas_space.h"
#include "geometries/hexahedra_3d_8.h"
#include "geometries/tetrahedra_3d_4.h"
namespace Kratos
{
class ParallelVariableTransferUtility
{
public:
    // Convenience aliases for the Kratos types used by this utility.
    typedef Dof<double> TDofType;
    typedef PointerVectorSet<TDofType, IndexedObject> DofsArrayType;
    typedef ModelPart::ElementsContainerType ElementsArrayType;
    typedef double* ContainerType;
    typedef Element::DofsVectorType DofsVectorType;
    typedef Geometry<Node<3> >::IntegrationPointsArrayType IntegrationPointsArrayType;
    typedef Geometry<Node<3> >::GeometryType GeometryType;
    typedef Geometry<Node<3> >::CoordinatesArrayType CoordinatesArrayType;
    // Serial UBLAS space; the parallel variant is kept commented out above.
    typedef UblasSpace<double, Matrix, Vector> SpaceType
    ;
    /**
     * Constructor. Only announces creation on stdout; no state to set up.
     */
    ParallelVariableTransferUtility()
    {
        std::cout << "ParallelVariableTransferUtility created" << std::endl;
    }
    /**
     * Destructor. Virtual so subclasses destruct correctly; nothing to release.
     */
    virtual ~ParallelVariableTransferUtility()
    {}
/**
* Initializes elements of target model part.
* @param rTarget new/target model part
* KLUDGE: new model part instance is not automatically initialized
*/
void InitializeModelPart( ModelPart& rTarget )
{
for( ModelPart::ElementIterator it = rTarget.ElementsBegin();
it!= rTarget.ElementsEnd(); it++ )
{
(*it).Initialize();
}
}
    /**
     * Transfer of nodal solution step variables.
     * This Transfers all solution step variables from r_old_model_part
     * to r_new_model_part.
     * To cope with moved meshes, the source model_part is resetted to its
     * reference configuration temporarily!
     * @param rSource source model_part
     * @param rTarget target model_part
     * TODO: find more elegant way to check existence of variables in each node
     */
    void TransferNodalVariables(ModelPart& rSource, ModelPart& rTarget)
    {
        // reset source model part to reference configuration
        // (mapping must be done in the undeformed geometry)
        for( ModelPart::NodeIterator it = rSource.NodesBegin() ;
                it != rSource.NodesEnd(); it++ )
        {
            (*it).X() = (*it).X0();
            (*it).Y() = (*it).Y0();
            (*it).Z() = (*it).Z0();
        }
        // reset target model part to reference configuration
        for( ModelPart::NodeIterator it = rTarget.NodesBegin() ;
                it != rTarget.NodesEnd(); it++ )
        {
            (*it).X() = (*it).X0();
            (*it).Y() = (*it).Y0();
            (*it).Z() = (*it).Z0();
        }
        // time_target = time_source
        ProcessInfo SourceCurrentProcessInfo= rSource.GetProcessInfo();
        rTarget.CloneTimeStep(SourceCurrentProcessInfo[TIME]);
        ElementsArrayType& OldMeshElementsArray= rSource.Elements();
        Element correspondingElement;
//        FixDataValueContainer newNodalValues;
//        FixDataValueContainer oldNodalValues;
        Point localPoint;

        // For each target node: locate the source element containing it,
        // then interpolate the solution step values at that local point.
        for(ModelPart::NodeIterator it = rTarget.NodesBegin() ;
                it != rTarget.NodesEnd() ; it++)
        {
            if(FindPartnerElement(*(it), OldMeshElementsArray,
                                  correspondingElement,localPoint))
            {
                // TransferVariables from Old Mesh to new Node
                // Displacement-type DOFs: transfer all time-derivative slots.
                if(it->HasDofFor(DISPLACEMENT_X)
                        || it->HasDofFor(DISPLACEMENT_Y)
                        || it->HasDofFor(DISPLACEMENT_Z))
                {
                    noalias(it->GetSolutionStepValue(DISPLACEMENT_NULL))=
                        MappedValue(correspondingElement,
                                    localPoint,DISPLACEMENT_NULL );
                    noalias(it->GetSolutionStepValue(DISPLACEMENT_EINS))=
                        MappedValue(correspondingElement,
                                    localPoint,DISPLACEMENT_EINS );
                    noalias(it->GetSolutionStepValue(DISPLACEMENT_NULL_DT))=
                        MappedValue(correspondingElement,
                                    localPoint,DISPLACEMENT_NULL_DT );
                    noalias(it->GetSolutionStepValue(ACCELERATION_NULL))=
                        MappedValue(correspondingElement,
                                    localPoint,ACCELERATION_NULL );
                    noalias(it->GetSolutionStepValue(DISPLACEMENT_OLD))=
                        MappedValue(correspondingElement,
                                    localPoint,DISPLACEMENT_OLD );
                }
                // Scalar water pressure DOFs and their time derivatives.
                if(it->HasDofFor(WATER_PRESSURE))
                {
                    it->GetSolutionStepValue(WATER_PRESSURE_NULL)=
                        MappedValuePressure(correspondingElement, localPoint,
                                            WATER_PRESSURE_NULL);
                    it->GetSolutionStepValue(WATER_PRESSURE_EINS)=
                        MappedValuePressure(correspondingElement, localPoint,
                                            WATER_PRESSURE_EINS);
                    it->GetSolutionStepValue(WATER_PRESSURE_NULL_DT)=
                        MappedValuePressure(correspondingElement, localPoint,
                                            WATER_PRESSURE_NULL_DT);
                    it->GetSolutionStepValue(WATER_PRESSURE_NULL_ACCELERATION)=
                        MappedValuePressure(correspondingElement, localPoint,
                                            WATER_PRESSURE_NULL_ACCELERATION);
                }
                // Scalar air pressure DOFs and their time derivatives.
                if(it->HasDofFor(AIR_PRESSURE))
                {
                    it->GetSolutionStepValue(AIR_PRESSURE_NULL)=
                        MappedValuePressure(correspondingElement, localPoint,
                                            AIR_PRESSURE_NULL);
                    it->GetSolutionStepValue(AIR_PRESSURE_EINS)=
                        MappedValuePressure(correspondingElement, localPoint,
                                            AIR_PRESSURE_EINS);
                    it->GetSolutionStepValue(AIR_PRESSURE_NULL_DT)=
                        MappedValuePressure(correspondingElement, localPoint,
                                            AIR_PRESSURE_NULL_DT);
                    it->GetSolutionStepValue(AIR_PRESSURE_NULL_ACCELERATION)=
                        MappedValuePressure(correspondingElement, localPoint,
                                            AIR_PRESSURE_NULL_ACCELERATION);
                }
                std::cout <<"VARIABLES TRANSFERRED" << std::endl;
            }
            else
            {
                std::cout<<"###### NO PARTNER FOUND IN OLD MESH : TransferNodalVariables(...)#####"<<std::endl;
            }
        }
        // restore source model_part to its deformed configuration
        for( ModelPart::NodeIterator it = rSource.NodesBegin() ;
                it != rSource.NodesEnd(); it++ )
        {
            (*it).X() = (*it).X0()+(*it).GetSolutionStepValue( DISPLACEMENT_X );
            (*it).Y() = (*it).Y0()+(*it).GetSolutionStepValue( DISPLACEMENT_Y );
            (*it).Z() = (*it).Z0()+(*it).GetSolutionStepValue( DISPLACEMENT_Z );
        }
        // restore target model_part to its deformed configuration
        for( ModelPart::NodeIterator it = rTarget.NodesBegin() ;
                it != rTarget.NodesEnd(); it++ )
        {
            (*it).X() = (*it).X0()+(*it).GetSolutionStepValue( DISPLACEMENT_X );
            (*it).Y() = (*it).Y0()+(*it).GetSolutionStepValue( DISPLACEMENT_Y );
            (*it).Z() = (*it).Z0()+(*it).GetSolutionStepValue( DISPLACEMENT_Z );
        }
    }
    /**
     * Transfer of INSITU_STRESS.
     * This transfers the in-situ stress from rSource to rTarget.
     * Both meshes are temporarily reset to their reference configuration
     * for the mapping, then restored to the deformed configuration.
     * @param rSource the source model part
     * @param rTarget the target model part
     */
    void TransferInSituStress( ModelPart& rSource, ModelPart& rTarget )
    {
        // reset original model part to reference configuration
        for( ModelPart::NodeIterator it = rSource.NodesBegin() ;
                it != rSource.NodesEnd(); it++ )
        {
            (*it).X() = (*it).X0();
            (*it).Y() = (*it).Y0();
            (*it).Z() = (*it).Z0();
        }
        for( ModelPart::NodeIterator it = rTarget.NodesBegin() ;
                it != rTarget.NodesEnd(); it++ )
        {
            (*it).X() = (*it).X0();
            (*it).Y() = (*it).Y0();
            (*it).Z() = (*it).Z0();
        }

        // Smooth gauss-point stresses to source nodes, then interpolate
        // back onto the target mesh's gauss points.
        TransferVariablesToNodes(rSource, INSITU_STRESS);
//        TransferVariablesBetweenMeshes(rSource, rTarget,INSITU_STRESS);
//        TransferVariablesToGaussPoints(rTarget, INSITU_STRESS);
        TransferVariablesToGaussPoints(rSource, rTarget, INSITU_STRESS);

        // restore model_part to deformed configuration
        for( ModelPart::NodeIterator it = rSource.NodesBegin() ;
                it != rSource.NodesEnd(); it++ )
        {
            (*it).X() = (*it).X0()+(*it).GetSolutionStepValue( DISPLACEMENT_X );
            (*it).Y() = (*it).Y0()+(*it).GetSolutionStepValue( DISPLACEMENT_Y );
            (*it).Z() = (*it).Z0()+(*it).GetSolutionStepValue( DISPLACEMENT_Z );
        }
        for( ModelPart::NodeIterator it = rTarget.NodesBegin() ;
                it != rTarget.NodesEnd(); it++ )
        {
            (*it).X() = (*it).X0()+(*it).GetSolutionStepValue( DISPLACEMENT_X );
            (*it).Y() = (*it).Y0()+(*it).GetSolutionStepValue( DISPLACEMENT_Y );
            (*it).Z() = (*it).Z0()+(*it).GetSolutionStepValue( DISPLACEMENT_Z );
        }
    }
/**
* Transfer of integration point variables.
* This Transfers all variables on integration points from r_old_model_part
* to r_new_model_part.
* To cope with moved meshes, the source model_part is resetted to its
* reference configuration temporarily!
* @param r_old_model_part source model_part
* @param r_new_model_part target model_part
* TODO: find more elegant way to check existence of variables in each node
* CAUTION: THIS MAY CREATE VARIABLES ON NODES THAT MIGHT CAUSE A SEGMENTATION
* FAULT ON RUNTIME
*/
void TransferConstitutiveLawVariables(ModelPart& rSource, ModelPart& rTarget)
{
    // Temporarily move source and target meshes back to the reference
    // configuration so that the spatial search and the interpolation work
    // on the undeformed geometry.
    for( ModelPart::NodeIterator it = rSource.NodesBegin(); it != rSource.NodesEnd(); it++ )
    {
        it->X() = it->X0();
        it->Y() = it->Y0();
        it->Z() = it->Z0();
    }
    for( ModelPart::NodeIterator it = rTarget.NodesBegin(); it != rTarget.NodesEnd(); it++ )
    {
        it->X() = it->X0();
        it->Y() = it->Y0();
        it->Z() = it->Z0();
    }
    // Project the Gauss point values to the source nodes, then interpolate
    // them onto the integration points of the target mesh.
    TransferVariablesToNodes(rSource, ELASTIC_LEFT_CAUCHY_GREEN_OLD);
    TransferVariablesToGaussPoints( rSource, rTarget, ELASTIC_LEFT_CAUCHY_GREEN_OLD);
    // Restore the deformed configuration of both meshes.
    for( ModelPart::NodeIterator it = rSource.NodesBegin(); it != rSource.NodesEnd(); it++ )
    {
        it->X() = it->X0() + it->GetSolutionStepValue( DISPLACEMENT_X );
        it->Y() = it->Y0() + it->GetSolutionStepValue( DISPLACEMENT_Y );
        it->Z() = it->Z0() + it->GetSolutionStepValue( DISPLACEMENT_Z );
    }
    for( ModelPart::NodeIterator it = rTarget.NodesBegin(); it != rTarget.NodesEnd(); it++ )
    {
        it->X() = it->X0() + it->GetSolutionStepValue( DISPLACEMENT_X );
        it->Y() = it->Y0() + it->GetSolutionStepValue( DISPLACEMENT_Y );
        it->Z() = it->Z0() + it->GetSolutionStepValue( DISPLACEMENT_Z );
    }
}
/**
* Transfer of rThisVariable stored on nodes to integration point via
* approximation by shape functions
* @param model_part model_part on which the transfer should be done
* @param rThisVariable Matrix-Variable which should be transferred
* @see TransferVariablesToGaussPoints(ModelPart& model_part,
Variable<Kratos::Vector>& rThisVariable)
* @see TransferVariablesToGaussPoints(ModelPart& model_part,
Variable<double>& rThisVariable)
*/
void TransferVariablesToGaussPoints(ModelPart& model_part,
Variable<Kratos::Matrix>& rThisVariable)
{
ElementsArrayType& ElementsArray= model_part.Elements();
// Partition the element array into one contiguous chunk per thread.
int number_of_threads = omp_get_max_threads();
vector<unsigned int> element_partition;
CreatePartition(number_of_threads, ElementsArray.size(), element_partition);
KRATOS_WATCH( number_of_threads );
KRATOS_WATCH( element_partition );
double start_prod = omp_get_wtime();
// Each thread writes only to the elements of its own chunk, so no
// synchronisation is needed inside the loop.
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
ElementsArrayType::ptr_iterator it_begin=ElementsArray.ptr_begin()+element_partition[k];
ElementsArrayType::ptr_iterator it_end=ElementsArray.ptr_begin()+element_partition[k+1];
for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
{
const IntegrationPointsArrayType& integration_points =
(*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
std::vector<Matrix> ValuesOnIntPoint(integration_points.size());
// Shape function values, one row per integration point, one column per node.
const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
for( unsigned int PointNumber = 0;
PointNumber<integration_points.size();
PointNumber++)
{
// Start from a zero 3x3 matrix and accumulate the
// shape-function-weighted nodal values at this integration point.
ValuesOnIntPoint[PointNumber].resize(3,3,false);
noalias(ValuesOnIntPoint[PointNumber])= ZeroMatrix(3,3);
for(unsigned int node= 0; node< (*it)->GetGeometry().size(); node++)
{
ValuesOnIntPoint[PointNumber]
+=Ncontainer(PointNumber, node)*
(*it)->GetGeometry()[node].GetSolutionStepValue(rThisVariable);
}
}
(*it)->SetValueOnIntegrationPoints( rThisVariable, ValuesOnIntPoint,
model_part.GetProcessInfo());
}
}
double stop_prod = omp_get_wtime();
std::cout << "transfer time: " << stop_prod - start_prod << std::endl;
}
/**
* Transfer of rThisVariable stored on nodes to integration point via
* approximation by shape functions
* @param model_part model_part on which the transfer should be done
* @param rThisVariable Vector-Variable which should be transferred
* @see TransferVariablesToGaussPoints(ModelPart& model_part,
Variable<Kratos::Matrix>& rThisVariable)
* @see TransferVariablesToGaussPoints(ModelPart& model_part,
Variable<double>& rThisVariable)
*/
void TransferVariablesToGaussPoints(ModelPart& model_part,
Variable<Kratos::Vector>& rThisVariable)
{
    ElementsArrayType& rElements = model_part.Elements();
    // Partition the element array into one contiguous chunk per thread.
    int num_threads = omp_get_max_threads();
    vector<unsigned int> partition;
    CreatePartition(num_threads, rElements.size(), partition);
    KRATOS_WATCH( num_threads );
    KRATOS_WATCH( partition );
    double time_begin = omp_get_wtime();
    // Each thread writes only to its own elements -> no synchronisation needed.
    #pragma omp parallel for
    for(int k=0; k<num_threads; k++)
    {
        ElementsArrayType::ptr_iterator chunk_begin = rElements.ptr_begin()+partition[k];
        ElementsArrayType::ptr_iterator chunk_end   = rElements.ptr_begin()+partition[k+1];
        for( ElementsArrayType::ptr_iterator it = chunk_begin; it != chunk_end; ++it )
        {
            GeometryType& geom = (*it)->GetGeometry();
            const IntegrationPointsArrayType& integration_points =
                geom.IntegrationPoints((*it)->GetIntegrationMethod());
            // Shape function values: one row per integration point, one column per node.
            const Matrix& Ncontainer = geom.ShapeFunctionsValues((*it)->GetIntegrationMethod());
            std::vector<Vector> ValuesOnIntPoint(integration_points.size());
            for( unsigned int PointNumber = 0; PointNumber < integration_points.size(); PointNumber++ )
            {
                Vector& rValue = ValuesOnIntPoint[PointNumber];
                // Start from a zero 6-vector and accumulate the
                // shape-function-weighted nodal values.
                rValue.resize(6,false);
                noalias(rValue) = ZeroVector(6);
                for(unsigned int node = 0; node < geom.size(); node++)
                {
                    rValue += Ncontainer(PointNumber, node)
                              * geom[node].GetSolutionStepValue(rThisVariable);
                }
            }
            (*it)->SetValueOnIntegrationPoints( rThisVariable, ValuesOnIntPoint,
                                                model_part.GetProcessInfo());
        }
    }
    double time_end = omp_get_wtime();
    std::cout << "transfer time: " << time_end - time_begin << std::endl;
}
/**
* Transfer of rThisVariable stored on nodes to integration point via
* approximation by shape functions
* @param model_part model_part on which the transfer should be done
* @param rThisVariable double-Variable which should be transferred
* @see TransferVariablesToGaussPoints(ModelPart& model_part,
Variable<Kratos::Matrix>& rThisVariable)
* @see TransferVariablesToGaussPoints(ModelPart& model_part,
Variable<Kratos::Vector>& rThisVariable)
*/
void TransferVariablesToGaussPoints(ModelPart& model_part,
Variable<double>& rThisVariable)
{
ElementsArrayType& ElementsArray= model_part.Elements();
// Partition the element array into one contiguous chunk per thread.
int number_of_threads = omp_get_max_threads();
vector<unsigned int> element_partition;
CreatePartition(number_of_threads, ElementsArray.size(), element_partition);
KRATOS_WATCH( number_of_threads );
KRATOS_WATCH( element_partition );
double start_prod = omp_get_wtime();
// Each thread writes only to the elements of its own chunk, so no
// synchronisation is needed inside the loop.
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
ElementsArrayType::ptr_iterator it_begin=ElementsArray.ptr_begin()+element_partition[k];
ElementsArrayType::ptr_iterator it_end=ElementsArray.ptr_begin()+element_partition[k+1];
for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
{
// Node index range; NodesDispMin-1..NodesDispMax-1 spans all nodes of
// the element geometry.
unsigned int NodesDispMin= 1;
unsigned int NodesDispMax= (*it)->GetGeometry().size();
const IntegrationPointsArrayType& integration_points =
(*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
std::vector<double> ValuesOnIntPoint(integration_points.size());
// Shape function values, one row per integration point, one column per node.
const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
for( unsigned int PointNumber = 0;
PointNumber<integration_points.size();
PointNumber++)
{
// Accumulate the shape-function-weighted nodal values at this
// integration point.
ValuesOnIntPoint[PointNumber]= 0.0;
for(unsigned int node= NodesDispMin-1; node< NodesDispMax; node++)
{
ValuesOnIntPoint[PointNumber]
+=Ncontainer(PointNumber, node)*
(*it)->GetGeometry()[node].GetSolutionStepValue(rThisVariable);
}
}
(*it)->SetValueOnIntegrationPoints( rThisVariable, ValuesOnIntPoint,
model_part.GetProcessInfo());
}
}
double stop_prod = omp_get_wtime();
std::cout << "transfer time: " << stop_prod - start_prod << std::endl;
}
/**
* Transfer of rThisVariable stored on nodes in source mesh to integration point of target
* mesh via approximation by shape functions
* @param rSource
* @param rTarget
* @param rThisVariable Vector-Variable which should be transferred
* @see TransferVariablesToGaussPoints(ModelPart& source_model_part, ModelPart&
source_model_part, Variable<Kratos::Matrix>& rThisVariable)
* @see TransferVariablesToGaussPoints(ModelPart& source_model_part, ModelPart&
source_model_part, Variable<double>& rThisVariable)
*/
void TransferVariablesToGaussPoints(ModelPart& rSource, ModelPart& rTarget,
Variable<Kratos::Vector>& rThisVariable)
{
    ElementsArrayType& SourceMeshElementsArray= rSource.Elements();
    ElementsArrayType& TargetMeshElementsArray= rTarget.Elements();
    // Partition the target element array into one contiguous chunk per thread.
    int number_of_threads = omp_get_max_threads();
    vector<unsigned int> element_partition;
    CreatePartition(number_of_threads, TargetMeshElementsArray.size(), element_partition);
    KRATOS_WATCH( number_of_threads );
    KRATOS_WATCH( element_partition );
    double start_prod = omp_get_wtime();
    #pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        ElementsArrayType::ptr_iterator it_begin=TargetMeshElementsArray.ptr_begin()+element_partition[k];
        ElementsArrayType::ptr_iterator it_end=TargetMeshElementsArray.ptr_begin()+element_partition[k+1];
        for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
        {
            const IntegrationPointsArrayType& integration_points
                = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
            std::vector<Vector> ValuesOnIntPoint(integration_points.size());
            for(unsigned int point=0; point< integration_points.size(); point++)
            {
                Point sourceLocalPoint;
                Point targetLocalPoint;
                noalias(targetLocalPoint)= integration_points[point];
                Point targetGlobalPoint;
                (*it)->GetGeometry().GlobalCoordinates(targetGlobalPoint,targetLocalPoint);
                Element sourceElement;
                // BUGFIX: ublas resize(6,false) leaves the entries
                // uninitialised; zero them so that integration points for
                // which no partner element is found in the source mesh get a
                // well-defined (zero) value instead of garbage.
                ValuesOnIntPoint[point].resize(6,false);
                noalias(ValuesOnIntPoint[point])= ZeroVector(6);
                // Locate the source element containing the target integration
                // point and interpolate the nodal values there.
                if(FindPartnerElement(targetGlobalPoint, SourceMeshElementsArray,
                                      sourceElement,sourceLocalPoint))
                {
                    noalias(ValuesOnIntPoint[point])=
                        ValueVectorInOldMesh(sourceElement, sourceLocalPoint, rThisVariable );
                }
            }
            (*it)->SetValueOnIntegrationPoints( rThisVariable, ValuesOnIntPoint,
                                                rTarget.GetProcessInfo());
        }
    }
    double stop_prod = omp_get_wtime();
    std::cout << "transfer time: " << stop_prod - start_prod << std::endl;
}
/**
* Transfer of rThisVariable stored on nodes in source mesh to integration point of target
* mesh via approximation by shape functions
* @param rSource
* @param rTarget
* @param rThisVariable Matrix-Variable which should be transferred
* @see TransferVariablesToGaussPoints(ModelPart& source_model_part,
* ModelPart& source_model_part, Variable<Kratos::Vector>& rThisVariable)
* @see TransferVariablesToGaussPoints(ModelPart& source_model_part,
* ModelPart& source_model_part, Variable<double>& rThisVariable)
*/
void TransferVariablesToGaussPoints(ModelPart& rSource,
                                    ModelPart& rTarget,
                                    Variable<Matrix> const& rThisVariable)
{
    ElementsArrayType const& SourceMeshElementsArray= rSource.Elements();
    ElementsArrayType& TargetMeshElementsArray= rTarget.Elements();
    // Partition the target element array into one contiguous chunk per thread.
    int number_of_threads = omp_get_max_threads();
    vector<unsigned int> element_partition;
    CreatePartition(number_of_threads, TargetMeshElementsArray.size(), element_partition);
    KRATOS_WATCH( number_of_threads );
    KRATOS_WATCH( element_partition );
    double start_prod = omp_get_wtime();
    #pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        ElementsArrayType::ptr_iterator it_begin=TargetMeshElementsArray.ptr_begin()+element_partition[k];
        ElementsArrayType::ptr_iterator it_end=TargetMeshElementsArray.ptr_begin()+element_partition[k+1];
        for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
        {
            const IntegrationPointsArrayType& integration_points
                = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
            std::vector<Matrix> ValuesOnIntPoint(integration_points.size());
            for(unsigned int point=0; point< integration_points.size(); point++)
            {
                Point sourceLocalPoint;
                Point targetLocalPoint;
                noalias(targetLocalPoint)= integration_points[point];
                Point targetGlobalPoint;
                (*it)->GetGeometry().GlobalCoordinates(targetGlobalPoint,targetLocalPoint);
                Element sourceElement;
                // BUGFIX: the resize/zero used to live inside the if-branch,
                // so integration points without a partner element ended up as
                // default-constructed 0x0 matrices. Always produce a
                // well-defined 3x3 (zero) matrix.
                ValuesOnIntPoint[point].resize(3,3,false);
                noalias(ValuesOnIntPoint[point])= ZeroMatrix(3,3);
                // Locate the source element containing the target integration
                // point and interpolate the nodal values there.
                if(FindPartnerElement(targetGlobalPoint, SourceMeshElementsArray,
                                      sourceElement, sourceLocalPoint))
                {
                    ValuesOnIntPoint[point] = ValueMatrixInOldMesh(sourceElement, sourceLocalPoint, rThisVariable );
                }
            }
            (*it)->SetValueOnIntegrationPoints( rThisVariable, ValuesOnIntPoint,
                                                rTarget.GetProcessInfo());
        }
    }
    double stop_prod = omp_get_wtime();
    std::cout << "transfer time: " << stop_prod - start_prod << std::endl;
}
/**
* Transfer of rThisVariable stored on nodes in source mesh to integration point of target
* mesh via approximation by shape functions
* @param rSource
* @param rTarget
* @param rThisVariable double-Variable which should be transferred
* @see TransferVariablesToGaussPoints(ModelPart& source_model_part, ModelPart&
source_model_part, Variable<Kratos::Matrix>& rThisVariable)
* @see TransferVariablesToGaussPoints(ModelPart& source_model_part, ModelPart&
source_model_part, Variable<Kratos::Vector>& rThisVariable)
*/
void TransferVariablesToGaussPoints(ModelPart& rSource, ModelPart& rTarget,
Variable<double>& rThisVariable)
{
ElementsArrayType& SourceMeshElementsArray= rSource.Elements();
ElementsArrayType& TargetMeshElementsArray= rTarget.Elements();
// Partition the target element array into one contiguous chunk per thread.
int number_of_threads = omp_get_max_threads();
vector<unsigned int> element_partition;
CreatePartition(number_of_threads, TargetMeshElementsArray.size(), element_partition);
KRATOS_WATCH( number_of_threads );
KRATOS_WATCH( element_partition );
double start_prod = omp_get_wtime();
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
ElementsArrayType::ptr_iterator it_begin=TargetMeshElementsArray.ptr_begin()+element_partition[k];
ElementsArrayType::ptr_iterator it_end=TargetMeshElementsArray.ptr_begin()+element_partition[k+1];
for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
{
const IntegrationPointsArrayType& integration_points
= (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
// std::vector<double>(n) value-initialises to 0.0, so integration
// points without a partner element below keep a well-defined zero.
std::vector<double> ValuesOnIntPoint(integration_points.size());
for(unsigned int point=0; point< integration_points.size(); point++)
{
Point sourceLocalPoint;
Point targetLocalPoint;
noalias(targetLocalPoint)= integration_points[point];
Point targetGlobalPoint;
(*it)->GetGeometry().GlobalCoordinates(targetGlobalPoint,targetLocalPoint);
Element sourceElement;
// Locate the source element containing the target integration point
// and interpolate the nodal values there.
if(FindPartnerElement(targetGlobalPoint, SourceMeshElementsArray,
sourceElement,sourceLocalPoint))
{
ValuesOnIntPoint[point]=
MappedValue(sourceElement, sourceLocalPoint, rThisVariable );
}
}
(*it)->SetValueOnIntegrationPoints( rThisVariable, ValuesOnIntPoint,
rTarget.GetProcessInfo());
}
}
double stop_prod = omp_get_wtime();
std::cout << "transfer time: " << stop_prod - start_prod << std::endl;
}
/**
* Transfer of rThisVariable defined on integration points to corresponding
 * nodal values. The transformation is done in a form that ensures a
 * minimization of the L_2-norm error \sum (rThisVariable - f(x))^2, where
 * f(x) = \sum_i shape_func_i * rThisVariable_i
* @param model_part model_part on which the transfer should be done
* @param rThisVariable Matrix-Variable which should be transferred
* @see TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Vector>& rThisVariable)
* @see TransferVariablesToNodes(ModelPart& model_part, Variable<double>& rThisVariable)
* @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
* Journal for numer. meth. in eng. 61 (2004) 2402--2427
* WARNING: this may cause segmentation faults as the respective variables
* will be created on nodal level while they are originally intended to be
* stored on integration points!
*/
void TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Matrix>& rThisVariable)
{
    ElementsArrayType& ElementsArray= model_part.Elements();
    // Initialise the nodal values (this creates rThisVariable on the nodes
    // if it does not exist yet -- see the WARNING in the documentation).
    for(ModelPart::NodeIterator it = model_part.NodesBegin();
            it != model_part.NodesEnd() ; it++)
    {
        it->GetSolutionStepValue(rThisVariable)
            = ZeroMatrix(3,3);
    }
    // Set up the global L_2-projection system M*g = b. One system is solved
    // per matrix component; all components share the Gram matrix M.
    SpaceType::MatrixType M(model_part.NumberOfNodes(),model_part.NumberOfNodes());
    SpaceType::VectorType g(model_part.NumberOfNodes());
    SpaceType::VectorType b(model_part.NumberOfNodes());
    noalias(M)= ZeroMatrix(model_part.NumberOfNodes(),model_part.NumberOfNodes());
    // Create a partition of the element array, one chunk per thread.
    int number_of_threads = omp_get_max_threads();
    vector<unsigned int> element_partition;
    CreatePartition(number_of_threads, ElementsArray.size(), element_partition);
    KRATOS_WATCH( number_of_threads );
    KRATOS_WATCH( element_partition );
    double start_prod = omp_get_wtime();
    #pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        ElementsArrayType::ptr_iterator it_begin=ElementsArray.ptr_begin()+element_partition[k];
        ElementsArrayType::ptr_iterator it_end=ElementsArray.ptr_begin()+element_partition[k+1];
        for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
        {
            const IntegrationPointsArrayType& integration_points
                = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
            GeometryType::JacobiansType J(integration_points.size());
            J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
            const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
            Matrix InvJ(3,3);
            double DetJ;
            for(unsigned int point=0; point< integration_points.size(); point++)
            {
                MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
                double dV= DetJ*integration_points[point].Weight();
                // BUGFIX: elements assigned to different threads can share
                // nodes, so the scattered "+=" assembly into the shared
                // global matrix M was a data race. Serialise the assembly.
                #pragma omp critical
                {
                    for(unsigned int prim=0; prim<(*it)->GetGeometry().size() ; prim++)
                    {
                        for(unsigned int sec=0; sec<(*it)->GetGeometry().size() ; sec++)
                        {
                            M(((*it)->GetGeometry()[prim].Id()-1),
                              ((*it)->GetGeometry()[sec].Id()-1))+=
                                Ncontainer(point, prim)*Ncontainer(point, sec)*dV;
                        }
                    }
                }
            }
        }
    }
    double stop_prod = omp_get_wtime();
    std::cout << "assembling time: " << stop_prod - start_prod << std::endl;
    for(unsigned int firstvalue=0; firstvalue<3; firstvalue++)
    {
        for(unsigned int secondvalue=0; secondvalue<3; secondvalue++)
        {
            noalias(g)= ZeroVector(model_part.NumberOfNodes());
            noalias(b)= ZeroVector(model_part.NumberOfNodes());
            // Transfer of Gauss point variables to nodal variables via
            // L_2-minimization, see Jiao & Heath "Common-refinement-based
            // data transfer ...", Int. J. Numer. Meth. Engng 61 (2004)
            // 2402--2427 for a general description.
            for( ElementsArrayType::ptr_iterator it = ElementsArray.ptr_begin();
                    it != ElementsArray.ptr_end();
                    ++it )
            {
                const IntegrationPointsArrayType& integration_points
                    = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
                GeometryType::JacobiansType J(integration_points.size());
                J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
                std::vector<Matrix> ValuesOnIntPoint(integration_points.size());
                (*it)->GetValueOnIntegrationPoints(rThisVariable, ValuesOnIntPoint, model_part.GetProcessInfo());
                const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
                Matrix InvJ(3,3);
                double DetJ;
                for(unsigned int point=0; point< integration_points.size(); point++)
                {
                    MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
                    double dV= DetJ*integration_points[point].Weight();
                    for(unsigned int prim=0 ; prim<(*it)->GetGeometry().size(); prim++)
                    {
                        b(((*it)->GetGeometry()[prim].Id()-1))
                            +=(ValuesOnIntPoint[point](firstvalue,secondvalue))
                            *Ncontainer(point, prim)*dV;
                    }
                }
            }
            double start_solve = omp_get_wtime();
            SkylineLUFactorizationSolver<SpaceType, SpaceType>().Solve(M, g, b);
            double stop_solve = omp_get_wtime();
            std::cout << "solving time: " << stop_solve - start_solve << std::endl;
            // Scatter the solved nodal values of this component back to the
            // nodes (node Ids are assumed contiguous, starting at 1).
            for(ModelPart::NodeIterator it = model_part.NodesBegin() ;
                    it != model_part.NodesEnd() ; it++)
            {
                it->GetSolutionStepValue(rThisVariable)(firstvalue,secondvalue)
                    = g((it->Id()-1));
            }
        }//END secondvalue
    }//END firstvalue
}
/**
* Transfer of rThisVariable defined on integration points to corresponding
* nodal values. The transformation is done in a form that ensures a minimization
* of L_2-norm error (/sum{rThisVariable- f(x)) whereas
* f(x)= /sum{shape_func_i*rThisVariable_i}
* @param model_part model_part on which the transfer should be done
* @param rThisVariable Matrix-Variable which should be transferred
* @see TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Matrix>& rThisVariable)
* @see TransferVariablesToNodes(ModelPart& model_part, Variable<double>& rThisVariable)
* @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
* Journal for numer. meth. in eng. 61 (2004) 2402--2427
* WARNING: this may cause segmentation faults as the respective variables
* will be created on nodal level while they are originally intended to be
* stored on integration points!
*/
void TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Vector>& rThisVariable)
{
    ElementsArrayType& ElementsArray= model_part.Elements();
    // Initialise the nodal values (this creates rThisVariable on the nodes
    // if it does not exist yet -- see the WARNING in the documentation).
    for(ModelPart::NodeIterator it = model_part.NodesBegin();
            it != model_part.NodesEnd() ; it++)
    {
        it->GetSolutionStepValue(rThisVariable)
            = ZeroVector(6);
    }
    // Set up the global L_2-projection system M*g = b. One system is solved
    // per vector component; all components share the Gram matrix M.
    SpaceType::MatrixType M(model_part.NumberOfNodes(),model_part.NumberOfNodes());
    SpaceType::VectorType g(model_part.NumberOfNodes());
    SpaceType::VectorType b(model_part.NumberOfNodes());
    noalias(M)= ZeroMatrix(model_part.NumberOfNodes(),model_part.NumberOfNodes());
    // Create a partition of the element array, one chunk per thread.
    int number_of_threads = omp_get_max_threads();
    vector<unsigned int> element_partition;
    CreatePartition(number_of_threads, ElementsArray.size(), element_partition);
    KRATOS_WATCH( number_of_threads );
    KRATOS_WATCH( element_partition );
    double start_prod = omp_get_wtime();
    #pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        ElementsArrayType::ptr_iterator it_begin=ElementsArray.ptr_begin()+element_partition[k];
        ElementsArrayType::ptr_iterator it_end=ElementsArray.ptr_begin()+element_partition[k+1];
        for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
        {
            const IntegrationPointsArrayType& integration_points
                = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
            GeometryType::JacobiansType J(integration_points.size());
            J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
            const Matrix& Ncontainer =
                (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
            Matrix InvJ(3,3);
            double DetJ;
            for(unsigned int point=0; point< integration_points.size(); point++)
            {
                MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
                double dV= DetJ*integration_points[point].Weight();
                // BUGFIX: elements assigned to different threads can share
                // nodes, so the scattered "+=" assembly into the shared
                // global matrix M was a data race. Serialise the assembly.
                #pragma omp critical
                {
                    for(unsigned int prim=0 ; prim<(*it)->GetGeometry().size(); prim++)
                    {
                        for(unsigned int sec=0 ; sec<(*it)->GetGeometry().size(); sec++)
                        {
                            M(((*it)->GetGeometry()[prim].Id()-1),
                              ((*it)->GetGeometry()[sec].Id()-1))+=
                                Ncontainer(point, prim)*Ncontainer(point, sec)*dV;
                        }
                    }
                }
            }
        }
    }
    double stop_prod = omp_get_wtime();
    std::cout << "assembling time: " << stop_prod - start_prod << std::endl;
    for(unsigned int firstvalue=0; firstvalue<6; firstvalue++)
    {
        noalias(g)= ZeroVector(model_part.NumberOfNodes());
        noalias(b)= ZeroVector(model_part.NumberOfNodes());
        // Transfer of Gauss point variables to nodal variables via
        // L_2-minimization, see Jiao & Heath "Common-refinement-based data
        // transfer ...", Int. J. Numer. Meth. Engng 61 (2004) 2402--2427
        // for a general description.
        for( ElementsArrayType::ptr_iterator it = ElementsArray.ptr_begin();
                it != ElementsArray.ptr_end();
                ++it )
        {
            const IntegrationPointsArrayType& integration_points
                = (*it)->GetGeometry().IntegrationPoints( (*it)->GetIntegrationMethod());
            GeometryType::JacobiansType J(integration_points.size());
            J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
            std::vector<Vector> ValuesOnIntPoint(integration_points.size());
            (*it)->GetValueOnIntegrationPoints(rThisVariable, ValuesOnIntPoint, model_part.GetProcessInfo());
            const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
            Matrix InvJ(3,3);
            double DetJ;
            for(unsigned int point=0; point< integration_points.size(); point++)
            {
                MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
                double dV= DetJ*integration_points[point].Weight();
                for(unsigned int prim=0 ; prim<(*it)->GetGeometry().size(); prim++)
                {
                    b(((*it)->GetGeometry()[prim].Id()-1))
                        +=(ValuesOnIntPoint[point](firstvalue))
                        *Ncontainer(point, prim)*dV;
                }
            }
        }
        double start_solve = omp_get_wtime();
        SkylineLUFactorizationSolver<SpaceType, SpaceType>().Solve(M, g, b);
        double stop_solve = omp_get_wtime();
        std::cout << "solving time: " << stop_solve - start_solve << std::endl;
        // Scatter the solved nodal values of this component back to the
        // nodes (node Ids are assumed contiguous, starting at 1).
        for(ModelPart::NodeIterator it = model_part.NodesBegin() ;
                it != model_part.NodesEnd() ; it++)
        {
            it->GetSolutionStepValue(rThisVariable)(firstvalue)
                = g((it->Id()-1));
        }
    }//END firstvalue
}
/**
* Transfer of rThisVariable defined on integration points to corresponding
* nodal values. The transformation is done in a form that ensures a minimization
* of L_2-norm error (/sum{rThisVariable- f(x)) whereas
* f(x)= /sum{shape_func_i*rThisVariable_i}
* @param model_part model_part on which the transfer should be done
* @param rThisVariable Matrix-Variable which should be transferred
* @see TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Matrix>& rThisVariable)
* @see TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Vector>& rThisVariable)
* @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
* Journal for numer. meth. in eng. 61 (2004) 2402--2427
* WARNING: this may cause segmentation faults as the respective variables
* will be created on nodal level while they are originally intended to be
* stored on integration points!
*/
void TransferVariablesToNodes(ModelPart& model_part, Variable<double>& rThisVariable)
{
    ElementsArrayType& ElementsArray= model_part.Elements();
    // Initialise the nodal values (this creates rThisVariable on the nodes
    // if it does not exist yet -- see the WARNING in the documentation).
    for(ModelPart::NodeIterator it = model_part.NodesBegin();
            it != model_part.NodesEnd() ; it++)
    {
        it->GetSolutionStepValue(rThisVariable)
            = 0.0;
    }
    // Set up the global L_2-projection system M*g = b.
    SpaceType::MatrixType M(model_part.NumberOfNodes(),model_part.NumberOfNodes());
    noalias(M)= ZeroMatrix(model_part.NumberOfNodes(),model_part.NumberOfNodes());
    SpaceType::VectorType g(model_part.NumberOfNodes());
    noalias(g)= ZeroVector(model_part.NumberOfNodes());
    SpaceType::VectorType b(model_part.NumberOfNodes());
    noalias(b)= ZeroVector(model_part.NumberOfNodes());
    // Transfer of Gauss point variables to nodal variables via
    // L_2-minimization, see Jiao & Heath "Common-refinement-based data
    // transfer ...", Int. J. Numer. Meth. Engng 61 (2004) 2402--2427
    // for a general description.
    // Create a partition of the element array, one chunk per thread.
    int number_of_threads = omp_get_max_threads();
    vector<unsigned int> element_partition;
    CreatePartition(number_of_threads, ElementsArray.size(), element_partition);
    KRATOS_WATCH( number_of_threads );
    KRATOS_WATCH( element_partition );
    double start_prod = omp_get_wtime();
    #pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        ElementsArrayType::ptr_iterator it_begin=ElementsArray.ptr_begin()+element_partition[k];
        ElementsArrayType::ptr_iterator it_end=ElementsArray.ptr_begin()+element_partition[k+1];
        for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
        {
            const IntegrationPointsArrayType& integration_points
                = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
            GeometryType::JacobiansType J(integration_points.size());
            J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
            std::vector<double> ValuesOnIntPoint(integration_points.size());
            (*it)->GetValueOnIntegrationPoints(rThisVariable, ValuesOnIntPoint, model_part.GetProcessInfo());
            const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
            Matrix InvJ(3,3);
            double DetJ;
            for(unsigned int point=0; point< integration_points.size(); point++)
            {
                MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
                double dV= DetJ*integration_points[point].Weight();
                // BUGFIX: elements assigned to different threads can share
                // nodes, so the scattered "+=" assembly into the shared
                // global objects b and M was a data race. Serialise the
                // assembly.
                #pragma omp critical
                {
                    for(unsigned int prim=0 ; prim<(*it)->GetGeometry().size(); prim++)
                    {
                        b(((*it)->GetGeometry()[prim].Id()-1))
                            +=(ValuesOnIntPoint[point])
                            *Ncontainer(point, prim)*dV;
                        for(unsigned int sec=0 ; sec<(*it)->GetGeometry().size(); sec++)
                        {
                            M(((*it)->GetGeometry()[prim].Id()-1),
                              ((*it)->GetGeometry()[sec].Id()-1))+=
                                Ncontainer(point, prim)*Ncontainer(point, sec)*dV;
                        }
                    }
                }
            }
        }
    }
    double stop_prod = omp_get_wtime();
    std::cout << "assembling time: " << stop_prod - start_prod << std::endl;
    double start_solve = omp_get_wtime();
    SkylineLUFactorizationSolver<SpaceType, SpaceType>().Solve(M, g, b);
    double stop_solve = omp_get_wtime();
    std::cout << "solving time: " << stop_solve - start_solve << std::endl;
    // Scatter the solved nodal values back to the nodes (node Ids are
    // assumed contiguous, starting at 1).
    for(ModelPart::NodeIterator it = model_part.NodesBegin() ;
            it != model_part.NodesEnd() ; it++)
    {
        it->GetSolutionStepValue(rThisVariable)
            = g((it->Id()-1));
    }
}
/**
* Transfer of rThisVariable stored on nodes from source mesh to target mesh.
* The transformation is done in a way that ensures a minimization
* of L_2-norm error (/sum{f_old(x)- f_new(x)) whereas
* f(x)_old/new= /sum{shape_func_i*rThisVariable_i}
* @param rSource source model_part
* @param rTarget target model_part
* @param rThisVariable Matrix-Variable which should be transferred
* @see TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget,
Variable<Kratos::Vector>& rThisVariable)
* @see TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget,
Variable<double>& rThisVariable)
* @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
* Journal for numer. meth. in eng. 61 (2004) 2402--2427
*/
void TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget,
Variable<Kratos::Matrix>& rThisVariable)
{
// L2-optimal transfer of a nodal 3x3 matrix variable from rSource to
// rTarget: a projection system M*g = b is solved once per matrix
// component, where M is the shape-function "mass" matrix of the target mesh.
// NOTE(review): node Ids are used as (Id()-1) matrix indices, so this
// assumes target-mesh node Ids are consecutive starting at 1 -- confirm.
ElementsArrayType& SourceMeshElementsArray= rSource.Elements();
ElementsArrayType& TargetMeshElementsArray= rTarget.Elements();
// Reset the target nodal values before accumulating the projection.
for(ModelPart::NodeIterator it = rTarget.NodesBegin();
it != rTarget.NodesEnd() ; it++)
{
it->GetSolutionStepValue(rThisVariable)
= ZeroMatrix(3,3);
}
//SetUpEquationSystem
SpaceType::MatrixType M(rTarget.NumberOfNodes(),rTarget.NumberOfNodes());
noalias(M)= ZeroMatrix(rTarget.NumberOfNodes(),rTarget.NumberOfNodes());
SpaceType::VectorType g(rTarget.NumberOfNodes());
SpaceType::VectorType b(rTarget.NumberOfNodes());
// Assemble the global mass matrix M once; it is identical for all nine
// matrix components.
for( ElementsArrayType::ptr_iterator it = TargetMeshElementsArray.ptr_begin();
it != TargetMeshElementsArray.ptr_end();
++it )
{
const IntegrationPointsArrayType& integration_points
= (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
GeometryType::JacobiansType J(integration_points.size());
J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
Matrix InvJ(3,3);
double DetJ;
for(unsigned int point=0; point< integration_points.size(); point++)
{
// InvJ itself is unused; the inversion is only performed to obtain
// DetJ for the integration weight.
MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
double dV= DetJ*integration_points[point].Weight();
for(unsigned int prim=0; prim<(*it)->GetGeometry().size() ; prim++)
{
for(unsigned int sec=0; sec<(*it)->GetGeometry().size() ; sec++)
{
M(((*it)->GetGeometry()[prim].Id()-1), ((*it)->GetGeometry()[sec].Id()-1))+=
Ncontainer(point, prim)*Ncontainer(point, sec)*dV;
}
}
}
}
// Project each of the nine matrix components independently.
for(unsigned int firstvalue= 0; firstvalue< 3; firstvalue++)
{
for(unsigned int secondvalue= 0; secondvalue< 3; secondvalue++)
{
noalias(b)= ZeroVector(rTarget.NumberOfNodes());
noalias(g)= ZeroVector(rTarget.NumberOfNodes());
//Transfer of Gaussian variables to nodal variables via L_2-Minimization
// see Jiao + Heath "Common-refinement-based data transfer ..."
// International Journal for numerical methods in engineering 61 (2004) 2402--2427
// for general description of L_2-Minimization
// Assemble the right-hand side: at every target integration point,
// evaluate the source-mesh value at the same physical location.
for( ElementsArrayType::ptr_iterator it = TargetMeshElementsArray.ptr_begin();
it != TargetMeshElementsArray.ptr_end();
++it )
{
const IntegrationPointsArrayType& integration_points
= (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
GeometryType::JacobiansType J(integration_points.size());
J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
Matrix InvJ(3,3);
double DetJ;
for(unsigned int point=0; point< integration_points.size(); point++)
{
MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
Point sourceLocalPoint;
Point targetLocalPoint;
noalias(targetLocalPoint)= integration_points[point];
Point targetGlobalPoint;
(*it)->GetGeometry().GlobalCoordinates(targetGlobalPoint,
targetLocalPoint);
Element sourceElement;
double functionValue;
//Calculate Value of rVariable(firstvalue, secondvalue) in OldMesh
if(FindPartnerElement(targetGlobalPoint, SourceMeshElementsArray,
sourceElement,sourceLocalPoint))
{
functionValue=
ValueMatrixInOldMesh( sourceElement,sourceLocalPoint,rThisVariable, firstvalue, secondvalue );
}
else
{
// Integration points without a source partner simply do not
// contribute to the RHS.
std::cout<<"###### NO PARTNER FOUND IN OLD MESH : TransferVariablesBetweenMeshes(...Matrix...)#####"<<std::endl;
continue;
}
double dV= DetJ*integration_points[point].Weight();
for(unsigned int prim=0; prim<(*it)->GetGeometry().size(); prim++)
{
b(((*it)->GetGeometry()[prim].Id()-1))
+=functionValue
*Ncontainer(point, prim)*dV;
}
}
}
SkylineLUFactorizationSolver<SpaceType, SpaceType>().Solve(M, g, b);
// Write the projected component back to the target nodes.
for(ModelPart::NodeIterator it = rTarget.NodesBegin() ;
it != rTarget.NodesEnd() ; it++)
{
it->GetSolutionStepValue(rThisVariable)(firstvalue,secondvalue)
= g((it->Id()-1));
}
}//END secondvalue
}//END firstvalue
}
/**
* Transfer of rThisVariable stored on nodes from source mesh to target mesh.
* The transformation is done in a way that ensures a minimization
* of L_2-norm error (/sum{f_old(x)- f_new(x)) whereas
* f(x)_old/new= /sum{shape_func_i*rThisVariable_i}
* @param rSource source model_part
* @param rTarget target model_part
* @param rThisVariable Vector-Variable which should be transferred
* @see TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget,
Variable<Kratos::Matrix>& rThisVariable)
* @see TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTargetw,
Variable<double>& rThisVariable)
* @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
* Journal for numer. meth. in eng. 61 (2004) 2402--2427
*/
void TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget,
Variable<Kratos::Vector>& rThisVariable)
{
// L2-optimal transfer of a nodal 6-component vector variable from rSource
// to rTarget: solve M*g = b per component, M being the target-mesh
// shape-function "mass" matrix.
// NOTE(review): (Id()-1) is used as a matrix index -- assumes target node
// Ids are consecutive starting at 1; confirm for the given model parts.
ElementsArrayType& SourceMeshElementsArray= rSource.Elements();
ElementsArrayType& TargetMeshElementsArray= rTarget.Elements();
// Reset the target nodal values before the projection.
for(ModelPart::NodeIterator it = rTarget.NodesBegin();
it != rTarget.NodesEnd() ; it++)
{
it->GetSolutionStepValue(rThisVariable)
= ZeroVector(6);
}
//SetUpEquationSystem
SpaceType::MatrixType M(rTarget.NumberOfNodes(),rTarget.NumberOfNodes());
noalias(M)= ZeroMatrix(rTarget.NumberOfNodes(),rTarget.NumberOfNodes());
SpaceType::VectorType g(rTarget.NumberOfNodes());
SpaceType::VectorType b(rTarget.NumberOfNodes());
// Assemble the mass matrix once; it is shared by all six components.
for( ElementsArrayType::ptr_iterator it = TargetMeshElementsArray.ptr_begin();
it != TargetMeshElementsArray.ptr_end();
++it )
{
const IntegrationPointsArrayType& integration_points
= (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
GeometryType::JacobiansType J(integration_points.size());
J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
Matrix InvJ(3,3);
double DetJ;
for(unsigned int point=0; point< integration_points.size(); point++)
{
// Inversion only supplies DetJ for the integration weight.
MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
double dV= DetJ*integration_points[point].Weight();
for(unsigned int prim=0 ; prim<(*it)->GetGeometry().size() ; prim++)
{
for(unsigned int sec=0 ; sec<(*it)->GetGeometry().size() ; sec++)
{
M(((*it)->GetGeometry()[prim].Id()-1), ((*it)->GetGeometry()[sec].Id()-1))+=
Ncontainer(point, prim)*Ncontainer(point, sec)*dV;
}
}
}
}
// Project each of the six vector components independently.
for(unsigned int firstvalue= 0; firstvalue< 6; firstvalue++)
{
noalias(b)= ZeroVector(rTarget.NumberOfNodes());
noalias(g)= ZeroVector(rTarget.NumberOfNodes());
//Transfer of Gaussian variables to nodal variables via L_2-Minimization
// see Jiao + Heath "Common-refinement-based data transfer ..."
// International Journal for numerical methods in engineering 61 (2004) 2402--2427
// for general description of L_2-Minimization
// Assemble the right-hand side from source-mesh values evaluated at the
// physical locations of the target integration points.
for( ElementsArrayType::ptr_iterator it = TargetMeshElementsArray.ptr_begin();
it != TargetMeshElementsArray.ptr_end();
++it )
{
const IntegrationPointsArrayType& integration_points
= (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
GeometryType::JacobiansType J(integration_points.size());
J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
Matrix InvJ(3,3);
double DetJ;
for(unsigned int point=0; point< integration_points.size(); point++)
{
MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
Point sourceLocalPoint;
Point targetLocalPoint;
noalias(targetLocalPoint)= integration_points[point];
Point targetGlobalPoint;
(*it)->GetGeometry().GlobalCoordinates(targetGlobalPoint,
targetLocalPoint);
Element sourceElement;
double functionValue;
//Calculate Value of rVariable(firstvalue, secondvalue) in OldMesh
if(FindPartnerElement(targetGlobalPoint, SourceMeshElementsArray,
sourceElement,sourceLocalPoint))
{
functionValue=
ValueVectorInOldMesh( sourceElement,sourceLocalPoint,rThisVariable, firstvalue);
}
else
{
// Points without a source partner do not contribute to the RHS.
std::cout<<"###### NO PARTNER FOUND IN OLD MESH : TransferVariablesBetweenMeshes(...Vector...)#####"<<std::endl;
continue;
}
double dV= DetJ*integration_points[point].Weight();
for(unsigned int prim=0 ; prim<(*it)->GetGeometry().size() ; prim++)
{
b(((*it)->GetGeometry()[prim].Id()-1))
+=functionValue
*Ncontainer(point, prim)*dV;
}
}
}
SkylineLUFactorizationSolver<SpaceType, SpaceType>().Solve(M, g, b);
// Write the projected component back to the target nodes.
for(ModelPart::NodeIterator it = rTarget.NodesBegin() ;
it != rTarget.NodesEnd() ; it++)
{
it->GetSolutionStepValue(rThisVariable)(firstvalue)
= g((it->Id()-1));
}
}//END firstvalue
}
/**
* Transfer of rThisVariable stored on nodes from source mesh to target mesh.
* The transformation is done in a way that ensures a minimization
* of L_2-norm error (/sum{f_old(x)- f_new(x)) whereas
* f(x)_old/new= /sum{shape_func_i*rThisVariable_i}
* @param rSource source model_part
* @param rTarget target model_part
* @param rThisVariable double-Variable which should be transferred
* @see TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget,
Variable<Kratos::Matrix>& rThisVariable)
* @see TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget,
Variable<Kratos::Vector>& rThisVariable)
* @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
* Journal for numer. meth. in eng. 61 (2004) 2402--2427
*/
void TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget,
Variable<double>& rThisVariable)
{
// L2-optimal transfer of a scalar nodal variable from rSource to rTarget.
// Unlike the Matrix/Vector overloads, M and b are assembled together in a
// single pass because only one component has to be projected.
// NOTE(review): (Id()-1) is used as a matrix index -- assumes target node
// Ids are consecutive starting at 1; confirm for the given model parts.
ElementsArrayType& SourceMeshElementsArray= rSource.Elements();
ElementsArrayType& TargetMeshElementsArray= rTarget.Elements();
// Reset the target nodal values before the projection.
for(ModelPart::NodeIterator it = rTarget.NodesBegin();
it != rTarget.NodesEnd() ; it++)
{
it->GetSolutionStepValue(rThisVariable)
= 0.0;
}
//SetUpEquationSystem
SpaceType::MatrixType M(rTarget.NumberOfNodes(),rTarget.NumberOfNodes());
noalias(M)= ZeroMatrix(rTarget.NumberOfNodes(),rTarget.NumberOfNodes());
SpaceType::VectorType g(rTarget.NumberOfNodes());
noalias(g)= ZeroVector(rTarget.NumberOfNodes());
SpaceType::VectorType b(rTarget.NumberOfNodes());
noalias(b)= ZeroVector(rTarget.NumberOfNodes());
//Transfer of Gaussian variables to nodal variables via L_2-Minimization
// see Jiao + Heath "Common-refinement-based data transfer ..."
// International Journal for numerical methods in engineering 61 (2004) 2402--2427
// for general description of L_2-Minimization
for( ElementsArrayType::ptr_iterator it = TargetMeshElementsArray.ptr_begin();
it != TargetMeshElementsArray.ptr_end();
++it )
{
const IntegrationPointsArrayType& integration_points
= (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
GeometryType::JacobiansType J(integration_points.size());
J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
Matrix InvJ(3,3);
double DetJ;
for(unsigned int point=0; point< integration_points.size(); point++)
{
// Inversion only supplies DetJ for the integration weight.
MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
Point sourceLocalPoint;
Point targetLocalPoint;
noalias(targetLocalPoint)= integration_points[point];
Point targetGlobalPoint;
(*it)->GetGeometry().GlobalCoordinates(targetGlobalPoint,
targetLocalPoint);
Element sourceElement;
double functionValue;
//Calculate Value of rVariable(firstvalue, secondvalue) in OldMesh
if(FindPartnerElement(targetGlobalPoint, SourceMeshElementsArray,
sourceElement,sourceLocalPoint))
{
functionValue=
MappedValue( sourceElement,sourceLocalPoint,rThisVariable);
}
else
{
// Points without a source partner contribute neither to b nor to M.
std::cout<<"###### NO PARTNER FOUND IN OLD MESH : TransferVariablesBetweenMeshes(...double...)#####"<<std::endl;
continue;
}
double dV= DetJ*integration_points[point].Weight();
for(unsigned int prim=0 ; prim<(*it)->GetGeometry().size() ; prim++)
{
b(((*it)->GetGeometry()[prim].Id()-1))
+=functionValue*Ncontainer(point, prim)*dV;
for(unsigned int sec=0; sec<(*it)->GetGeometry().size(); sec++)
{
M(((*it)->GetGeometry()[prim].Id()-1), ((*it)->GetGeometry()[sec].Id()-1))+=
Ncontainer(point, prim)*Ncontainer(point, sec)*dV;
}
}
}
}
SkylineLUFactorizationSolver<SpaceType, SpaceType>().Solve(M, g, b);
// Write the projected values back to the target nodes.
for(ModelPart::NodeIterator it = rTarget.NodesBegin() ;
it != rTarget.NodesEnd() ; it++)
{
it->GetSolutionStepValue(rThisVariable)
= g((it->Id()-1));
}
}
/**
* Auxiliary function.
* This one calculates the target value of given Variable by shape-function-based
* interpolation of the nodal values from given source element to the given
* target point that is assumed to lie within the source element
* @return value of given variable in new point
* @param oldElement corresponding element in source mesh
* @param localPoint given target point to map the variable to
* @param rThisVariable given variable to be transferred
* @see ValueVectorInOldMesh(Element& oldElement,Point& localPoint,
const Variable<Kratos::Vector>& rThisVariable )
* @see MappedValue( Element& sourceElement,Point& targetPoint,
const Variable<double>& rThisVariable)
*/
Matrix ValueMatrixInOldMesh(Element& oldElement,Point& localPoint,
const Variable<Kratos::Matrix>& rThisVariable )
{
// Shape-function interpolation of a 3x3 nodal matrix variable at the
// given local point of the source element.
Matrix result(3,3);
noalias(result) = ZeroMatrix(3,3);
Matrix nodalValue(3,3);
const unsigned int numNodes = oldElement.GetGeometry().size();
for(unsigned int node = 0; node < numNodes; node++)
{
// The shape-function value is constant over the 3x3 component loops.
const double shapeValue = oldElement.GetGeometry().ShapeFunctionValue(node, localPoint);
noalias(nodalValue) = oldElement.GetGeometry()[node].GetSolutionStepValue(rThisVariable);
for(unsigned int row = 0; row < 3; row++)
{
for(unsigned int col = 0; col < 3; col++)
{
result(row, col) += shapeValue * nodalValue(row, col);
}
}
}
return result;
}
/**
* Auxiliary function.
* This one calculates the target value of given Variable by shape-function-based
* interpolation of the nodal values from given source element to the given
* target point that is assumed to lie within the source element
* @return value of given variable in new point
* @param oldElement corresponding element in source mesh
* @param localPoint given target point to map the variable to
* @param rThisVariable given variable to be transferred
* @see ValueMatrixInOldMesh(Element& oldElement,Point& localPoint,
const Variable<Kratos::Matrix>& rThisVariable )
* @see MappedValue( Element& sourceElement,Point& targetPoint,
const Variable<double>& rThisVariable)
*/
Vector ValueVectorInOldMesh(Element& oldElement,Point& localPoint,
const Variable<Kratos::Vector>& rThisVariable )
{
// Shape-function interpolation of a 6-component nodal vector variable at
// the given local point of the source element.
Vector result(6);
noalias(result) = ZeroVector(6);
Vector nodalValue(6);
const unsigned int numNodes = oldElement.GetGeometry().size();
for(unsigned int node = 0; node < numNodes; node++)
{
// Hoist the shape-function evaluation out of the component loop.
const double shapeValue = oldElement.GetGeometry().ShapeFunctionValue(node, localPoint);
noalias(nodalValue) = oldElement.GetGeometry()[node].GetSolutionStepValue(rThisVariable);
for(unsigned int k = 0; k < 6; k++)
result(k) += shapeValue * nodalValue(k);
}
return result;
}
/**
* Auxiliary function.
* This one calculates the target value of given Variable by shape-function-based
* interpolation of the nodal values from given source element to the given
* target point that is assumed to lie within the source element
* @return value of given variable in new point
* @param sourceElement corresponding element in source mesh
* @param targetPoint given target point to map the variable to
* @param rThisVariable given variable to be transferred
* @see ValueMatrixInOldMesh(Element& oldElement,Point& localPoint,
const Variable<Kratos::Matrix>& rThisVariable )
* @see ValueVectorInOldMesh(Element& oldElement,Point& localPoint,
const Variable<Kratos::Vector>& rThisVariable )
*/
double MappedValuePressure( Element& sourceElement,Point& targetPoint,
const Variable<double>& rThisVariable)
{
// Interpolates a pressure-like scalar at targetPoint using the linear
// (corner-node) sub-geometry of a quadratic source element: the 8-node
// hexahedron for 20/27-node bricks and the 4-node tetrahedron for
// 10-node tetrahedra.
double newValue = 0.0;
Geometry<Node<3> >::Pointer pPressureGeometry;
if(sourceElement.GetGeometry().size()==20 || sourceElement.GetGeometry().size()==27)
pPressureGeometry= Geometry<Node<3> >::Pointer(new Hexahedra3D8 <Node<3> >(
sourceElement.GetGeometry()(0),sourceElement.GetGeometry()(1),
sourceElement.GetGeometry()(2),sourceElement.GetGeometry()(3),
sourceElement.GetGeometry()(4),sourceElement.GetGeometry()(5),
sourceElement.GetGeometry()(6),sourceElement.GetGeometry()(7)));
if(sourceElement.GetGeometry().size()==10 )
pPressureGeometry= Geometry<Node<3> >::Pointer(new Tetrahedra3D4 <Node<3> >(
sourceElement.GetGeometry()(0),sourceElement.GetGeometry()(1),
sourceElement.GetGeometry()(2),sourceElement.GetGeometry()(3)));
// Fix: for element types other than the ones handled above the pointer
// used to stay null and the loop below dereferenced it. Fall back to
// interpolating over the full element geometry in that case.
if( !pPressureGeometry )
{
for(unsigned int i= 0; i< sourceElement.GetGeometry().size(); i++)
{
newValue+=
sourceElement.GetGeometry().ShapeFunctionValue( i, targetPoint)
*sourceElement.GetGeometry()[i].GetSolutionStepValue(rThisVariable);
}
return newValue;
}
// Interpolate with the linear sub-geometry's shape functions; the nodal
// values come from the corner nodes shared with the source element.
for(unsigned int i= 0; i< pPressureGeometry->size(); i++)
{
newValue+=
pPressureGeometry->ShapeFunctionValue( i, targetPoint)
*sourceElement.GetGeometry()[i].GetSolutionStepValue(rThisVariable);
}
return newValue;
}
/**
* Auxiliary function.
* This one calculates the target value of given Variable by shape-function-based
* interpolation of the nodal values from given source element to the given
* target point that is assumed to lie within the source element
* @return value of given variable in new point
* @param sourceElement corresponding element in source mesh
* @param targetPoint given target point to map the variable to
* @param rThisVariable given variable to be transferred
* @see ValueMatrixInOldMesh(Element& oldElement,Point& localPoint,
const Variable<Kratos::Matrix>& rThisVariable )
* @see ValueVectorInOldMesh(Element& oldElement,Point& localPoint,
const Variable<Kratos::Vector>& rThisVariable )
*/
double MappedValue( Element& sourceElement,Point& targetPoint,
const Variable<double>& rThisVariable)
{
// Interpolate the scalar nodal variable at targetPoint via the source
// element's shape functions.
double result = 0.0;
const unsigned int numNodes = sourceElement.GetGeometry().size();
for(unsigned int node = 0; node < numNodes; node++)
{
result += sourceElement.GetGeometry().ShapeFunctionValue(node, targetPoint)
* sourceElement.GetGeometry()[node].GetSolutionStepValue(rThisVariable);
}
return result;
}
/**
* Auxiliary function.
* This one calculates the target value of given Variable by shape-function-based
* interpolation of the nodal values from given source element to the given
* target point that is assumed to lie within the source element
* @return value of given variable in new point
* @param sourceElement corresponding element in source mesh
* @param targetPoint given target point to map the variable to
* @param rThisVariable given variable to be transferred
*/
Vector MappedValue( Element& sourceElement,Point& targetPoint,
const Variable<array_1d<double, 3 > >& rThisVariable)
{
// Interpolate a 3-component nodal variable at targetPoint via the source
// element's shape functions.
Vector result = ZeroVector(3);
const unsigned int numNodes = sourceElement.GetGeometry().size();
for(unsigned int node = 0; node < numNodes; node++)
{
result += sourceElement.GetGeometry().ShapeFunctionValue(node, targetPoint)
* sourceElement.GetGeometry()[node].GetSolutionStepValue(rThisVariable);
}
return result;
}
/**
* calculates for a point given with the physical coords newNode
* the element oldElement where it lays in and the natural coords
* localPoint within this element
* @return whether a corresponding element and natural coords could be found
* @param newNode physical coordinates of given point
* @param OldMeshElementsArray Array of elements wherein the search should be performed
* @param oldElement corresponding element for newNode
* @param rResult corresponding natural coords for newNode
* TODO: find a faster method for outside search (hextree? etc.), maybe outside this
* function by restriction of OldMeshElementsArray
*/
bool FindPartnerElement( CoordinatesArrayType& newNode,
const ElementsArrayType& OldMeshElementsArray,
Element& oldElement,Point& rResult)
{
// Locates the old-mesh element containing the physical point newNode and
// returns the corresponding natural coordinates in rResult.
// Strategy: collect the set of elements that own a node at the current
// minimum (not previously tried) squared distance from newNode and test
// those candidates with IsInside(); widen the search on failure, up to a
// fixed number of passes.
// Fixes: removed leftover debug printing of the pass counter on every
// iteration, and deleted commented-out dead code.
bool partner_found= false;
ElementsArrayType::Pointer OldElementsSet( new ElementsArrayType() );
std::vector<double > OldMinDist; // squared distances already examined
bool newMinDistFound= false;
int counter = 0;
do
{
double minDist = 1.0e120;
newMinDistFound= false;
OldElementsSet->clear();
//loop over all master surfaces (global search)
for( ElementsArrayType::ptr_const_iterator it = OldMeshElementsArray.ptr_begin();
it != OldMeshElementsArray.ptr_end(); ++it )
{
//loop over all nodes in tested element
for( unsigned int n=0; n<(*it)->GetGeometry().size(); n++ )
{
// squared distance from the element node (initial coordinates) to newNode
double dist = ((*it)->GetGeometry().GetPoint(n).X0()-newNode[0])
*((*it)->GetGeometry().GetPoint(n).X0()-newNode[0])
+((*it)->GetGeometry().GetPoint(n).Y0()-newNode[1])
*((*it)->GetGeometry().GetPoint(n).Y0()-newNode[1])
+((*it)->GetGeometry().GetPoint(n).Z0()-newNode[2])
*((*it)->GetGeometry().GetPoint(n).Z0()-newNode[2]);
if( fabs(dist-minDist) < 1e-7 )
{
// same distance as the current minimum: add as a candidate
OldElementsSet->push_back(*it);
}
else if( dist < minDist )
{
// strictly smaller distance, unless already tried in a
// previous pass
bool alreadyUsed= false;
for(unsigned int old_dist= 0; old_dist<OldMinDist.size(); old_dist++)
{
if(fabs(dist- OldMinDist[old_dist])< 1e-7 )
alreadyUsed= true;
}
if(!alreadyUsed)
{
OldElementsSet->clear();
minDist = dist;
OldElementsSet->push_back(*it);
newMinDistFound= true;
}
}
}
}
OldMinDist.push_back(minDist);
// test all candidate elements at the current minimum distance
for( ElementsArrayType::ptr_const_iterator it = OldElementsSet->ptr_begin();
it != OldElementsSet->ptr_end(); ++it )
{
if( (*it)->GetGeometry().IsInside( newNode, rResult ) )
{
oldElement=**(it);
partner_found=true;
return partner_found;
}
}
counter++;
if( counter > 27 )
break;
}
while(newMinDistFound);
if(!partner_found)
std::cout<<" !!!! NO PARTNER FOUND !!!! "<<std::endl;
return partner_found;
}
//***************************************************************************
//***************************************************************************
/**
* Auxiliary function.
* This one calculates the target value of given Matrix-Variable at row firstvalue
* and column secondvalue by shape-function-based
* interpolation of the nodal values from given source element to the given
* target point that is assumed to lie within the source element
* @return value of given variable in new point
* @param sourceElement corresponding element in source mesh
* @param targetPoint given target point to map the variable to
* @param rThisVariable given variable to be transferred
* @param firstvalue row index
* @param secondvalue column index
* @see ValueVectorInOldMesh(Element& oldElement,Point& localPoint,
const Variable<Kratos::Vector>& rThisVariable, unsigned int firstvalue)
*/
double ValueMatrixInOldMesh(Element& oldElement,Point& localPoint,
const Variable<Kratos::Matrix>& rThisVariable, unsigned int firstvalue, unsigned int secondvalue )
{
// Interpolate component (firstvalue, secondvalue) of the nodal matrix
// variable at localPoint via the element's shape functions.
double result = 0.0;
const unsigned int numNodes = oldElement.GetGeometry().size();
for(unsigned int node = 0; node < numNodes; node++)
{
result += oldElement.GetGeometry().ShapeFunctionValue(node, localPoint)
* oldElement.GetGeometry()[node].GetSolutionStepValue(rThisVariable)(firstvalue, secondvalue);
}
return result;
}
/**
* Auxiliary function.
* This one calculates the target value of given Vector-Variable at firstvalue
* by shape-function-based
* interpolation of the nodal values from given source element to the given
* target point that is assumed to lie within the source element
* @return value of given variable in new point
* @param sourceElement corresponding element in source mesh
* @param targetPoint given target point to map the variable to
* @param rThisVariable given variable to be transferred
* @param firstvalue index
* @see ValueVectorInOldMesh(Element& oldElement,Point& localPoint,
const Variable<Kratos::Vector>& rThisVariable, unsigned int firstvalue)
*/
double ValueVectorInOldMesh(Element& oldElement,Point& localPoint,
const Variable<Kratos::Vector>& rThisVariable, unsigned int firstvalue )
{
// Interpolate component firstvalue of the nodal vector variable at
// localPoint via the element's shape functions.
double result = 0.0;
const unsigned int numNodes = oldElement.GetGeometry().size();
for(unsigned int node = 0; node < numNodes; node++)
{
result += oldElement.GetGeometry().ShapeFunctionValue(node, localPoint)
* oldElement.GetGeometry()[node].GetSolutionStepValue(rThisVariable)(firstvalue);
}
return result;
}
inline void CreatePartition(unsigned int number_of_threads,const int number_of_rows, vector<unsigned int>& partitions)
{
// Split [0, number_of_rows) into number_of_threads contiguous chunks of
// equal size; the last chunk absorbs the integer-division remainder.
partitions.resize(number_of_threads+1);
const int chunk = number_of_rows / number_of_threads;
partitions[0] = 0;
for(unsigned int i = 1; i < number_of_threads; i++)
partitions[i] = partitions[i-1] + chunk;
partitions[number_of_threads] = number_of_rows;
}
};//Class Scheme
}//namespace Kratos.
#endif /* KRATOS_PARALLEL_VARIABLE_TRANSFER_UTILITY_INCLUDED defined */
|
GB_unop__identity_int16_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int16_fp32)
// op(A') function: GB (_unop_tran__identity_int16_fp32)
// C type: int16_t
// A type: float
// cast: int16_t cij = GB_cast_to_int16_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = GB_cast_to_int16_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = GB_cast_to_int16_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the IDENTITY unary op with typecast: Cx [p] = (int16_t) Ax [p],
// where Ax is float. Auto-generated kernel; the cast goes through double
// per GraphBLAS typecasting rules (see GB_CAST above).
GrB_Info GB (_unop_apply__identity_int16_fp32)
(
int16_t *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full/sparse/hypersparse case: every entry of Ax is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
int16_t z = GB_cast_to_int16_t ((double) (aij)) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!Ab [p]) continue ;
float aij = Ax [p] ;
int16_t z = GB_cast_to_int16_t ((double) (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A, typecast float -> int16_t, and apply the IDENTITY op.
// The entire loop body is supplied by the shared template
// GB_unop_transpose.c, which uses the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_int16_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
simd-6.c | /* PR libgomp/58482 */
/* { dg-do run } */
/* { dg-additional-options "-msse2" { target sse2_runtime } } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
extern void abort ();
int a[1024] __attribute__((aligned (32))) = { 1 };
struct S { int s; };
#pragma omp declare reduction (+:struct S:omp_out.s += omp_in.s)
#pragma omp declare reduction (foo:struct S:omp_out.s += omp_in.s)
#pragma omp declare reduction (foo:int:omp_out += omp_in)
/* Sum a[] three ways in one "parallel for simd" loop: with the built-in
   '+' reduction on struct S, the user-declared 'foo' reduction on struct S,
   and the user-declared 'foo' reduction on int.  All three results must
   agree; aborts otherwise.  noinline/noclone keep the loop out of main so
   the test exercises the real outlined reduction code.  */
__attribute__((noinline, noclone)) int
foo (void)
{
int i, u = 0;
struct S s, t;
s.s = 0; t.s = 0;
#pragma omp parallel for simd aligned(a : 32) reduction(+:s) \
reduction(foo:t, u)
for (i = 0; i < 1024; i++)
{
int x = a[i];
s.s += x;
t.s += x;
u += x;
}
if (t.s != s.s || u != s.s)
abort ();
return s.s;
}
int
main ()
{
/* Fill the input array, run the reductions, and verify the known sum.  */
for (int i = 0; i < 1024; i++)
a[i] = (i & 31) + (i / 128);
if (foo () != 19456)
abort ();
return 0;
}
|
GB_hyper_prune.c | //------------------------------------------------------------------------------
// GB_hyper_prune: remove empty vectors from a hypersparse Ap, Ah list
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Removes empty vectors from a hypersparse list. On input, *Ap and *Ah are
// assumed to be NULL. The input arrays Ap_old and Ah_old are not modified,
// and thus can be shallow content from another matrix. New hyperlists Ap and
// Ah are allocated, for nvec vectors, all nonempty.
#include "GB.h"
GrB_Info GB_hyper_prune
(
// output, not allocated on input:
int64_t *restrict *p_Ap, size_t *p_Ap_size, // size nvec+1
int64_t *restrict *p_Ah, size_t *p_Ah_size, // size nvec
int64_t *p_nvec, // # of vectors, all nonempty
// input, not modified
const int64_t *Ap_old, // size nvec_old+1
const int64_t *Ah_old, // size nvec_old
const int64_t nvec_old, // original number of vectors
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (p_Ap != NULL) ;
ASSERT (p_Ah != NULL) ;
ASSERT (p_nvec != NULL) ;
ASSERT (Ap_old != NULL) ;
ASSERT (Ah_old != NULL) ;
ASSERT (nvec_old >= 0) ;
// initialize outputs in case of early return
(*p_Ap) = NULL ; (*p_Ap_size) = 0 ;
(*p_Ah) = NULL ; (*p_Ah_size) = 0 ;
(*p_nvec) = -1 ;
int64_t *restrict W = NULL ; size_t W_size = 0 ;
int64_t *restrict Ap = NULL ; size_t Ap_size = 0 ;
int64_t *restrict Ah = NULL ; size_t Ah_size = 0 ;
//--------------------------------------------------------------------------
// determine the # of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (nvec_old, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
W = GB_MALLOC_WERK (nvec_old+1, int64_t, &W_size) ;
if (W == NULL)
{
// out of memory
return (GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// count the # of nonempty vectors
//--------------------------------------------------------------------------
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < nvec_old ; k++)
{
// W [k] = 1 if the kth vector is nonempty; 0 if empty
W [k] = (Ap_old [k] < Ap_old [k+1]) ;
}
int64_t nvec ;
// after the cumulative sum, W [k] is the new position of vector k
// (valid only when vector k is nonempty), and nvec is the total count
GB_cumsum (W, nvec_old, &nvec, nthreads, Context) ;
//--------------------------------------------------------------------------
// allocate the result
//--------------------------------------------------------------------------
Ap = GB_MALLOC (nvec+1, int64_t, &Ap_size) ;
Ah = GB_MALLOC (nvec , int64_t, &Ah_size) ;
if (Ap == NULL || Ah == NULL)
{
// out of memory
GB_FREE_WERK (&W, W_size) ;
GB_FREE (&Ap, Ap_size) ;
GB_FREE (&Ah, Ah_size) ;
return (GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// create the Ap and Ah result
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < nvec_old ; k++)
{
if (Ap_old [k] < Ap_old [k+1])
{
// vector k is nonempty: copy it to its compacted position knew
int64_t knew = W [k] ;
Ap [knew] = Ap_old [k] ;
Ah [knew] = Ah_old [k] ;
}
}
// close the pointer array with the total number of entries
Ap [nvec] = Ap_old [nvec_old] ;
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WERK (&W, W_size) ;
(*p_Ap) = Ap ; (*p_Ap_size) = Ap_size ;
(*p_Ah) = Ah ; (*p_Ah_size) = Ah_size ;
(*p_nvec) = nvec ;
return (GrB_SUCCESS) ;
}
|
mhpTest7.c | void foo() {
// MHP (may-happen-in-parallel) analysis fixture: unconditional self-recursion
// with a barrier at every depth. This file is meant to be *analyzed*, not run.
int x; // unused local, kept as part of the fixture
#pragma omp barrier
foo(); // infinite recursion by design
}
// Second recursive barrier chain, mirroring foo(); gives the MHP analyzer two
// independent barrier "towers" reachable from the parallel region in main().
void bar() {
int y; // unused local, kept as part of the fixture
#pragma omp barrier
bar(); // infinite recursion by design
}
// Driver for the MHP-analysis fixture: a parallel region in which threads
// either descend into the foo()/bar() barrier chains or execute a straight
// sequence of writes to x separated by barriers.
int main() {
    // BUG FIX(review): `x` was used below without any declaration in this
    // translation unit, so the file did not compile. Declaring it here makes
    // it shared by default inside the parallel region, preserving the
    // intended barrier/race pattern the analyzer inspects.
    int x = 0;
    #pragma omp parallel
    {
        if (x > 2)
            foo();
        else if (x > 3)
            bar();
        else {
            x = 1;
            #pragma omp barrier
            x = 2;
            #pragma omp barrier
            x = 3;
            #pragma omp barrier
            x = 4;
            #pragma omp barrier
            x = 5;
            #pragma omp barrier
            x = 6;
            #pragma omp barrier
            x = 7;
            #pragma omp barrier
            x = 8;
            #pragma omp barrier
        }
    }
    return 0;
}
|
gemm.c | #include "gemm.h"
#include "utils.h"
#include "cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "diffprivate.h"
/* Binary-weight GEMM: for each element of the MxK char matrix A, add the
 * corresponding row of B into the row of C when A is nonzero, subtract it
 * when A is zero (weights are effectively +/-1).  ALPHA is accepted for
 * signature parity with the other gemm kernels but is not used. */
void gemm_bin(int M, int N, int K, float ALPHA,
        char *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    for (int row = 0; row < M; ++row) {
        float *c_row = C + row*ldc;
        for (int kk = 0; kk < K; ++kk) {
            float *b_row = B + kk*ldb;
            if (A[row*lda + kk]) {
                for (int col = 0; col < N; ++col) c_row[col] += b_row[col];
            } else {
                for (int col = 0; col < N; ++col) c_row[col] -= b_row[col];
            }
        }
    }
}
/* Allocate a rows*cols matrix of floats uniformly drawn from [0,1] via rand().
 * Caller owns the returned buffer and must free() it.
 * FIX(review): the calloc result was previously used unchecked, and rows*cols
 * was computed in (overflowable) int arithmetic before allocation. */
float *random_matrix(int rows, int cols)
{
    size_t count = (size_t)rows * (size_t)cols;
    float *m = calloc(count, sizeof(float));  /* calloc also checks count*size overflow */
    if (!m) {
        fprintf(stderr, "random_matrix: out of memory (%d x %d)\n", rows, cols);
        exit(1);
    }
    for (size_t i = 0; i < count; ++i) {
        m[i] = (float)rand()/RAND_MAX;
    }
    return m;
}
/* Benchmark gemm_cpu on freshly generated random matrices (10 iterations).
 * TA/TB select transposed operand layouts, matching gemm_cpu's convention.
 * FIX(review): the old message printed elapsed *seconds* — (end-start)/CLOCKS_PER_SEC
 * — but labeled them "ms", and cast through float; report seconds with a
 * double cast, consistent with time_gpu_random_matrix. */
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a = TA ? random_matrix(k, m) : random_matrix(m, k);
    int lda = (!TA) ? k : m;
    float *b = TB ? random_matrix(n, k) : random_matrix(k, n);
    int ldb = (!TB) ? n : k;
    float *c = random_matrix(m, n);
    clock_t start = clock(), end;
    for (int i = 0; i < 10; ++i) {
        gemm_cpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c, n);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",
           m, k, k, n, TA, TB, (double)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}
// Public GEMM entry point: C = ALPHA*op(A)*op(B) + BETA*C, where op() is the
// transpose when TA/TB are nonzero. This path always dispatches to the CPU
// implementation; GPU builds call gemm_gpu directly (see the #ifdef GPU block).
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}
// Differentially-private GEMM entry point: same contract as gemm(), but the
// transposed kernels route through the *_diff variants, which perturb C via
// diff_private_func (see diffprivate.h).
void gemm_diff(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
gemm_cpu_diff( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}
/* C += ALPHA * A * B for row-major A (MxK, stride lda), B (KxN, stride ldb),
 * C (MxN, stride ldc).  Parallelized over rows of C.
 * FIX(review): the loop indices were declared as a single `int i,j,k;` before
 * the `#pragma omp parallel for`; OpenMP privatizes only the parallel loop
 * variable, so j and k were shared across threads — a data race under OpenMP.
 * Declaring them inside the loops makes them thread-private. */
void gemm_nn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    #pragma omp parallel for
    for (int i = 0; i < M; ++i) {
        for (int k = 0; k < K; ++k) {
            const float A_PART = ALPHA*A[i*lda + k];
            for (int j = 0; j < N; ++j) {
                C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
}
/* C += ALPHA * A * B^T for row-major A (MxK, stride lda), B stored as NxK
 * (stride ldb, i.e. B is accessed transposed), C (MxN, stride ldc).
 * FIX(review): j and k were declared outside the `#pragma omp parallel for`
 * and therefore shared between threads — a data race. They are now declared
 * inside the loops, making them thread-private. */
void gemm_nt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    #pragma omp parallel for
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < N; ++j) {
            float sum = 0;
            for (int k = 0; k < K; ++k) {
                sum += ALPHA*A[i*lda + k]*B[j*ldb + k];
            }
            C[i*ldc + j] += sum;
        }
    }
}
/* C += ALPHA * A * B^T (same math as gemm_nt, accumulated one k slice at a
 * time).  A is MxK (stride lda), B is NxK (stride ldb), C is MxN (stride ldc).
 * FIX(review): the original parallelized the k loop, so every thread
 * accumulated into the same C[i*ldc+j] entries concurrently — a data race.
 * The i loop is parallelized instead; for each (i,j) the additions still
 * happen in ascending-k order, so sequential results are bit-identical. */
void gemm_nt_diff(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    #pragma omp parallel for
    for (int i = 0; i < M; ++i) {
        for (int k = 0; k < K; ++k) {
            for (int j = 0; j < N; ++j) {
                C[i*ldc + j] += ALPHA*A[i*lda + k]*B[j*ldb + k];
            }
        }
    }
}
/* C += ALPHA * A^T * B where A is stored KxM (stride lda, accessed
 * transposed), B is KxN (stride ldb), C is MxN (stride ldc).
 * FIX(review): j and k were declared outside the `#pragma omp parallel for`
 * and therefore shared between threads — a data race. They are now declared
 * inside the loops, making them thread-private. */
void gemm_tn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    #pragma omp parallel for
    for (int i = 0; i < M; ++i) {
        for (int k = 0; k < K; ++k) {
            const float A_PART = ALPHA*A[k*lda + i];
            for (int j = 0; j < N; ++j) {
                C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
}
// Differentially-private variant of gemm_tn: C += ALPHA * A^T * B, applying
// diff_private_func to the whole C buffer (N*M entries) after each k slice.
// NOTE(review): the `#pragma omp parallel for` parallelizes the k loop while
// every iteration accumulates into the same C entries AND calls
// diff_private_func on the shared C — that is a data race under OpenMP, and
// it makes the number/ordering of privacy perturbations nondeterministic.
// Not restructured here because diff_private_func's per-slice semantics are
// defined elsewhere (diffprivate.h) — confirm whether OpenMP builds of this
// path are intended before relying on it.
void gemm_tn_diff(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
#pragma omp parallel for
for(k = 0; k < K; ++k){
for(i = 0; i < M; ++i){
register float A_PART = ALPHA*A[k*lda+i];
for(j = 0; j < N; ++j){
C[i*ldc+j] += A_PART*B[k*ldb+j];
//printf("C[i*ldc+j]=%f\n",C[i*ldc+j]);
}
}
diff_private_func(C, N*M);
}
}
/* C += ALPHA * A^T * B^T where A is stored KxM (stride lda) and B is stored
 * NxK (stride ldb), both accessed transposed; C is MxN (stride ldc).
 * FIX(review): j and k were declared outside the `#pragma omp parallel for`
 * and therefore shared between threads — a data race. They are now declared
 * inside the loops, making them thread-private. */
void gemm_tt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    #pragma omp parallel for
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < N; ++j) {
            float sum = 0;
            for (int k = 0; k < K; ++k) {
                sum += ALPHA*A[i + k*lda]*B[k + j*ldb];
            }
            C[i*ldc + j] += sum;
        }
    }
}
/* CPU GEMM dispatcher: scales C by BETA, then accumulates ALPHA*op(A)*op(B)
 * via the kernel matching the TA/TB transpose flags. */
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    /* Pre-scale the output: C = BETA*C. */
    for (int row = 0; row < M; ++row) {
        for (int col = 0; col < N; ++col) {
            C[row*ldc + col] *= BETA;
        }
    }
    /* Dispatch on the transpose flags. */
    if (TA) {
        if (TB) gemm_tt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
        else    gemm_tn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    } else {
        if (TB) gemm_nt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
        else    gemm_nn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    }
}
/* Differentially-private CPU GEMM dispatcher: scales C by BETA, then routes
 * the transposed cases through the *_diff kernels; the NN and TT cases use
 * the plain kernels, exactly as the original dispatch table did. */
void gemm_cpu_diff(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    /* Pre-scale the output: C = BETA*C. */
    for (int row = 0; row < M; ++row) {
        for (int col = 0; col < N; ++col) {
            C[row*ldc + col] *= BETA;
        }
    }
    /* Dispatch on the transpose flags. */
    if (TA) {
        if (TB) gemm_tt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
        else    gemm_tn_diff(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    } else {
        if (TB) gemm_nt_diff(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
        else    gemm_nn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    }
}
#ifdef GPU
#include <math.h>
// GPU GEMM via cuBLAS. cuBLAS is column-major, so the call computes
// C^T = op(B)^T * op(A)^T by swapping the operands and the M/N extents,
// which yields row-major C = op(A)*op(B) as the rest of this file expects.
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A_gpu, int lda,
float *B_gpu, int ldb,
float BETA,
float *C_gpu, int ldc)
{
cublasHandle_t handle = blas_handle();
// NOTE(review): cublasSgemm returns cublasStatus_t, not cudaError_t; the
// implicit conversion here relies on both being plain enums/ints. Confirm
// that check_error interprets the value correctly for cuBLAS failures.
cudaError_t status = cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
(TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
check_error(status);
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
/* Benchmark gemm_gpu on freshly generated random matrices (32 iterations)
 * and report wall-clock seconds measured with clock().  TA/TB select the
 * transposed operand layouts, matching gemm_gpu's convention. */
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a = TA ? random_matrix(k, m) : random_matrix(m, k);
    int lda = TA ? m : k;
    float *b = TB ? random_matrix(n, k) : random_matrix(k, n);
    int ldb = TB ? k : n;
    float *c = random_matrix(m, n);
    clock_t start = clock();
    for (int iter = 0; iter < 32; ++iter) {
        gemm_gpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c, n);
    }
    clock_t end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}
// Benchmark gemm_gpu with device-resident operands: matrices are uploaded
// once via cuda_make_array, then `iter` GEMMs are timed (with a device sync
// after each) and the achieved GFLOPS is reported.
void time_gpu(int TA, int TB, int m, int k, int n)
{
int iter = 10;
float *a = random_matrix(m,k);
float *b = random_matrix(k,n);
int lda = (!TA)?k:m;
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
float *a_cl = cuda_make_array(a, m*k);
float *b_cl = cuda_make_array(b, k*n);
float *c_cl = cuda_make_array(c, m*n);
int i;
clock_t start = clock(), end;
for(i = 0; i<iter; ++i){
gemm_gpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
// NOTE(review): cudaThreadSynchronize is deprecated in favor of
// cudaDeviceSynchronize — confirm the minimum supported CUDA toolkit.
cudaThreadSynchronize();
}
// FLOP model: ~(2k+2) operations per output element, times iter passes.
double flop = ((double)m)*n*(2.*k + 2.)*iter;
double gflop = flop/pow(10., 9);
end = clock();
double seconds = sec(end-start);
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
cuda_free(a_cl);
cuda_free(b_cl);
cuda_free(c_cl);
free(a);
free(b);
free(c);
}
/* Cross-check gemm_gpu against gemm_cpu on identical random inputs (seeded
 * with srand(0) for reproducibility) and print the mean squared difference
 * between the two result matrices. */
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
    srand(0);
    float *a = TA ? random_matrix(k, m) : random_matrix(m, k);
    int lda = TA ? m : k;
    float *b = TB ? random_matrix(n, k) : random_matrix(k, n);
    int ldb = TB ? k : n;
    float *c = random_matrix(m, n);
    float *c_gpu = random_matrix(m, n);
    /* Both outputs start from zero so the comparison sees only the product. */
    memset(c, 0, m*n*sizeof(float));
    memset(c_gpu, 0, m*n*sizeof(float));
    gemm_gpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c_gpu, n);
    gemm_cpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c, n);
    double sse = 0;
    for (int i = 0; i < m*n; ++i) {
        sse += pow(c[i]-c_gpu[i], 2);
    }
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
    free(a);
    free(b);
    free(c);
    free(c_gpu);
}
// Timing sweep over a set of GEMM shapes (sized like darknet conv layers).
// The commented block preserves an older accuracy/timing suite that can be
// re-enabled when debugging; always returns 0.
int test_gpu_blas()
{
/*
test_gpu_accuracy(0,0,10,576,75);
test_gpu_accuracy(0,0,17,10,10);
test_gpu_accuracy(1,0,17,10,10);
test_gpu_accuracy(0,1,17,10,10);
test_gpu_accuracy(1,1,17,10,10);
test_gpu_accuracy(0,0,1000,10,100);
test_gpu_accuracy(1,0,1000,10,100);
test_gpu_accuracy(0,1,1000,10,100);
test_gpu_accuracy(1,1,1000,10,100);
test_gpu_accuracy(0,0,10,10,10);
time_gpu(0,0,64,2916,363);
time_gpu(0,0,64,2916,363);
time_gpu(0,0,64,2916,363);
time_gpu(0,0,192,729,1600);
time_gpu(0,0,384,196,1728);
time_gpu(0,0,256,196,3456);
time_gpu(0,0,256,196,2304);
time_gpu(0,0,128,4096,12544);
time_gpu(0,0,128,4096,4096);
*/
time_gpu(0,0,64,75,12544);
time_gpu(0,0,64,75,12544);
time_gpu(0,0,64,75,12544);
time_gpu(0,0,64,576,12544);
time_gpu(0,0,256,2304,784);
time_gpu(1,1,2304,256,784);
time_gpu(0,0,512,4608,196);
time_gpu(1,1,4608,512,196);
return 0;
}
#endif
|
preprocessingInfo.c | #include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// Small fixture exercising preprocessing-info handling: a conditionally
// compiled OpenMP region, with `omp single` so only one thread prints the
// team size. `mits` is deliberately trivial work outside the region.
int main (void)
{
int mits=5000;
#ifdef _OPENMP
#pragma omp parallel
{
#pragma omp single
printf("Running using %d threads...\n",omp_get_num_threads());
}
#endif
mits ++; // kept: gives the serial path a statement after the region
return 0;
}
|
move_particle_utility.h | // KRATOS ___ ___ _ ___ __ ___ ___ ___ ___
// / __/ _ \| \| \ \ / /__| \_ _| __| __|
// | (_| (_) | .` |\ V /___| |) | || _|| _|
// \___\___/|_|\_| \_/ |___/___|_| |_| APPLICATION
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Pablo Becker
//
#if !defined(KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_TRANSPORT_INCLUDED)
#define KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_TRANSPORT_INCLUDED
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/node.h"
///
#include "includes/dof.h"
#include "includes/variables.h"
#include "containers/array_1d.h"
#include "containers/data_value_container.h"
#include "includes/mesh.h"
#include "utilities/math_utils.h"
#include "processes/node_erase_process.h"
///
#include "utilities/geometry_utilities.h"
#include "includes/model_part.h"
#include "spatial_containers/spatial_containers.h"
#include "spatial_containers/bounding_box.h"
#include "spatial_containers/cell.h"
#include "spatial_containers/bins_dynamic_objects.h"
#include "utilities/spatial_containers_configure.h"
#include "geometries/line_2d_2.h"
#include "geometries/triangle_2d_3.h"
#include "geometries/triangle_3d_3.h"
#include "geometries/point.h"
#include "convection_diffusion_application.h"
#include "convection_particle.h"
#include "utilities/openmp_utils.h"
#include "time.h"
//#include "processes/process.h"
namespace Kratos
{
//this class is to be modified by the user to customize the interpolation process
template< unsigned int TDim>
class MoveParticleUtilityScalarTransport
{
public:
typedef SpatialContainersConfigure<TDim> Configure;
typedef typename Configure::PointType PointType;
//typedef PointType::CoordinatesArrayType CoordinatesArrayType;
typedef typename Configure::ContainerType ContainerType;
//typedef Configure::PointerType PointerType;
typedef typename Configure::IteratorType IteratorType;
typedef typename Configure::ResultContainerType ResultContainerType;
//typedef Configure::ResultPointerType ResultPointerType;
typedef typename Configure::ResultIteratorType ResultIteratorType;
typedef PointerVector< Convection_Particle, Convection_Particle*, std::vector<Convection_Particle*> > ParticlePointerVector;
//typedef Configure::ContactPairType ContactPairType;
//typedef Configure::ContainerContactType ContainerContactType;
//typedef Configure::IteratorContactType IteratorContactType;
//typedef Configure::PointerContactType PointerContactType;
//typedef Configure::PointerTypeIterator PointerTypeIterator;
KRATOS_CLASS_POINTER_DEFINITION(MoveParticleUtilityScalarTransport);
//template<unsigned int TDim>
// Constructor: caches the transport variables from CONVECTION_DIFFUSION_SETTINGS,
// renumbers elements sequentially (SERIAL by requirement), computes per-node and
// per-element mean sizes, allocates the global particle pool and the per-element
// particle-pointer vectors, and seeds 5*(TDim+1) particles per element with the
// unknown interpolated from the nodes. The seeding loop uses a running particle
// counter and therefore must also stay serial.
MoveParticleUtilityScalarTransport(ModelPart& model_part, int maximum_number_of_particles)
: mr_model_part(model_part) , mmaximum_number_of_particles(maximum_number_of_particles) ,
mUnknownVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetUnknownVariable()) ,
mProjectionVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetProjectionVariable()) ,
mVelocityVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetVelocityVariable()) ,
mMeshVelocityVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetMeshVelocityVariable())
{
std::cout << "initializing moveparticle utility for scalar transport" << std::endl;
Check();
//storing water and air density and their inverses, just in case it is needed for the streamline integration
//loop in elements to change their ID to their position in the array. Easier to get information later.
//DO NOT PARALELIZE THIS! IT MUST BE SERIAL!!!!!!!!!!!!!!!!!!!!!!
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
ielem->SetId(ii+1);
}
mlast_elem_id= (mr_model_part.ElementsEnd()-1)->Id();
int node_id=0;
// we look for the smallest edge. could be used as a weighting function when going lagrangian->eulerian instead of traditional shape functions(method currently used)
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
// MEAN_SIZE per node: average edge length to all neighbouring nodes.
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator pnode = inodebegin+ii;
array_1d<double,3> position_node;
double distance=0.0;
position_node = pnode->Coordinates();
WeakPointerVector< Node<3> >& rneigh = pnode->GetValue(NEIGHBOUR_NODES);
//we loop all the nodes to check all the edges
const double number_of_neighbours = double(rneigh.size());
for( WeakPointerVector<Node<3> >::iterator inode = rneigh.begin(); inode!=rneigh.end(); inode++)
{
array_1d<double,3> position_difference;
position_difference = inode->Coordinates() - position_node;
double current_distance= sqrt(pow(position_difference[0],2)+pow(position_difference[1],2)+pow(position_difference[2],2));
//if (current_distance>distance)
// distance=current_distance;
distance += current_distance / number_of_neighbours;
}
//and we save the largest edge.
pnode->FastGetSolutionStepValue(MEAN_SIZE)=distance;
node_id=pnode->GetId();
}
}
mlast_node_id=node_id;
//we also calculate the element mean size in the same way, for the courant number
//also we set the right size to the LHS column for the pressure enrichments, in order to recover correctly the enrichment pressure
vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
// MEAN_SIZE per element: length of the SHORTEST edge of each element.
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
double mElemSize;
array_1d<double,3> Edge(3,0.0);
Edge = ielem->GetGeometry()[1].Coordinates() - ielem->GetGeometry()[0].Coordinates();
mElemSize = Edge[0]*Edge[0];
for (unsigned int d = 1; d < TDim; d++)
mElemSize += Edge[d]*Edge[d];
for (unsigned int i = 2; i < (TDim+1); i++)
for(unsigned int j = 0; j < i; j++)
{
Edge = ielem->GetGeometry()[i].Coordinates() - ielem->GetGeometry()[j].Coordinates();
double Length = Edge[0]*Edge[0];
for (unsigned int d = 1; d < TDim; d++)
Length += Edge[d]*Edge[d];
if (Length < mElemSize) mElemSize = Length;
}
mElemSize = sqrt(mElemSize);
ielem->GetValue(MEAN_SIZE) = mElemSize;
}
}
//matrix containing the position of the 4/15/45 particles that we will seed at the beginning
BoundedMatrix<double, 5*(1+TDim), 3 > pos;
BoundedMatrix<double, 5*(1+TDim), (1+TDim) > N;
int particle_id=0;
mnelems = mr_model_part.Elements().size();
std::cout << "about to resize vectors" << std::endl;
//setting the right size to the vector containing the particles assigned to each element
//particles vector. this vector contains ALL the particles in the simulation.
mparticles_vector.resize(mnelems*mmaximum_number_of_particles);
//and this vector contains the current number of particles that are in each element (currently zero)
mnumber_of_particles_in_elems.resize(mnelems);
mnumber_of_particles_in_elems=ZeroVector(mnelems);
//when moving the particles, an auxiliary vector is necessary (to store the previous number)
mnumber_of_particles_in_elems_aux.resize(mnelems);
//each element will have a list of pointers to all the particles that are inside.
//this vector contains the pointers to the vector of (particle) pointers of each element.
mvector_of_particle_pointers_vectors.resize(mnelems);
//int artz;
//std::cin >> artz;
int i_int=0; //careful! it's not the id, but the position inside the array!
std::cout << "about to create particles" << std::endl;
//now we seed: LOOP IN ELEMENTS
//using loop index, DO NOT parallelize this! change lines : mparticles_in_elems_pointers((ii*mmaximum_number_of_particles)+mparticles_in_elems_integers(ii)) = pparticle; and the next one
moffset=0;
//Convection_Particle& firstparticle =mparticles_vector[0];
for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
//(ielem->GetValue(BED_PARTICLE_POINTERS)) = ParticlePointerVector( mmaximum_number_of_particles*2, &firstparticle );
//ParticlePointerVector& particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
//now we link the mpointers_to_particle_pointers_vectors to the corresponding element
//mpointers_to_particle_pointers_vectors(ii) = &particle_pointers;
//now we resize the vector of particle pointers. it is double sized because we move the particles from an initial position (first half) to a final position (second half).
//for(int j=0; j<(mmaximum_number_of_particles*2); j++)
// particle_pointers.push_back(&firstparticle);
mvector_of_particle_pointers_vectors[ii] = ParticlePointerVector( mmaximum_number_of_particles*2 );
ParticlePointerVector& particle_pointers = mvector_of_particle_pointers_vectors[ii];
//int & number_of_particles = ielem->GetValue(NUMBER_OF_BED_PARTICLES);
int & number_of_particles = mnumber_of_particles_in_elems[ii];
number_of_particles=0;
Geometry< Node<3> >& geom = ielem->GetGeometry();
//unsigned int elem_id = ielem->Id();
//mareas_vector[i_int]=CalculateArea(geom); UNUSED SO COMMENTED
ComputeGaussPointPositions_initial(geom, pos, N); //we also have the standard (4), and 45
//now we seed the particles in the current element
for (unsigned int j = 0; j < pos.size1(); j++)
{
++particle_id;
Convection_Particle& pparticle = mparticles_vector[particle_id-1];
pparticle.X()=pos(j,0);
pparticle.Y()=pos(j,1);
pparticle.Z()=pos(j,2);
pparticle.GetEraseFlag()=false;
// initial particle value: unknown interpolated at the seed position
float & scalar1= pparticle.GetScalar1();
scalar1=0.0;
for (unsigned int k = 0; k < (TDim+1); k++)
{
scalar1 += N(j, k) * geom[k].FastGetSolutionStepValue(mUnknownVar);
}
particle_pointers(j) = &pparticle;
number_of_particles++ ;
}
++i_int;
}
m_nparticles=particle_id; //we save the last particle created as the total number of particles we have. For the moment this is true.
KRATOS_WATCH(m_nparticles);
//KRATOS_WATCH(mlast_elem_id);
mparticle_printing_tool_initialized=false;
//std::cin >> artz;
}
// Destructor: all containers are value members, so default cleanup suffices.
virtual ~MoveParticleUtilityScalarTransport()
{}
void MountBin()
{
KRATOS_TRY
//copy the elements to a new container, as the list will
//be shuffled duringthe construction of the tree
ContainerType& rElements = mr_model_part.ElementsArray();
IteratorType it_begin = rElements.begin();
IteratorType it_end = rElements.end();
//const int number_of_elem = rElements.size();
typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin, it_end ) );
paux.swap(mpBinsObjectDynamic);
//BinsObjectDynamic<Configure> mpBinsObjectDynamic(it_begin, it_end );
std::cout << "finished mounting Bins" << std::endl;
KRATOS_CATCH("")
}
void CalculateVelOverElemSize()
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
const double nodal_weight = 1.0/ (1.0 + double (TDim) );
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
Geometry<Node<3> >& geom = ielem->GetGeometry();
array_1d<double, 3 >vector_mean_velocity=ZeroVector(3);
for (unsigned int i=0; i != (TDim+1) ; i++)
vector_mean_velocity += geom[i].FastGetSolutionStepValue(mVelocityVar);
vector_mean_velocity *= nodal_weight;
const double mean_velocity = sqrt ( pow(vector_mean_velocity[0],2) + pow(vector_mean_velocity[1],2) + pow(vector_mean_velocity[2],2) );
ielem->GetValue(MEAN_VEL_OVER_ELEM_SIZE) = mean_velocity / ( ielem->GetValue(MEAN_SIZE) );
}
}
KRATOS_CATCH("")
}
//name self explained
void ResetBoundaryConditions()
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
if (inode->IsFixed(mUnknownVar))
{
inode->FastGetSolutionStepValue(mUnknownVar)=inode->GetSolutionStepValue(mUnknownVar,1);
}
}
}
KRATOS_CATCH("")
}
void CalculateDeltaVariables()
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(DELTA_SCALAR1) = inode->FastGetSolutionStepValue(mUnknownVar) - inode->FastGetSolutionStepValue(mProjectionVar) ;
}
}
KRATOS_CATCH("")
}
void CopyScalarVarToPreviousTimeStep(const Variable<double>& OriginVariable,
ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->GetSolutionStepValue(OriginVariable,1) = inode->FastGetSolutionStepValue(OriginVariable);
}
}
KRATOS_CATCH("")
}
//to move all the particles across the streamlines. heavy task!
void MoveParticles()
{
KRATOS_TRY
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//moveparticlesdiff reads from the pointers of one part (ie odd) and saves into the other part (ie even part)
//since it is the only function in the whole procedure that does this, it must use alternatively one part and the other.
//KRATOS_WATCH(offset)
bool even_timestep;
if (offset!=0) even_timestep=false;
else even_timestep=true;
const int post_offset = mmaximum_number_of_particles*int(even_timestep); //and we also save the offset to know the location in which we will save the pointers after we've moved the particles
//KRATOS_WATCH(post_offset)
double delta_t = CurrentProcessInfo[DELTA_TIME];
array_1d<double,TDim+1> N;
const unsigned int max_results = 10000;
//double integration_distance= 2.0;
max_nsubsteps = 10;
max_substep_dt=delta_t/double(max_nsubsteps);
vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
//before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
//ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;
int & number_of_particles = mnumber_of_particles_in_elems[ii]; //old_element->GetValue(NUMBER_OF_BED_PARTICLES);
mnumber_of_particles_in_elems_aux[ii]=number_of_particles;
mnumber_of_particles_in_elems[ii]=0;
//we reset the local vectors for a faster access;
}
}
std::cout << "convecting particles" << std::endl;
//We move the particles across the fixed mesh and saving change data into them (using the function MoveParticle)
#pragma omp barrier
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
ResultContainerType results(max_results);
WeakPointerVector< Element > elements_in_trajectory;
elements_in_trajectory.resize(20);
for(unsigned int ielem=element_partition[kkk]; ielem<element_partition[kkk+1]; ielem++)
{
//for(unsigned int ielem=0; ielem<mr_model_part.Elements().size(); ielem++)
//{
ModelPart::ElementsContainerType::iterator old_element = ielembegin+ielem;
const int old_element_id = old_element->Id();
ParticlePointerVector& old_element_particle_pointers = mvector_of_particle_pointers_vectors(old_element_id-1);
if ( (results.size()) !=max_results)
results.resize(max_results);
unsigned int number_of_elements_in_trajectory=0; //excluding the origin one (current one, ielem)
for(int ii=0; ii<(mnumber_of_particles_in_elems_aux(ielem)); ii++)
{
Convection_Particle & pparticle = old_element_particle_pointers[offset+ii];
Element::Pointer pcurrent_element( *old_element.base() );
ResultIteratorType result_begin = results.begin();
bool & erase_flag=pparticle.GetEraseFlag();
if (erase_flag==false){
MoveParticle(pparticle,pcurrent_element,elements_in_trajectory,number_of_elements_in_trajectory,result_begin,max_results); //saqué N de los argumentos, no lo necesito ya q empieza SIEMPRE en un nodo y no me importa donde termina
const int current_element_id = pcurrent_element->Id();
int & number_of_particles_in_current_elem = mnumber_of_particles_in_elems(current_element_id-1);
//int & number_of_water_particles_in_current_elem = mnumber_of_water_particles_in_elems(current_element_id-1);
if (number_of_particles_in_current_elem<mmaximum_number_of_particles && erase_flag==false)
{
{
ParticlePointerVector& current_element_particle_pointers = mvector_of_particle_pointers_vectors(current_element_id-1);
#pragma omp critical
{
if (number_of_particles_in_current_elem<mmaximum_number_of_particles) // we cant go over this node, there's no room. otherwise we would be in the position of the first particle of the next element!!
{
current_element_particle_pointers(post_offset+number_of_particles_in_current_elem) = &pparticle;
number_of_particles_in_current_elem++ ;
if (number_of_particles_in_current_elem>mmaximum_number_of_particles)
KRATOS_WATCH("MAL");
}
else
pparticle.GetEraseFlag()=true; //so we just delete it!
}
}
}
else
pparticle.GetEraseFlag()=true; //so we just delete it!
}
}
}
}
/*
//now we pass info from the local vector to the elements:
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;
old_element->GetValue(NUMBER_OF_BED_PARTICLES) = mnumber_of_particles_in_elems(ii);
//old_element->GetValue(NUMBER_OF_WATER_PARTICLES) = mnumber_of_water_particles_in_elems(ii);
}
}
*/
//after having changed everything we change the status of the modd_timestep flag:
moffset = post_offset;; //
KRATOS_CATCH("")
}
void TransferLagrangianToEulerian() //explicit
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
//const double delta_t =CurrentProcessInfo[DELTA_TIME];
const double threshold= 0.0/(double(TDim)+1.0);
std::cout << "projecting info to mesh" << std::endl;
const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//KRATOS_WATCH(offset) //(flag managed only by MoveParticles
//we must project data from the particles (lagrangian) into the eulerian mesh
//ValuesVectorType eulerian_nodes_old_temperature;
//int nnodes = mr_model_part.Nodes().size();
//array_1d<double,(n_nodes)> eulerian_nodes_sumweights;
//we save data from previous time step of the eulerian mesh in case we must reuse it later cos no particle was found around the nodes
//though we could've use a bigger buffer, to be changed later!
//after having saved data, we reset them to zero, this way it's easier to add the contribution of the surrounding particles.
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(mProjectionVar)=0.0;
inode->FastGetSolutionStepValue(YP)=0.0;
}
}
//adding contribution, loop on elements, since each element has stored the particles found inside of it
vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
array_1d<double,3*(TDim+1)> nodes_positions;
array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1));
array_1d<double,(TDim+1)> nodes_addedweights = ZeroVector((TDim+1));
//array_1d<double,(TDim+1)> weighting_inverse_divisor;
Geometry<Node<3> >& geom = ielem->GetGeometry();
for (int i=0 ; i!=(TDim+1) ; ++i)
{
nodes_positions[i*3+0]=geom[i].X();
nodes_positions[i*3+1]=geom[i].Y();
nodes_positions[i*3+2]=geom[i].Z();
//weighting_inverse_divisor[i]=1.0/((geom[i].FastGetSolutionStepValue(MEAN_SIZE))*1.01);
}
///KRATOS_WATCH(ielem->Id())
///KRATOS_WATCH(ielem->GetValue(NEIGHBOUR_NODES).size());
//int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_BED_PARTICLES);
//ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];
for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
{
if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
break;
Convection_Particle & pparticle = element_particle_pointers[offset+iii];
if (pparticle.GetEraseFlag()==false)
{
array_1d<double,3> & position = pparticle.Coordinates();
const float& particle_scalar1 = pparticle.GetScalar1(); // -1 if water, +1 if air
array_1d<double,TDim+1> N;
bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
if (is_found==false) //something went wrong. if it was close enough to the edge we simply send it inside the element.
{
KRATOS_WATCH(N);
for (int j=0 ; j!=(TDim+1); j++)
if (N[j]<0.0 && N[j]> -1e-5)
N[j]=1e-10;
}
for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
{
//double sq_dist = 0;
//these lines for a weighting function based on the distance (or square distance) from the node insteadof the shape functions
//for (int k=0 ; k!=(TDim); k++) sq_dist += ((position[k] - nodes_positions[j*3+k])*(position[k] - nodes_positions[j*3+k]));
//double weight = (1.0 - (sqrt(sq_dist)*weighting_inverse_divisor[j] ) );
double weight=N(j)*N(j);
//weight=N(j)*N(j)*N(j);
if (weight<threshold) weight=1e-10;
if (weight<0.0) {KRATOS_WATCH(weight)}//;weight=0.0;KRATOS_WATCH(velocity);KRATOS_WATCH(N);KRATOS_WATCH(number_of_particles_in_elem);}//{KRATOS_WATCH(weight); KRATOS_WATCH(geom[j].Id()); KRATOS_WATCH(position);}
else
{
nodes_addedweights[j]+= weight;
//nodes_addedtemp[j] += weight * particle_temp;
nodes_added_scalar1[j] += weight*particle_scalar1;
}//
}
}
}
for (int i=0 ; i!=(TDim+1) ; ++i) {
geom[i].SetLock();
geom[i].FastGetSolutionStepValue(mProjectionVar) +=nodes_added_scalar1[i];
geom[i].FastGetSolutionStepValue(YP) +=nodes_addedweights[i];
geom[i].UnSetLock();
}
}
}
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
double sum_weights = inode->FastGetSolutionStepValue(YP);
if (sum_weights>0.00001)
{
//inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT)=(inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT))/sum_weights; //resetting the temperature
double & height = inode->FastGetSolutionStepValue(mProjectionVar);
height /=sum_weights; //resetting the density
}
else //this should never happen because other ways to recover the information have been executed before, but leaving it just in case..
{
inode->FastGetSolutionStepValue(mProjectionVar)=inode->FastGetSolutionStepValue(mUnknownVar,1); //resetting the temperature
}
}
}
KRATOS_CATCH("")
}
///Projects the scalar carried by the particles (lagrangian) onto the eulerian mesh
///using an element-wise consistent mass matrix ("semi implicit" variant):
///for each element a (TDim+1)x(TDim+1) system is assembled from the particle
///shape-function values and inverted; its solution, plus a small lumped-mass part,
///is accumulated into the nodal projection variable mProjectionVar, while YP
///accumulates the nodal weights used for the final per-node averaging.
void TransferLagrangianToEulerianImp() //semi implicit
{
KRATOS_TRY
// ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
std::cout << "projecting info to mesh (semi implicit)" << std::endl;
const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//KRATOS_WATCH(offset) //(flag managed only by MoveParticles
//we must project data from the particles (lagrangian) into the eulerian mesh
//ValuesVectorType eulerian_nodes_old_temperature;
//int nnodes = mr_model_part.Nodes().size();
//array_1d<double,(n_nodes)> eulerian_nodes_sumweights;
//we save data from previous time step of the eulerian mesh in case we must reuse it later because no particle was found around the nodes
//though we could've used a bigger buffer, to be changed later!
//after having saved data, we reset them to zero, this way it's easier to add the contribution of the surrounding particles.
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
//first pass: reset the nodal accumulators (projected scalar and weight sum YP)
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(mProjectionVar)=0.0;
inode->FastGetSolutionStepValue(YP)=0.0;
}
}
//adding contribution, loop on elements, since each element has stored the particles found inside of it
vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
//creating a matrix for each of the problems. (declared once per thread, reused per element)
BoundedMatrix<double, TDim+1 , TDim+1 > mass_matrix; // WE ONLY NEED ONE! they are the same for all the variables! //_x,mass_matrix_y,mass_matrix_z,mass_matrix_d; //mass matrices for the projected vel (x,y,z) and the distance
array_1d<double,(TDim+1)> rhs_scalar1;
array_1d<double,3*(TDim+1)> nodes_positions;
array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1));
array_1d<double,(TDim+1)> nodes_addedweights = ZeroVector((TDim+1));
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
nodes_added_scalar1 = ZeroVector((TDim+1)); //resetting vectors
nodes_addedweights = ZeroVector((TDim+1)); //resetting vectors
mass_matrix = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices. WE ONLY NEED ONE! they are the same for all the variable. only the rhs changes.
//mass_matrix_y = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
//mass_matrix_z = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
//mass_matrix_d = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
rhs_scalar1 = ZeroVector((TDim+1)); //resetting vectors
Geometry<Node<3> >& geom = ielem->GetGeometry();
const double elem_volume = geom.Area();
for (int i=0 ; i!=(TDim+1) ; ++i) //saving the nodal positions for faster access
{
nodes_positions[i*3+0]=geom[i].X();
nodes_positions[i*3+1]=geom[i].Y();
nodes_positions[i*3+2]=geom[i].Z();
}
///KRATOS_WATCH(ielem->Id())
///KRATOS_WATCH(ielem->GetValue(NEIGHBOUR_NODES).size());
//int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_BED_PARTICLES);
//ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];
//loop over the particles stored inside this element, assembling mass matrix and rhs
for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
{
if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
break;
Convection_Particle & pparticle = element_particle_pointers[offset+iii];
if (pparticle.GetEraseFlag()==false)
{
array_1d<double,3> & position = pparticle.Coordinates();
const float& particle_scalar1 = pparticle.GetScalar1(); // -1 if water, +1 if air
array_1d<double,TDim+1> N;
bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
if (is_found==false) //something went wrong. if it was close enough to the edge we simply send it inside the element.
{
KRATOS_WATCH(N);
//clip slightly-negative shape functions so the particle contributes from inside the element
for (int j=0 ; j!=(TDim+1); j++)
if (N[j]<0.0 && N[j]> -1e-5)
N[j]=1e-10;
}
for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
{
double weight=N(j);
for (int k=0 ; k!=(TDim+1); k++) //building the mass matrix
mass_matrix(j,k) += weight*N(k);
rhs_scalar1[j] += weight * double(particle_scalar1);
//adding also a part with the lumped mass matrix to reduce overshoots and undershoots
if(true) //compile-time switch, deliberately left enabled
{
double this_particle_weight = weight*elem_volume/(double(number_of_particles_in_elem))*0.1; //can be increased or reduced to change the lumped mass contribution
nodes_addedweights[j]+= this_particle_weight;
nodes_added_scalar1[j] += this_particle_weight*particle_scalar1;
}
}
}
}
//now we invert the matrix: 4x4 general inverse in 3D, direct 3x3 inverse in 2D
BoundedMatrix<double, TDim+1 , TDim+1 > inverse_mass_matrix=ZeroMatrix(TDim+1 , TDim+1);
if(TDim==3)
InvertMatrix( mass_matrix, inverse_mass_matrix);
else
InvertMatrix3x3( mass_matrix, inverse_mass_matrix);
//and now compute the elemental contribution to the global system:
if(number_of_particles_in_elem>(TDim*3)) //otherwise it's impossible to correctly define the gradients, therefore the results inside the element are useless.
{
for (int i=0 ; i!=(TDim+1); i++)
{
for (int j=0 ; j!=(TDim+1); j++)
{
nodes_added_scalar1[i] += inverse_mass_matrix(i,j)*rhs_scalar1[j]*elem_volume*(1.0/(double(1+TDim)));
}
}
//and also to the mass matrix. LUMPED (but for the contribution of the gradient at elemental level.
for (int i=0 ; i!=(TDim+1); i++)
nodes_addedweights[i] += elem_volume*(1.0/(double(1+TDim)));
}
//scatter the elemental contributions to the shared nodes; SetLock guards against
//concurrent writes from other threads assembling neighbouring elements
for (int i=0 ; i!=(TDim+1) ; ++i) {
geom[i].SetLock();
geom[i].FastGetSolutionStepValue(mProjectionVar) +=nodes_added_scalar1[i];
geom[i].FastGetSolutionStepValue(YP) +=nodes_addedweights[i];
geom[i].UnSetLock();
}
}
}
//final pass: divide each nodal accumulator by the total weight to get the average
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
double sum_weights = inode->FastGetSolutionStepValue(YP);
if (sum_weights>0.00001)
{
double & scalar1 = inode->FastGetSolutionStepValue(mProjectionVar);
scalar1 /=sum_weights; //dividing by the sum of weights to recover the averaged scalar
}
else //this should never happen because other ways to recover the information have been executed before, but leaving it just in case..
{
inode->FastGetSolutionStepValue(mProjectionVar)=inode->FastGetSolutionStepValue(mUnknownVar,1); //fall back to the previous-step value of the unknown
}
}
}
KRATOS_CATCH("")
}
///Updates the scalar carried by every active (non-erased) particle by adding the
///nodal DELTA_SCALAR1 increment interpolated at the particle position. Particles
///are NOT moved. Runs in parallel over element partitions; each element owns the
///list of pointers to the particles located inside it.
void CorrectParticlesWithoutMovingUsingDeltaVariables()
{
    KRATOS_TRY
    //the array of pointers for each element has twice the required size so that we use
    //one half in odd timesteps and the other in even ones (flag managed only by MoveParticles)
    const int offset = moffset;
    ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
    vector<unsigned int> element_partition;
    #ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
    #else
    int number_of_threads = 1;
    #endif
    OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
    #pragma omp parallel for
    for(int kkk=0; kkk<number_of_threads; kkk++)
    {
        for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
        {
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            Element::Pointer pelement(*ielem.base());
            Geometry<Node<3> >& geom = ielem->GetGeometry();
            int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
            ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];
            for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
            {
                //BUGFIX: this bound check used '>', which let iii==mmaximum_number_of_particles
                //through and read one slot past this timestep's half of the doubled pointer
                //array (the sibling loops use '=='); '>=' keeps the access inside our portion.
                if (iii>=mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
                    break;
                Convection_Particle & pparticle = element_particle_pointers[offset+iii];
                bool erase_flag= pparticle.GetEraseFlag();
                if (erase_flag==false)
                {
                    CorrectParticleUsingDeltaVariables(pparticle,pelement,geom); //'lite' version, we pass by reference the geometry, so much cheaper
                }
            }
        }
    }
    KRATOS_CATCH("")
}
//**************************************************************************************************************
//**************************************************************************************************************
///Appends "candidate" to the weak-pointer vector v unless an entry with the
///same Id() is already stored (uniqueness is decided by Id only).
template< class TDataType > void AddUniqueWeakPointer
(WeakPointerVector< TDataType >& v, const typename TDataType::WeakPointer candidate)
{
    const auto candidate_id = (candidate.lock())->Id();
    for (typename WeakPointerVector< TDataType >::iterator it = v.begin(); it != v.end(); ++it)
    {
        if (it->Id() == candidate_id)
            return; //already present, nothing to add
    }
    v.push_back(candidate);
}
//**************************************************************************************************************
//**************************************************************************************************************
///Reseeds elements holding fewer than "minimum_number_of_particles" particles:
///new particles are created at the element Gauss points, convected backwards along
///the velocity field (MoveParticle_inverse_way) to pick up the scalar at their
///departure point, stored in a free slot of the global particle array, and linked
///into the element's particle-pointer list. Free slots are claimed under an
///omp critical section so two threads cannot grab the same one.
void PreReseed(int minimum_number_of_particles)
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
const int offset =moffset;
const int max_results = 1000;
//tools for the parallelization
unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
vector<unsigned int> elem_partition;
int number_of_rows=mr_model_part.Elements().size();
elem_partition.resize(number_of_threads + 1);
int elem_partition_size = number_of_rows / number_of_threads;
elem_partition[0] = 0;
elem_partition[number_of_threads] = number_of_rows;
//KRATOS_WATCH(elem_partition_size);
for (unsigned int i = 1; i < number_of_threads; i++)
elem_partition[i] = elem_partition[i - 1] + elem_partition_size;
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
#pragma omp parallel firstprivate(elem_partition)
{
ResultContainerType results(max_results);
int k = OpenMPUtils::ThisThread();
//ModelPart::ElementsContainerType::iterator it_begin = mr_model_part.ElementsBegin() + elem_partition[k];
//ModelPart::ElementsContainerType::iterator it_end = mr_model_part.ElementsBegin() + elem_partition[k+1] ;
//ModelPart::NodesContainerType local_list=aux[k];
//PointerVectorSet<Convection_Particle, IndexedObject> & list=aux[k];
//KRATOS_WATCH(k);
BoundedMatrix<double, (TDim+1), 3 > pos;
BoundedMatrix<double, (TDim+1) , (TDim+1) > N;
unsigned int freeparticle=0; //we start with the first position in the particles array
//int local_id=1;
for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
{
//const int & elem_id = ielem->Id();
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
results.resize(max_results);
//const int & elem_id = ielem->Id();
//ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
//int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_BED_PARTICLES);
int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];
if (number_of_particles_in_elem<(minimum_number_of_particles))// && (ielem->GetGeometry())[0].Y()<0.10 )
{
//KRATOS_WATCH("elem with little particles")
Geometry< Node<3> >& geom = ielem->GetGeometry();
ComputeGaussPointPositionsForPreReseed(geom, pos, N);
//double conductivity = ielem->GetProperties()[CONDUCTIVITY];
//KRATOS_WATCH(conductivity);
for (unsigned int j = 0; j < (pos.size1()); j++) //i am dropping the last one, the one in the middle of the element
{
//scan forward for a free (erased) particle slot; the erase flag is re-checked
//inside the critical section so only one thread can claim a given slot
//NOTE(review): freeparticle is not bounds-checked against the global particle
//array size here — assumes a free slot is always found; TODO confirm
bool keep_looking = true;
while(keep_looking)
{
if (mparticles_vector[freeparticle].GetEraseFlag()==true)
{
#pragma omp critical
{
if (mparticles_vector[freeparticle].GetEraseFlag()==true)
{
mparticles_vector[freeparticle].GetEraseFlag()=false;
keep_looking=false;
}
}
if (keep_looking==false)
break;
else
freeparticle++;
}
else
{
freeparticle++;
}
}
//create the new particle at the Gauss point position
Convection_Particle pparticle(pos(j,0),pos(j,1),pos(j,2));
array_1d<double,TDim+1>aux2_N;
bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux2_N);
if (is_found==false)
{
KRATOS_WATCH(aux2_N);
}
pparticle.GetEraseFlag()=false;
ResultIteratorType result_begin = results.begin();
Element::Pointer pelement( *ielem.base() );
//convect the particle backwards to pick up its scalar from the mesh
MoveParticle_inverse_way(pparticle, pelement, result_begin, max_results);
//and we copy it to the array:
mparticles_vector[freeparticle] = pparticle;
element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle];
pparticle.GetEraseFlag()=false;
number_of_particles_in_elem++;
}
}
}
}
KRATOS_CATCH("")
}
//**************************************************************************************************************
//**************************************************************************************************************
///Reseeds elements that still hold fewer than "minimum_number_of_particles"
///particles after convection ("pooyan's way"): 3+2*TDim new particles are placed
///at predefined positions inside the element and their scalar is interpolated
///directly from the current mesh solution (no backward convection, unlike
///PreReseed). Free slots in the global particle array are claimed under an
///omp critical section.
void PostReseed(int minimum_number_of_particles) //pooyan's way
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
const int offset = moffset;
//TOOLS FOR THE PARALLELIZATION
//int last_id= (mr_linea_model_part.NodesEnd()-1)->Id();
unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
//KRATOS_WATCH(number_of_threads);
vector<unsigned int> elem_partition;
int number_of_rows=mr_model_part.Elements().size();
//KRATOS_WATCH(number_of_threads);
//KRATOS_THROW_ERROR(std::logic_error, "Add ----NODAL_H---- variable!!!!!! ERROR", "");
elem_partition.resize(number_of_threads + 1);
int elem_partition_size = number_of_rows / number_of_threads;
elem_partition[0] = 0;
elem_partition[number_of_threads] = number_of_rows;
//KRATOS_WATCH(elem_partition_size);
for (unsigned int i = 1; i < number_of_threads; i++)
elem_partition[i] = elem_partition[i - 1] + elem_partition_size;
//typedef Node < 3 > PointType;
//std::vector<ModelPart::NodesContainerType> aux;// aux;
//aux.resize(number_of_threads);
//ModelPart::NodesContainerType::iterator it_begin_particle_model_part = mr_linea_model_part.NodesBegin();
//ModelPart::NodesContainerType::iterator it_end_particle_model_part = mr_linea_model_part.NodesEnd();
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
#pragma omp parallel firstprivate(elem_partition) // firstprivate(results)//we will add the nodes in different parts of aux and later assemble everything together, renaming particle ids to get consecutive ids
{
unsigned int reused_particles=0;
unsigned int freeparticle = 0; //we start by the first position;
int k = OpenMPUtils::ThisThread();
//ModelPart::ElementsContainerType::iterator it_begin = mr_model_part.ElementsBegin() + elem_partition[k];
//ModelPart::ElementsContainerType::iterator it_end = mr_model_part.ElementsBegin() + elem_partition[k+1] ;
BoundedMatrix<double, (3+2*TDim), 3 > pos; //7 particles (2D) or 9 particles (3D)
BoundedMatrix<double, (3+2*TDim), (TDim+1) > N;
double mesh_scalar1;
array_1d<int, (3+2*TDim) > positions;
unsigned int number_of_reseeded_particles;
//unsigned int number_of_water_reseeded_particles;
//array_1d<double, 3 > nodes_distances;
for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
{
//const int & elem_id = ielem->Id();
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
//int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_BED_PARTICLES);
//ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];
Geometry< Node<3> >& geom = ielem->GetGeometry();
if ( (number_of_particles_in_elem<(minimum_number_of_particles)))// && (geom[0].Y()<0.10) ) || (number_of_water_particles_in_elem>2 && number_of_particles_in_elem<(minimum_number_of_particles) ) )
{
//bool reseed_more=false;
number_of_reseeded_particles=0;
//reseed_more=true;
number_of_reseeded_particles= 3+2*TDim;
ComputeGaussPointPositionsForPostReseed(geom, pos, N);
for (unsigned int j = 0; j < number_of_reseeded_particles; j++)
{
//now we have to find an empty space ( a particle that was about to be deleted) in the particles model part. once found. there will be our renewed particle:
//the erase flag is re-checked inside the critical section so only one thread can claim a given slot
bool keep_looking = true;
while(keep_looking)
{
if (mparticles_vector[freeparticle].GetEraseFlag()==true)
{
#pragma omp critical
{
if (mparticles_vector[freeparticle].GetEraseFlag()==true)
{
mparticles_vector[freeparticle].GetEraseFlag()=false;
keep_looking=false;
}
}
if (keep_looking==false)
break;
else
freeparticle++;
}
else
{
freeparticle++;
}
}
Convection_Particle pparticle(pos(j,0),pos(j,1),pos(j,2));
array_1d<double,TDim+1>aux_N;
bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux_N);
if (is_found==false)
{
KRATOS_WATCH(aux_N);
KRATOS_WATCH(j)
KRATOS_WATCH(ielem->Id())
}
//interpolate the scalar for the new particle directly from the mesh solution
mesh_scalar1 = 0.0;
for (unsigned int l = 0; l < (TDim+1); l++)
{
mesh_scalar1 += N(j,l) * geom[l].FastGetSolutionStepValue(mUnknownVar);
}
pparticle.GetScalar1()=mesh_scalar1;
pparticle.GetEraseFlag()=false;
mparticles_vector[freeparticle]=pparticle;
element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle];
number_of_particles_in_elem++;
//NOTE(review): keep_looking is always false when the while loop above exits,
//so this error branch can never fire — the "no free cell" condition is
//effectively unchecked here; TODO confirm intended behaviour
if (keep_looking)
{
KRATOS_THROW_ERROR(std::logic_error, "FINISHED THE LIST AND COULDNT FIND A FREE CELL FOR THE NEW PARTICLE!", "");
}
else
{
reused_particles++;
}
}
}
}
}
KRATOS_CATCH("")
}
///Copies the currently active particles into the auxiliary "lagrangian_model_part"
///so they can be printed/post-processed as nodes (scalar -> mUnknownVar,
///position -> DISPLACEMENT). Only one out of every "input_filter_factor" particles
///is transferred. On the first call the printing model part must be empty; it is
///then filled with a fixed-size pool of nodes that is reused on every call, with
///unused nodes parked at (-10,-10,-10).
void ExecuteParticlesPritingTool( ModelPart& lagrangian_model_part, int input_filter_factor )
{
    KRATOS_TRY
    //mfilter_factor; //we will only print one out of every "filter_factor" particles of the total particle list
    if(mparticle_printing_tool_initialized==false)
    {
        mfilter_factor=input_filter_factor;
        //BUGFIX: the original test (NodesBegin()-NodesEnd()>0) is never true for a valid
        //iterator range, so a non-empty model part was silently accepted; check the size instead.
        if(lagrangian_model_part.Nodes().size()>0)
            KRATOS_THROW_ERROR(std::logic_error, "AN EMPTY MODEL PART IS REQUIRED FOR THE PRINTING OF PARTICLES", "");
        lagrangian_model_part.AddNodalSolutionStepVariable(DISPLACEMENT);
        lagrangian_model_part.AddNodalSolutionStepVariable(mUnknownVar);
        //create the fixed pool of printing nodes (enough for the filtered particle count)
        for (unsigned int i=0; i!=((mmaximum_number_of_particles*mnelems)/mfilter_factor)+mfilter_factor; i++)
        {
            Node < 3 > ::Pointer pnode = lagrangian_model_part.CreateNewNode( i+mlast_node_id+1 , 0.0, 0.0, 0.0); //remember: this is the new (printing) model part!
            //pnode->SetBufferSize(mr_model_part.NodesBegin()->GetBufferSize());
            pnode->SetBufferSize(1); //only the current step is needed for printing
        }
        mparticle_printing_tool_initialized=true;
    }
    //resetting data of the unused particles: park them far away and zero the scalar
    const double inactive_particle_position= -10.0;
    array_1d<double,3>inactive_particle_position_vector;
    inactive_particle_position_vector(0)=inactive_particle_position;
    inactive_particle_position_vector(1)=inactive_particle_position;
    inactive_particle_position_vector(2)=inactive_particle_position;
    ModelPart::NodesContainerType::iterator inodebegin = lagrangian_model_part.NodesBegin();
    for(unsigned int ii=0; ii<lagrangian_model_part.Nodes().size(); ii++)
    {
        ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
        inode->FastGetSolutionStepValue(mUnknownVar) = 0.0;
        inode->FastGetSolutionStepValue(DISPLACEMENT) = inactive_particle_position_vector;
    }
    int counter=0;
    for (int i=0; i!=mmaximum_number_of_particles*mnelems; i++)
    {
        Convection_Particle& pparticle =mparticles_vector[i];
        if(pparticle.GetEraseFlag()==false && i%mfilter_factor==0)
        {
            ModelPart::NodesContainerType::iterator inode = inodebegin+counter; //copying info from the particle to the (printing) node.
            inode->FastGetSolutionStepValue(mUnknownVar) = pparticle.GetScalar1();
            inode->FastGetSolutionStepValue(DISPLACEMENT) = pparticle.Coordinates();
            counter++;
        }
    }
    KRATOS_CATCH("")
}
protected:
private:
///This function moves a particle along the streamline defined by the mesh
///velocity field (mVelocityVar, interpolated with the shape functions at the
///particle position). The movement is performed in nsubsteps over the total
///time DELTA_TIME; the number of substeps targets a substep Courant number of
///about 0.1. If at any point the particle cannot be located in the mesh it is
///flagged for erasure. Elements crossed along the way are recorded in
///elements_in_trajectory so later particles from the same element can reuse them.
void MoveParticle( Convection_Particle & pparticle,
Element::Pointer & pelement,
WeakPointerVector< Element >& elements_in_trajectory,
unsigned int & number_of_elements_in_trajectory,
ResultIteratorType result_begin,
const unsigned int MaxNumberOfResults)
{
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
unsigned int nsubsteps;
double substep_dt;
bool KEEP_INTEGRATING=false;
bool is_found;
//bool have_air_node;
//bool have_water_node;
array_1d<double,3> vel;
array_1d<double,3> vel_without_other_phase_nodes=ZeroVector(3);
array_1d<double,3> position;
array_1d<double,3> mid_position;
array_1d<double,TDim+1> N;
//we start with the first position, then it will enter the loop.
position = pparticle.Coordinates(); //initial coordinates
double only_integral = 0.0 ;
is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
KEEP_INTEGRATING=true;
Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
//interpolate the mesh velocity at the particle position
vel=ZeroVector(3);
for(unsigned int j=0; j<(TDim+1); j++)
{
noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j];
}
//calculating substep to get +- courant(substep) = 0.1
nsubsteps = 10.0 * (delta_t * pelement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
if (nsubsteps<1)
nsubsteps=1;
substep_dt = delta_t / double(nsubsteps);
only_integral = 1.0;// weight;//*double(nsubsteps);
position += vel*substep_dt;//weight;
//DONE THE FIRST LOCATION OF THE PARTICLE, NOW WE PROCEED TO STREAMLINE INTEGRATION USING THE MESH SEDIMENT_VELOCITY
//////////////////////////////////////////////////////////////////////////////////////////////////////
unsigned int check_from_element_number=0;
for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
{
if (KEEP_INTEGRATING==true)
{
is_found = FindNodeOnMesh(position, N ,pelement,elements_in_trajectory,number_of_elements_in_trajectory,check_from_element_number,result_begin,MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
vel = ZeroVector(3);
for(unsigned int j=0; j<(TDim+1); j++)
{
noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j];
}
only_integral += 1.0; //values saved for the current time step
position+=vel*substep_dt;//weight;
}
else
{
//particle left the domain (or could not be located): stop integrating
KEEP_INTEGRATING=false;
break;
}
}
else
break;
}
}
//particles that could not be tracked to the end are flagged for erasure
if (KEEP_INTEGRATING==false) (pparticle.GetEraseFlag()=true);
else is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //we must save the pointer of the last element that we're in (inside the pointervector pelement)
if (is_found==false) ( pparticle.GetEraseFlag()=true);
pparticle.Coordinates() = position;
}
///Adds the nodal DELTA_SCALAR1 increment, interpolated at the particle position,
///to the scalar carried by the particle. The particle is NOT moved.
///'lite' version: the geometry is passed by reference, so it is much cheaper.
void CorrectParticleUsingDeltaVariables(
    Convection_Particle & pparticle,
    Element::Pointer & pelement,
    Geometry< Node<3> >& geom)
{
    array_1d<double,3> particle_coordinates = pparticle.Coordinates();
    array_1d<double,TDim+1> shape_functions;
    bool inside = CalculatePosition(geom,particle_coordinates[0],particle_coordinates[1],particle_coordinates[2],shape_functions);
    if(inside == false)
    {
        //the particle fell (slightly) outside its element: clip negative
        //shape-function values so the interpolation below stays usable.
        KRATOS_WATCH(shape_functions)
        for (int j=0 ; j!=(TDim+1); j++)
        {
            if (shape_functions[j]<0.0 )
                shape_functions[j]=1e-10;
        }
    }
    //interpolate the nodal increment at the particle position
    double interpolated_delta = 0.0;
    for(unsigned int j=0; j<(TDim+1); j++)
        interpolated_delta += geom[j].FastGetSolutionStepValue(DELTA_SCALAR1)*shape_functions[j];
    //apply the correction to the particle's scalar
    float & particle_scalar1 = pparticle.GetScalar1();
    particle_scalar1 = particle_scalar1 + interpolated_delta;
}
///Convects a particle BACKWARDS along the mesh velocity field (substepped
///streamline integration, position -= vel*dt) and assigns to the particle the
///scalar mUnknownVar interpolated at the last located point. Used to initialise
///freshly reseeded particles. pelement is a starting guess for the search and is
///overwritten with the element the particle ends up in (NOT a reference for the
///caller's ownership — we shall not overwrite the element it belongs to).
void MoveParticle_inverse_way(
Convection_Particle & pparticle,
Element::Pointer & pelement, //NOT A REFERENCE!! WE SHALL NOT OVERWRITE THE ELEMENT IT BELONGS TO!
ResultIteratorType result_begin,
const unsigned int MaxNumberOfResults)
{
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
unsigned int nsubsteps;
double substep_dt;
bool KEEP_INTEGRATING=false;
bool is_found;
array_1d<double,3> vel;
array_1d<double,3> position;
array_1d<double,3> mid_position;
array_1d<double,TDim+1> N;
double scalar1 = 0.0;
//we start with the first position, then it will enter the loop.
position = pparticle.Coordinates(); // + (pparticle)->FastGetSolutionStepValue(DISPLACEMENT); //initial coordinates
double only_integral = 0.0 ;
is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
KEEP_INTEGRATING=true;
Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
//interpolate both the velocity (for convection) and the scalar (the value we carry back)
vel=ZeroVector(3);
scalar1=0.0;
for(unsigned int j=0; j<(TDim+1); j++)
{
scalar1 += geom[j].FastGetSolutionStepValue(mUnknownVar)*N(j);
noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j];
}
//calculating substep to get +- courant(substep) = 1/4
nsubsteps = 10.0 * (delta_t * pelement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
if (nsubsteps<1)
nsubsteps=1;
substep_dt = delta_t / double(nsubsteps);
only_integral = 1.0;// weight;//*double(nsubsteps);
position -= vel*substep_dt;//weight; //note the MINUS: we integrate backwards in time
for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
{ if (KEEP_INTEGRATING==true) {
is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
vel=ZeroVector(3);
scalar1=0.0;
for(unsigned int j=0; j<(TDim+1); j++)
{
noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j] ;
scalar1 += geom[j].FastGetSolutionStepValue(mUnknownVar)*N(j);
}
only_integral += 1.0;//weight ; //values saved for the current time step
position-=vel*substep_dt;//weight;
}
else KEEP_INTEGRATING=false; //left the domain: keep the last interpolated scalar
}
}
//the particle takes the scalar found at the (last located) departure point
pparticle.GetScalar1()=scalar1;
}
//else {KRATOS_WATCH(position); }
}
///this function should find the element into which a given node is located
///and return a pointer to the element and the vector containing the
///shape functions that define the position within the element
///if "false" is returned the element is not found.
///Search order: (1) the element the particle was last in (pelement),
///(2) its neighbour elements, (3) the spatial bins container.
bool FindNodeOnMesh( array_1d<double,3>& position,
array_1d<double,TDim+1>& N,
Element::Pointer & pelement,
ResultIteratorType result_begin,
const unsigned int MaxNumberOfResults)
{
typedef std::size_t SizeType;
const array_1d<double,3>& coords = position;
array_1d<double,TDim+1> aux_N;
//before using the bin to search for possible elements we check first the last element in which the particle was.
Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry();
bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N);
if(is_found_1 == true) //that was easy!
{
return true;
}
//to begin with we check the neighbour elements; it is a bit more expensive
WeakPointerVector< Element >& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS);
//the first we check is the one that has negative shape function, because it means it went outside in this direction:
//commented, it is not faster than simply checking all the neighbours (branching)
/*
unsigned int checked_element=0;
for (unsigned int i=0;i!=(TDim+1);i++)
{
if (N[i]<0.0)
{
checked_element=i;
Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
if (is_found_2)
{
pelement=Element::Pointer(((neighb_elems(i))));
N=aux_N;
return true;
}
break;
}
}
*/
//we check all the neighbour elements
for (unsigned int i=0;i!=(neighb_elems.size());i++)
{
Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
if (is_found_2)
{
pelement=Element::Pointer(((neighb_elems(i))));
return true;
}
}
//if checking all the neighbour elements did not work, we have to use the bins
//ask to the container for the list of candidate elements
SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults );
if(results_found>0){
//loop over the candidate elements and check if the particle falls within
for(SizeType i = 0; i< results_found; i++)
{
Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry();
//find local position
bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
if(is_found == true)
{
pelement=Element::Pointer((*(result_begin+i)));
return true;
}
}
}
//if nothing worked, then:
//not found case
return false;
}
// VERSION INCLUDING PREDEFINED ELEMENTS FOLLOWING A TRAJECTORY
// VERSION INCLUDING PREDEFINED ELEMENTS FOLLOWING A TRAJECTORY
//Overload that additionally maintains a short list of elements crossed by a
//previous particle that started from the same element, so later particles can
//try those first. Search order: last element -> trajectory list (from
//check_from_element_number onwards) -> neighbours -> spatial bins.
//Elements found via neighbours/bins are appended to the trajectory list
//(capacity hard-coded to 20 -- TODO confirm against the list's declaration).
bool FindNodeOnMesh( array_1d<double,3>& position,
array_1d<double,TDim+1>& N,
Element::Pointer & pelement,
WeakPointerVector< Element >& elements_in_trajectory,
unsigned int & number_of_elements_in_trajectory,
unsigned int & check_from_element_number,
ResultIteratorType result_begin,
const unsigned int MaxNumberOfResults)
{
typedef std::size_t SizeType;
const array_1d<double,3>& coords = position;
array_1d<double,TDim+1> aux_N;
//before using the bin to search for possible elements we check first the last element in which the particle was.
Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry();
bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N);
if(is_found_1 == true)
{
return true; //that was easy!
}
//if it was not found in the first element, we can proceed to check in the following elements (in the trajectory defined by previous particles that started from the same element.
for (unsigned int i=(check_from_element_number);i!=number_of_elements_in_trajectory;i++)
{
Geometry<Node<3> >& geom = elements_in_trajectory[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
if (is_found_2)
{
pelement=Element::Pointer(((elements_in_trajectory(i))));
N=aux_N;
check_from_element_number = i+1 ; //now i element matches pelement, so to avoid checking twice the same element we send the counter to the following element.
return true;
}
}
//now we check the neighbour elements:
WeakPointerVector< Element >& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS);
//the first we check is the one that has negative shape function, because it means it went outside in this direction:
//commented, it is not faster than simply checking all the neighbours (branching)
/*
unsigned int checked_element=0;
for (unsigned int i=0;i!=(TDim+1);i++)
{
if (N[i]<0.0)
{
checked_element=i;
Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
if (is_found_2)
{
pelement=Element::Pointer(((neighb_elems(i))));
N=aux_N;
return true;
}
break;
}
}
*/
//we check all the neighbour elements
for (unsigned int i=0;i!=(neighb_elems.size());i++)
{
Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
if (is_found_2)
{
pelement=Element::Pointer(((neighb_elems(i))));
if (number_of_elements_in_trajectory<20)
{
elements_in_trajectory(number_of_elements_in_trajectory)=pelement;
number_of_elements_in_trajectory++;
check_from_element_number = number_of_elements_in_trajectory; //we do it after doing the ++ to the counter, so we wouldn't enter the loop that searches in the elements_in_trajectory list. we are the particle that is adding elements to the list
}
return true;
}
}
//if checking all the neighbour elements did not work, we have to use the bins
//ask to the container for the list of candidate elements
SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults );
if(results_found>0)
{
//loop over the candidate elements and check if the particle falls within
for(SizeType i = 0; i< results_found; i++)
{
Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry();
//find local position
bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
if(is_found == true)
{
pelement=Element::Pointer((*(result_begin+i)));
if (number_of_elements_in_trajectory<20)
{
elements_in_trajectory(number_of_elements_in_trajectory)=pelement;
number_of_elements_in_trajectory++;
check_from_element_number = number_of_elements_in_trajectory; //we do it after doing the ++ to the counter, so we wouldn't enter the loop that searches in the elements_in_trajectory list. we are the particle that is adding elements to the list
}
return true;
}
}
}
//not found case
return false;
}
//***************************************
//***************************************
//***************************************
//***************************************
///Computes the barycentric (area) coordinates N of point (xc,yc) with respect
///to the triangle "geom" and reports whether the point lies inside it.
///zc is unused in this 2D overload (kept so all overloads share a signature).
///Throws if the triangle is degenerate (exactly zero area).
inline bool CalculatePosition(Geometry<Node < 3 > >&geom,
const double xc, const double yc, const double zc,
array_1d<double, 3 > & N
)
{
    const double x0 = geom[0].X();
    const double y0 = geom[0].Y();
    const double x1 = geom[1].X();
    const double y1 = geom[1].Y();
    const double x2 = geom[2].X();
    const double y2 = geom[2].Y();

    const double area = CalculateVol(x0, y0, x1, y1, x2, y2);
    if (area == 0.0)
    {
        KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", "");
    }
    const double inv_area = 1.0 / area;

    // Each weight is the sub-triangle area opposite the node, over the total area.
    N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
    N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
    N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;

    // Inside (or on the boundary) iff every weight lies in [0,1].
    return (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 &&
            N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0);
}
////////////
//using the pre loaded nodal coordinates
////////////
//using the pre loaded nodal coordinates
///Same barycentric test as the Geometry overload, but reading node
///coordinates from a flat array laid out as [x0,y0,z0, x1,y1,z1, x2,y2,z2]
///(the z entries are skipped in this 2D overload; zc is likewise unused).
inline bool CalculatePosition(const array_1d<double,3*(TDim+1)>& nodes_positions,
const double xc, const double yc, const double zc,
array_1d<double, 3 > & N
)
{
    const double& x0 = nodes_positions[0];
    const double& y0 = nodes_positions[1];
    const double& x1 = nodes_positions[3];
    const double& y1 = nodes_positions[4];
    const double& x2 = nodes_positions[6];
    const double& y2 = nodes_positions[7];

    const double area = CalculateVol(x0, y0, x1, y1, x2, y2);
    if (area == 0.0)
    {
        KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", "");
    }
    const double inv_area = 1.0 / area;

    // Sub-triangle area opposite each node, normalized by the total area.
    N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
    N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
    N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;

    // Point is inside (or on the boundary) iff all weights are in [0,1].
    return (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 &&
            N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0);
}
//***************************************
//***************************************
//***************************************
//***************************************
///Computes the barycentric (volume) coordinates N of point (xc,yc,zc) with
///respect to the tetrahedron "geom" and reports whether the point is inside.
///Throws if the tetrahedron volume is below the tiny threshold; note this
///also rejects inverted (negative-volume) tetrahedra, unlike the 2D overload
///which only rejects exactly-zero area.
inline bool CalculatePosition(Geometry<Node < 3 > >&geom,
const double xc, const double yc, const double zc,
array_1d<double, 4 > & N
)
{
double x0 = geom[0].X();
double y0 = geom[0].Y();
double z0 = geom[0].Z();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double z1 = geom[1].Z();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double z2 = geom[2].Z();
double x3 = geom[3].X();
double y3 = geom[3].Y();
double z3 = geom[3].Z();
double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
double inv_vol = 0.0;
if (vol < 0.000000000000000000000000000001) //1e-30: degenerate or inverted element
{
KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", "");
} else
{
inv_vol = 1.0 / vol;
}
//each weight = volume of the sub-tetrahedron opposite that node / total volume
N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 &&
N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0)
//if the xc yc zc is inside the tetrahedron return true
return true;
return false;
}
///////////////////
//using the pre loaded nodal coordinates
///////////////////
//using the pre loaded nodal coordinates
///Same barycentric test as the Geometry overload above, but reading node
///coordinates from a flat array laid out as [x0,y0,z0, x1,y1,z1, ...].
///Throws on volume below 1e-30 (degenerate or inverted tetrahedron).
inline bool CalculatePosition(const array_1d<double,3*(TDim+1)>& nodes_positions,
const double xc, const double yc, const double zc,
array_1d<double, 4 > & N
)
{
const double& x0 = nodes_positions[0];
const double& y0 = nodes_positions[1];
const double& z0 = nodes_positions[2];
const double& x1 = nodes_positions[3];
const double& y1 = nodes_positions[4];
const double& z1 = nodes_positions[5];
const double& x2 = nodes_positions[6];
const double& y2 = nodes_positions[7];
const double& z2 = nodes_positions[8];
const double& x3 = nodes_positions[9];
const double& y3 = nodes_positions[10];
const double& z3 = nodes_positions[11];
double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
double inv_vol = 0.0;
if (vol < 0.000000000000000000000000000001) //1e-30: degenerate or inverted element
{
KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", "");
} else
{
inv_vol = 1.0 / vol;
}
//each weight = volume of the sub-tetrahedron opposite that node / total volume
N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 &&
N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0)
//if the xc yc zc is inside the tetrahedron return true
return true;
return false;
}
///Signed area of the triangle (p0, p1, p2):
///0.5 * cross((p1 - p0), (p2 - p0)). Positive for counter-clockwise ordering.
inline double CalculateVol(const double x0, const double y0,
const double x1, const double y1,
const double x2, const double y2
)
{
    const double ax = x1 - x0;
    const double ay = y1 - y0;
    const double bx = x2 - x0;
    const double by = y2 - y0;
    return 0.5 * (ax * by - ay * bx);
}
//***************************************
//***************************************
//***************************************
//***************************************
///Signed volume of the tetrahedron (p0, p1, p2, p3):
///det of the three edge vectors from p0, divided by 6.
inline double CalculateVol(const double x0, const double y0, const double z0,
const double x1, const double y1, const double z1,
const double x2, const double y2, const double z2,
const double x3, const double y3, const double z3
)
{
    // Edge vectors from node 0.
    const double x10 = x1 - x0;
    const double y10 = y1 - y0;
    const double z10 = z1 - z0;
    const double x20 = x2 - x0;
    const double y20 = y2 - y0;
    const double z20 = z2 - z0;
    const double x30 = x3 - x0;
    const double y30 = y3 - y0;
    const double z30 = z3 - z0;
    // 3x3 determinant, expanded; divided by 6 via a precomputed constant.
    const double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30;
    return detJ * 0.1666666666666666666667;
}
///Seeds 4 particles inside a triangle: three biased towards each node and one
///at the centroid. Writes their shape-function weights into rows 0-3 of N and
///their Cartesian positions into rows 0-3 of pos; rows 4-6 of both 7-row
///matrices are left untouched (presumably for callers that only use 4 rows --
///TODO confirm).
void ComputeGaussPointPositions_4(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos,BoundedMatrix<double, 7, 3 > & N)
{
double one_third = 1.0 / 3.0;
//NOTE: despite their names, these hold 0.15 and 0.7 (not 1/6 and 2/3);
//each row of weights still sums to 1 (0.15 + 0.15 + 0.7).
double one_sixt = 0.15; //1.0 / 6.0;
double two_third = 0.7; //2.0 * one_third;
N(0, 0) = one_sixt;
N(0, 1) = one_sixt;
N(0, 2) = two_third;
N(1, 0) = two_third;
N(1, 1) = one_sixt;
N(1, 2) = one_sixt;
N(2, 0) = one_sixt;
N(2, 1) = two_third;
N(2, 2) = one_sixt;
N(3, 0) = one_third;
N(3, 1) = one_third;
N(3, 2) = one_third;
//first
pos(0, 0) = one_sixt * geom[0].X() + one_sixt * geom[1].X() + two_third * geom[2].X();
pos(0, 1) = one_sixt * geom[0].Y() + one_sixt * geom[1].Y() + two_third * geom[2].Y();
pos(0, 2) = one_sixt * geom[0].Z() + one_sixt * geom[1].Z() + two_third * geom[2].Z();
//second
pos(1, 0) = two_third * geom[0].X() + one_sixt * geom[1].X() + one_sixt * geom[2].X();
pos(1, 1) = two_third * geom[0].Y() + one_sixt * geom[1].Y() + one_sixt * geom[2].Y();
pos(1, 2) = two_third * geom[0].Z() + one_sixt * geom[1].Z() + one_sixt * geom[2].Z();
//third
pos(2, 0) = one_sixt * geom[0].X() + two_third * geom[1].X() + one_sixt * geom[2].X();
pos(2, 1) = one_sixt * geom[0].Y() + two_third * geom[1].Y() + one_sixt * geom[2].Y();
pos(2, 2) = one_sixt * geom[0].Z() + two_third * geom[1].Z() + one_sixt * geom[2].Z();
//fourth (centroid)
pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X();
pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y();
pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z();
}
///Seeds 7 particles inside a triangle for post-reseeding: three near the
///nodes (weights 0.12/0.12/0.76), one at the centroid, and three near the
///edge midpoints (weights 0.12/0.44/0.44). Every row of weights sums to 1.
///Weights go into N (7x3), Cartesian positions into pos (7x3).
void ComputeGaussPointPositionsForPostReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos,BoundedMatrix<double, 7, 3 > & N) //2d
{
double one_third = 1.0 / 3.0;
//NOTE: despite their names, these hold 0.12 and 0.76.
double one_eight = 0.12; //1.0 / 6.0;
double three_quarters = 0.76; //2.0 * one_third;
N(0, 0) = one_eight;
N(0, 1) = one_eight;
N(0, 2) = three_quarters;
N(1, 0) = three_quarters;
N(1, 1) = one_eight;
N(1, 2) = one_eight;
N(2, 0) = one_eight;
N(2, 1) = three_quarters;
N(2, 2) = one_eight;
N(3, 0) = one_third;
N(3, 1) = one_third;
N(3, 2) = one_third;
//edge-biased particles: 0.12 + 0.44 + 0.44 = 1.0
N(4, 0) = one_eight;
N(4, 1) = 0.44;
N(4, 2) = 0.44;
N(5, 0) = 0.44;
N(5, 1) = one_eight;
N(5, 2) = 0.44;
N(6, 0) = 0.44;
N(6, 1) = 0.44;
N(6, 2) = one_eight;
//first
pos(0, 0) = one_eight * geom[0].X() + one_eight * geom[1].X() + three_quarters * geom[2].X();
pos(0, 1) = one_eight * geom[0].Y() + one_eight * geom[1].Y() + three_quarters * geom[2].Y();
pos(0, 2) = one_eight * geom[0].Z() + one_eight * geom[1].Z() + three_quarters * geom[2].Z();
//second
pos(1, 0) = three_quarters * geom[0].X() + one_eight * geom[1].X() + one_eight * geom[2].X();
pos(1, 1) = three_quarters * geom[0].Y() + one_eight * geom[1].Y() + one_eight * geom[2].Y();
pos(1, 2) = three_quarters * geom[0].Z() + one_eight * geom[1].Z() + one_eight * geom[2].Z();
//third
pos(2, 0) = one_eight * geom[0].X() + three_quarters * geom[1].X() + one_eight * geom[2].X();
pos(2, 1) = one_eight * geom[0].Y() + three_quarters * geom[1].Y() + one_eight * geom[2].Y();
pos(2, 2) = one_eight * geom[0].Z() + three_quarters * geom[1].Z() + one_eight * geom[2].Z();
//fourth (centroid)
pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X();
pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y();
pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z();
//fifth
pos(4, 0) = one_eight * geom[0].X() + 0.44 * geom[1].X() + 0.44 * geom[2].X();
pos(4, 1) = one_eight * geom[0].Y() + 0.44 * geom[1].Y() + 0.44 * geom[2].Y();
pos(4, 2) = one_eight * geom[0].Z() + 0.44 * geom[1].Z() + 0.44 * geom[2].Z();
//sixth
pos(5, 0) = 0.44 * geom[0].X() + one_eight * geom[1].X() + 0.44 * geom[2].X();
pos(5, 1) = 0.44 * geom[0].Y() + one_eight * geom[1].Y() + 0.44 * geom[2].Y();
pos(5, 2) = 0.44 * geom[0].Z() + one_eight * geom[1].Z() + 0.44 * geom[2].Z();
//seventh
pos(6, 0) = 0.44 * geom[0].X() + 0.44 * geom[1].X() + one_eight * geom[2].X();
pos(6, 1) = 0.44 * geom[0].Y() + 0.44 * geom[1].Y() + one_eight * geom[2].Y();
pos(6, 2) = 0.44 * geom[0].Z() + 0.44 * geom[1].Z() + one_eight * geom[2].Z();
}
///3D post-reseed variant: seeds 9 particles inside a tetrahedron -- four
///biased towards the nodes (0.7 on one node, 0.1 elsewhere), one at the
///centroid, and four biased towards the faces (0.1 on one node, 0.3
///elsewhere). Each row of the 9x4 weight matrix N sums to 1; positions are
///accumulated into pos (9x3) from the weights.
void ComputeGaussPointPositionsForPostReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 9, 3 > & pos,BoundedMatrix<double, 9, 4 > & N) //3D
{
double one_quarter = 0.25;
double small_fraction = 0.1; //1.0 / 6.0;
double big_fraction = 0.7; //2.0 * one_third;
double mid_fraction = 0.3; //2.0 * one_third;
//rows 0-3: one particle close to each node
N(0, 0) = big_fraction;
N(0, 1) = small_fraction;
N(0, 2) = small_fraction;
N(0, 3) = small_fraction;
N(1, 0) = small_fraction;
N(1, 1) = big_fraction;
N(1, 2) = small_fraction;
N(1, 3) = small_fraction;
N(2, 0) = small_fraction;
N(2, 1) = small_fraction;
N(2, 2) = big_fraction;
N(2, 3) = small_fraction;
N(3, 0) = small_fraction;
N(3, 1) = small_fraction;
N(3, 2) = small_fraction;
N(3, 3) = big_fraction;
//row 4: centroid
N(4, 0) = one_quarter;
N(4, 1) = one_quarter;
N(4, 2) = one_quarter;
N(4, 3) = one_quarter;
//rows 5-8: one particle close to each face (low weight on the opposite node)
N(5, 0) = small_fraction;
N(5, 1) = mid_fraction;
N(5, 2) = mid_fraction;
N(5, 3) = mid_fraction;
N(6, 0) = mid_fraction;
N(6, 1) = small_fraction;
N(6, 2) = mid_fraction;
N(6, 3) = mid_fraction;
N(7, 0) = mid_fraction;
N(7, 1) = mid_fraction;
N(7, 2) = small_fraction;
N(7, 3) = mid_fraction;
N(8, 0) = mid_fraction;
N(8, 1) = mid_fraction;
N(8, 2) = mid_fraction;
N(8, 3) = small_fraction;
//accumulate Cartesian positions: pos = N * node_coordinates
pos=ZeroMatrix(9,3);
for (unsigned int i=0; i!=4; i++) //going through the 4 nodes
{
array_1d<double, 3 > & coordinates = geom[i].Coordinates();
for (unsigned int j=0; j!=9; j++) //going through the 9 particles
{
for (unsigned int k=0; k!=3; k++) //x,y,z
pos(j,k) += N(j,i) * coordinates[k];
}
}
}
///2D pre-reseed variant: seeds 3 particles inside a triangle, each biased
///towards one node (weight 0.5 on "its" node, 0.25 on the other two).
///Weights go into N (3x3); Cartesian positions are accumulated into pos (3x3)
///with the same pos = N * node_coordinates scheme as the 3D variant.
void ComputeGaussPointPositionsForPreReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 3, 3 > & pos,BoundedMatrix<double, 3, 3 > & N) //2D
{
    // Diagonal weight 0.5, off-diagonal 0.25 -- each row sums to 1.
    for (unsigned int particle = 0; particle != 3; particle++)
    {
        for (unsigned int node = 0; node != 3; node++)
            N(particle, node) = (particle == node) ? 0.5 : 0.25;
    }
    // Accumulate Cartesian positions from the shape-function weights.
    pos = ZeroMatrix(3, 3);
    for (unsigned int node = 0; node != 3; node++)
    {
        array_1d<double, 3 > & coordinates = geom[node].Coordinates();
        for (unsigned int particle = 0; particle != 3; particle++)
        {
            for (unsigned int k = 0; k != 3; k++) //x,y,z
                pos(particle, k) += N(particle, node) * coordinates[k];
        }
    }
}
///3D pre-reseed variant: seeds 4 particles inside a tetrahedron, each biased
///towards one node (weight 0.4 on "its" node, 0.2 on the other three; each
///row of N sums to 1). Positions are accumulated into pos (4x3) as
///pos = N * node_coordinates.
void ComputeGaussPointPositionsForPreReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 4, 3 > & pos,BoundedMatrix<double, 4, 4 > & N) //3D
{
//creating 4 particles, each will be closer to a node and equidistant to the other nodes
N(0, 0) = 0.4;
N(0, 1) = 0.2;
N(0, 2) = 0.2;
N(0, 3) = 0.2;
N(1, 0) = 0.2;
N(1, 1) = 0.4;
N(1, 2) = 0.2;
N(1, 3) = 0.2;
N(2, 0) = 0.2;
N(2, 1) = 0.2;
N(2, 2) = 0.4;
N(2, 3) = 0.2;
N(3, 0) = 0.2;
N(3, 1) = 0.2;
N(3, 2) = 0.2;
N(3, 3) = 0.4;
//accumulate Cartesian positions: pos = N * node_coordinates
pos=ZeroMatrix(4,3);
for (unsigned int i=0; i!=4; i++) //going through the 4 nodes
{
array_1d<double, 3 > & coordinates = geom[i].Coordinates();
for (unsigned int j=0; j!=4; j++) //going through the 4 particles
{
for (unsigned int k=0; k!=3; k++) //x,y,z
pos(j,k) += N(j,i) * coordinates[k];
}
}
}
///Seeds 45 particles on a triangular lattice inside a triangle:
///rows i = 0..8 hold (9 - i) particles each (9+8+...+1 = 45), with
///barycentric steps of 0.1 offset by 0.05 so every particle is interior.
///N(row, :) are the weights (summing to 1); pos(row, :) the positions.
void ComputeGaussPointPositions_45(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 45, 3 > & pos,BoundedMatrix<double, 45, 3 > & N)
{
//std::cout << "NEW ELEMENT" << std::endl;
unsigned int counter=0;
for (unsigned int i=0; i!=9;i++)
{
for (unsigned int j=0; j!=(9-i);j++)
{
N(counter,0)=0.05+double(i)*0.1;
N(counter,1)=0.05+double(j)*0.1;
N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ; //third weight closes the row to 1
pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X();
pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y();
pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z();
//std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl;
counter++;
}
}
}
///Initial 2D seeding: 15 particles on a triangular lattice
///(rows i = 0..4 hold (5 - i) particles; 5+4+3+2+1 = 15), with barycentric
///steps of 0.2 offset by 0.05. N holds the weights, pos the positions.
void ComputeGaussPointPositions_initial(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 15, 3 > & pos,BoundedMatrix<double, 15, 3 > & N) //2D
{
//std::cout << "NEW ELEMENT" << std::endl;
unsigned int counter=0;
for (unsigned int i=0; i!=5;i++)
{
for (unsigned int j=0; j!=(5-i);j++)
{
N(counter,0)=0.05+double(i)*0.2;
N(counter,1)=0.05+double(j)*0.2;
N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ; //third weight closes the row to 1
pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X();
pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y();
pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z();
//std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl;
counter++;
}
}
}
///Initial 3D seeding: 20 particles filling a tetrahedron in triangular
///layers (layer i = 0..3 holds a triangle of sizes 10, 6, 3, 1 particles;
///10+6+3+1 = 20). Weights use steps of 0.27 offset by 0.175*0.27, and the
///fourth weight closes each row of N to 1. N is 20x4, pos is 20x3.
void ComputeGaussPointPositions_initial(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 20, 3 > & pos,BoundedMatrix<double, 20, 4 > & N) //3D
{
//std::cout << "NEW ELEMENT" << std::endl;
//double total;
double fraction_increment;
unsigned int counter=0;
for (unsigned int i=0; i!=4;i++) //going to build a particle "pyramid"(tetrahedra) by layers. the first layer will be made by a triangle of 4 base X 4 height. since it is a triangle, it means it will have 10 particles
{
//std::cout << "inside i" << i << std::endl;
for (unsigned int j=0; j!=(4-i);j++)
{
//std::cout << "inside j" << j << std::endl;
for (unsigned int k=0; k!=(4-i-j);k++)
{
//std::cout << "inside k" << k << std::endl;
N(counter,0)= 0.27 * ( 0.175 + double(i) ) ; //this is our "surface" in which we will build each layer, so we must construct a triangle using what's left of the shape functions total (a total of 1)
//total = 1.0 - N(counter,0);
fraction_increment = 0.27; //
N(counter,1)=fraction_increment * (0.175 + double(j));
N(counter,2)=fraction_increment * (0.175 + double(k));
N(counter,3)=1.0 - ( N(counter,0)+ N(counter,1) + N(counter,2) ) ; //fourth weight closes the row to 1
pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X() + N(counter,3) * geom[3].X();
pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y() + N(counter,3) * geom[3].Y();
pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z() + N(counter,3) * geom[3].Z();
//std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl;
counter++;
}
}
}
}
///Generic dense-matrix inversion via boost::uBLAS LU factorization.
///The input is copied (left unmodified); returns false if the
///factorization detects a singular matrix, true on success with the
///inverse written into "inverse".
template<class T>
bool InvertMatrix(const T& input, T& inverse)
{
typedef permutation_matrix<std::size_t> pmatrix;
// create a working copy of the input
T A(input);
// create a permutation matrix for the LU-factorization
pmatrix pm(A.size1());
// perform LU-factorization; nonzero return signals a singular matrix
int res = lu_factorize(A, pm);
if (res != 0)
return false;
// create identity matrix of "inverse"
inverse.assign(identity_matrix<double> (A.size1()));
// backsubstitute to get the inverse
lu_substitute(A, pm, inverse);
return true;
}
///Inverts the 3x3 matrix A into "result" using the adjugate/cofactor formula.
///Returns false (leaving "result" untouched) when A is singular; the previous
///version divided by a zero determinant unconditionally and always returned
///true, producing inf/NaN entries for degenerate matrices.
bool InvertMatrix3x3(const BoundedMatrix<double, TDim+1 , TDim+1 >& A, BoundedMatrix<double, TDim+1 , TDim+1 >& result)
{
const double determinant = +A(0,0)*(A(1,1)*A(2,2)-A(2,1)*A(1,2))
                           -A(0,1)*(A(1,0)*A(2,2)-A(1,2)*A(2,0))
                           +A(0,2)*(A(1,0)*A(2,1)-A(1,1)*A(2,0));
if (determinant == 0.0) // singular matrix: cannot invert
    return false;
const double invdet = 1.0/determinant;
// result = adj(A)^T / det(A)
result(0,0) =  (A(1,1)*A(2,2)-A(2,1)*A(1,2))*invdet;
result(1,0) = -(A(0,1)*A(2,2)-A(0,2)*A(2,1))*invdet;
result(2,0) =  (A(0,1)*A(1,2)-A(0,2)*A(1,1))*invdet;
result(0,1) = -(A(1,0)*A(2,2)-A(1,2)*A(2,0))*invdet;
result(1,1) =  (A(0,0)*A(2,2)-A(0,2)*A(2,0))*invdet;
result(2,1) = -(A(0,0)*A(1,2)-A(1,0)*A(0,2))*invdet;
result(0,2) =  (A(1,0)*A(2,1)-A(2,0)*A(1,1))*invdet;
result(1,2) = -(A(0,0)*A(2,1)-A(2,0)*A(0,1))*invdet;
result(2,2) =  (A(0,0)*A(1,1)-A(1,0)*A(0,1))*invdet;
return true;
}
///Validates that the model part is correctly configured for this utility:
///CONVECTION_DIFFUSION_SETTINGS must exist and define the unknown,
///projection and velocity variables (all present on the nodes), the legacy
///convection variable must NOT be used, and MEAN_SIZE / DELTA_SCALAR1 must
///be in the nodal database. Throws on any violation; returns 0 on success.
virtual int Check()
{
KRATOS_TRY
ProcessInfo& rCurrentProcessInfo = mr_model_part.GetProcessInfo();
if (rCurrentProcessInfo.Has(CONVECTION_DIFFUSION_SETTINGS)==false)
KRATOS_THROW_ERROR(std::logic_error, "no CONVECTION_DIFFUSION_SETTINGS in model_part", "");
//std::cout << "ConvDiff::Check(). If crashes, check CONVECTION_DIFFUSION_SETTINGS is defined" << std::endl;
ConvectionDiffusionSettings::Pointer my_settings = rCurrentProcessInfo.GetValue(CONVECTION_DIFFUSION_SETTINGS);
//UNKNOWN VARIABLE
if(my_settings->IsDefinedUnknownVariable()==true)
{
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(my_settings->GetUnknownVariable()) == false)
KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Unknown Variable defined but not contained in the model part", "");
}
else
KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Unknown Variable not defined!", "");
//PROJECTION VARIABLE
//used as intermediate variable, is the variable at time n+1 but only accounting for the convective term.
if(my_settings->IsDefinedProjectionVariable()==true)
{
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(my_settings->GetProjectionVariable()) == false)
KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Projection Variable defined but not contained in the model part", "");
}
else
KRATOS_THROW_ERROR(std::logic_error, "No Projection variable assigned for ConvDiff!", "");
//CONVECTION VELOCITY VARIABLE
//CURRENTLY WE ARE USING (VELOCITY -MESH_VELOCITY) TO CONVECT, so the ConvectionVariable must not be used:
//if(my_settings->IsDefinedConvectionVariable()==true)
//{
// if (BaseType::GetModelPart().NodesBegin()->SolutionStepsDataHas(my_settings->GetConvectionVariable()) == false)
// KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Convection Variable defined but not contained in the model part", "");
//}
//else
// std::cout << "No Projection variable assigned for ConvDiff. Assuming Convection=0" << std::endl;
if(my_settings->IsDefinedConvectionVariable()==true)
KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: ConvectionVariable not used. Use VelocityVariable instead", "");
//VELOCITY VARIABLE
if(my_settings->IsDefinedVelocityVariable()==true)
{
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(my_settings->GetVelocityVariable()) == false)
KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Velocity Variable defined but not contained in the model part", "");
}
else
KRATOS_THROW_ERROR(std::logic_error, "No Velocity variable assigned for ConvDiff!", "");
//auxiliary nodal variables required by this utility
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(MEAN_SIZE) == false)
KRATOS_THROW_ERROR(std::logic_error, "Add MEAN_SIZE variable to model part!", "");
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(DELTA_SCALAR1) == false)
KRATOS_THROW_ERROR(std::logic_error, "Add DELTA_SCALAR1 variable to model part!", "");
return 0;
KRATOS_CATCH("")
}
// ----- data members -----
ModelPart& mr_model_part; // model part whose scalar unknown is convected
int m_nparticles; // number of particles per element at seeding -- TODO confirm
int mnelems; // number of elements in the model part
int moffset; // element-id offset used when addressing per-element data -- TODO confirm
//vector<double> mareas_vector; UNUSED SO COMMENTED
int max_nsubsteps; // maximum number of substeps when moving particles
double max_substep_dt; // maximum time increment allowed per substep
int mmaximum_number_of_particles; // hard cap of particles stored per element
std::vector< Convection_Particle > mparticles_vector; //Point<3>  -- global particle storage
int mlast_elem_id; // highest element id seen, used when creating new entities
bool modd_timestep; // toggles between the two particle buffers each step -- TODO confirm
bool mparticle_printing_tool_initialized; // lazy-init flag for the debug printing tool
unsigned int mfilter_factor; // print only every mfilter_factor-th particle
unsigned int mlast_node_id; // highest node id, used when printing particles as nodes
//ModelPart& mr_particle_model_part;
vector<int> mnumber_of_particles_in_elems; // per-element particle counters
vector<int> mnumber_of_particles_in_elems_aux; // scratch copy of the counters
//vector<ParticlePointerVector*> mpointers_to_particle_pointers_vectors; //pointing to the GetValue of each element
vector<ParticlePointerVector> mvector_of_particle_pointers_vectors; // per-element lists of particle pointers
typename BinsObjectDynamic<Configure>::Pointer mpBinsObjectDynamic; // spatial search structure over the elements
const Variable<double>& mUnknownVar; // convected scalar (from CONVECTION_DIFFUSION_SETTINGS)
const Variable<double>& mProjectionVar; // projection of the convected scalar
const Variable<array_1d<double,3> >& mVelocityVar; // convecting velocity variable
const Variable<array_1d<double,3> >& mMeshVelocityVar; // mesh velocity (ALE) variable
};
} // namespace Kratos.
#endif // KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_TRANSPORT_INCLUDED defined
|
VerletNeighborListAsBuild.h | /**
* @file VerletNeighborListAsBuild.h
* @author humig
* @date 21.05.19
*/
#pragma once
#include "../VerletNeighborListInterface.h"
#include "AsBuildPairGeneratorFunctor.h"
#include "C08TraversalColorChangeNotify.h"
#include "autopas/utils/WrapOpenMP.h"
namespace autopas {
/**
* This class implements a neighbor list that remembers which thread added which particle pair and at which color
* during the build with C08 from LinkedCells.
* @tparam Particle The particle type the class uses.
*/
template <class Particle>
class VerletNeighborListAsBuild : public VerletNeighborListInterface<Particle>, ColorChangeObserver {
/**
* Adds the generator functor for validation checks as friend so it can call checkPair().
* The second template argument `true` selects the validation-check specialization.
*/
friend class internal::AsBuildPairGeneratorFunctor<Particle, true>;
/**
* Adds the generator functor for adding pairs as friend so it can call addPair().
* The second template argument `false` selects the pair-adding specialization.
*/
friend class internal::AsBuildPairGeneratorFunctor<Particle, false>;
private:
/**
* Starts the generate functor. _baseLinkedCells has to be set before!
* @tparam useNewton3 If the functor should use newton 3.
* @tparam callCheckInstead If false, start it in generate mode, if true, in check validity mode.
* @param cutoff The cutoff to use for the particle pairs in the functor.
*/
template <bool useNewton3, bool callCheckInstead = false>
void startFunctor(double cutoff) {
internal::AsBuildPairGeneratorFunctor<Particle, callCheckInstead> functor(*this, cutoff);
// Use SoA traversal for generation and AoS traversal for validation check.
constexpr DataLayoutOption dataLayout = callCheckInstead ? DataLayoutOption::aos : DataLayoutOption::soa;
auto traversal = C08TraversalColorChangeNotify<typename VerletListHelpers<Particle>::VerletListParticleCellType,
internal::AsBuildPairGeneratorFunctor<Particle, callCheckInstead>,
dataLayout, useNewton3>(
_baseLinkedCells->getCellBlock().getCellsPerDimensionWithHalo(), &functor, this);
// `this` is passed as observer so receiveColorChange() tracks the current color.
_baseLinkedCells->iteratePairwise(&traversal);
}
public:
/**
* This type represents the neighbor list that each thread has for each color.
*/
using ThreadNeighborList = std::unordered_map<Particle *, std::vector<Particle *>>;
/**
* This type represents the thread lists for all colors.
*/
using ColorNeighborList = std::vector<ThreadNeighborList>;
/**
* This type represents the SoA neighbor list that each thread has for each color.
*/
using SoAThreadNeighborList = std::vector<std::vector<size_t, autopas::AlignedAllocator<size_t>>>;
/**
* This type represents the SoA thread lists for all colors.
*/
using SoAColorNeighborList = std::vector<SoAThreadNeighborList>;
/**
* Constructor for the VerletNeighborListAsBuild. Does only default initialization.
*/
VerletNeighborListAsBuild() : _neighborList{}, _soaListIsValid(false) {}
/// Identifies this implementation as the "as-build" variable Verlet-lists container.
ContainerOption getContainerType() const override { return ContainerOption::varVerletListsAsBuild; }
/**
* @copydoc VerletNeighborListInterface::buildNeighborList()
*
* It executes C08 on the passed LinkedCells container and saves the resulting pairs in the neighbor list, remembering
* the thread and current color for each pair.
*/
void buildNeighborList(LinkedCells<Particle, typename VerletListHelpers<Particle>::VerletListParticleCellType,
                                   typename VerletListHelpers<Particle>::SoAArraysType> &linkedCells,
                       bool useNewton3) override {
    // Any previously generated SoA list no longer matches the new AoS list.
    _soaListIsValid = false;
    _baseLinkedCells = &linkedCells;

    // Reset the per-color, per-thread lists before refilling them.
    const unsigned int maxNumThreads = autopas_get_max_threads();
    for (int color = 0; color < 8; ++color) {
      auto &threadLists = _neighborList[color];
      threadLists.resize(maxNumThreads);
      for (auto &threadList : threadLists) {
        threadList.clear();
      }
    }

    // Run the C08 generator traversal with the requested newton3 mode.
    if (useNewton3) {
      startFunctor<true>(linkedCells.getInteractionLength());
    } else {
      startFunctor<false>(linkedCells.getInteractionLength());
    }
  }
/// Re-runs the pair generation in check mode; the check functor clears
/// _allPairsPresent whenever a required pair is missing from the stored list.
bool checkNeighborListValidity(bool useNewton3, double cutoff) override {
    _allPairsPresent = true;
    if (_baseLinkedCells == nullptr) {
      return false;
    }
    if (useNewton3) {
      startFunctor<true, /*callCheckInstead*/ true>(cutoff);
    } else {
      startFunctor<false, /*callCheckInstead*/ true>(cutoff);
    }
    return _allPairsPresent;
  }
/**
* Returns the internal AoS neighbor list. Should be used by traversals.
*
* The internal neighbor list is a vector of the neighbor lists of each color. Each of those neighbor lists is a
* vector that contains one neighbor list for each thread. Each of those neighbor lists is a map from each particle to
* a vector containing its neighbors.
*
* @return the internal AoS neighbor list.
*/
const auto &getInternalNeighborList() { return _neighborList; }
/**
* Returns the internal SoA neighbor list. Should be used by traversals.
*
* The internal SoA neighbor list is a vector of the SoA neighbor lists of each color. Each of those SoA neighbor
* lists is a vector that contains one SoA neighbor list for each thread. Each of those SoA neighbor lists is a vector
* of vectors where the i-th vector contains the indices of all neighbors of particle i in the SoA.
*
* @return the internal SoA neighbor list.
*/
const auto &getInternalSoANeighborList() { return _soaNeighborList; }
void receiveColorChange(unsigned long newColor) override { _currentColor = newColor; }
/**
* @see getInternalSoANeighborList()
*/
void generateSoAFromAoS() override {
// Generate a map from pointer to particle index in the SoA. This works, because during loadSoA"()" the particles
// are loaded in the same order.
// NOTE: underscore-prefixed name kept although this is a local variable.
std::unordered_map<Particle *, size_t> _aos2soaMap;
_aos2soaMap.reserve(_baseLinkedCells->getNumParticles());
size_t i = 0;
for (auto iter = _baseLinkedCells->begin(); iter.isValid(); ++iter, ++i) {
_aos2soaMap[&(*iter)] = i;
}
constexpr int numColors = 8;
for (int color = 0; color < numColors; color++) {
unsigned int numThreads = _neighborList[color].size();
_soaNeighborList[color].resize(numThreads);
// The two pragmas together form a "parallel" region whose body is a
// worksharing "for": each thread converts its own per-thread list, so
// no synchronization is needed inside the loop.
#if defined(AUTOPAS_OPENMP)
#pragma omp parallel num_threads(numThreads)
#endif
#if defined(AUTOPAS_OPENMP)
#pragma omp for schedule(static)
#endif
for (unsigned int thread = 0; thread < numThreads; thread++) {
auto &currentThreadList = _soaNeighborList[color][thread];
currentThreadList.clear();
currentThreadList.resize(_aos2soaMap.size());
// Translate each pointer-based adjacency entry into SoA indices.
for (const auto &pair : _neighborList[color][thread]) {
size_t indexFirst = _aos2soaMap[pair.first];
for (const auto &second : pair.second) {
size_t indexSecond = _aos2soaMap[second];
currentThreadList[indexFirst].push_back(indexSecond);
}
}
}
}
_soaListIsValid = true;
}
/**
 * Loads the particle information of all cells into the SoA.
 * @tparam TFunctor The type of the functor used to load the particles.
 * @param f The functor used to load the particles.
 * @return Pointer to the filled SoA. Ownership is *not* passed.
 */
template <class TFunctor>
auto *loadSoA(TFunctor *f) {
  _soa.clear();
  size_t particleOffset = 0;
  for (auto &cell : _baseLinkedCells->getCells()) {
    f->SoALoader(cell, _soa, particleOffset);
    particleOffset += cell.numParticles();
  }
  return &_soa;
}
/**
 * Writes the particle information from the SoA (filled by loadSoA()) back into the cells.
 * @tparam TFunctor The type of the functor used to extract the particles.
 * @param f The functor used to extract the particles.
 */
template <class TFunctor>
void extractSoA(TFunctor *f) {
  size_t particleOffset = 0;
  for (auto &cell : _baseLinkedCells->getCells()) {
    f->SoAExtractor(cell, _soa, particleOffset);
    particleOffset += cell.numParticles();
  }
}
bool isSoAListValid() const override { return _soaListIsValid; }
/**
 * Sums the sizes of all per-thread neighbor maps over all colors.
 * Note: this counts map entries (particles that own a list), exactly as before.
 * @return Total number of entries in the AoS neighbor list.
 */
long getNumberOfNeighborPairs() const override {
  long pairCount = 0;
  for (const auto &colorList : _neighborList) {
    for (const auto &threadMap : colorList) {
      pairCount += threadMap.size();
    }
  }
  return pairCount;
}
private:
/**
 * Called from VarVerletListGeneratorFunctor: appends `second` to the neighbor
 * vector of `first` in the list of the current color and calling thread.
 */
void addPair(Particle *first, Particle *second) {
  const int threadIndex = autopas_get_thread_num();
  _neighborList[_currentColor][threadIndex][first].push_back(second);
}
/**
 * Called from VarVerletListGeneratorFunctor during the validity check.
 *
 * Verifies that the pair (first, second) is still present somewhere in the
 * neighbor list. The list the pair was generated in (current color, current
 * thread) is checked first as the common fast path; only if the pair is not
 * there are all other lists searched. If the pair is found nowhere,
 * _allPairsPresent is set to false.
 *
 * BUG FIX: the original inner loop ignored its own `color`/`thread` loop
 * variables and re-tested _neighborList[_currentColor][currentThreadIndex]
 * (a no-op), and the branch structure contradicted the comment above it.
 */
void checkPair(Particle *first, Particle *second) {
  int currentThreadIndex = autopas_get_thread_num();
  // Fast path: the list the pair would be in if the particles were not moved.
  auto &oldThreadNeighborList = _neighborList[_currentColor][currentThreadIndex];
  if (isPairInList(oldThreadNeighborList, first, second)) {
    return;
  }
  // Slow path: search every list for the pair.
  for (int color = 0; color < 8; color++) {
    for (unsigned int thread = 0; thread < _neighborList[color].size(); thread++) {
      if (isPairInList(_neighborList[color][thread], first, second)) {
        return;
      }
    }
  }
  // Pair not found in any list: the neighbor list is invalid.
  // This is thread safe, as _allPairsPresent is atomic.
  _allPairsPresent = false;
}
/**
* Helper method for checkPair()
* @return True, if the pair is present, false otherwise.
*/
bool isPairInList(ThreadNeighborList ¤tNeighborList, Particle *first, Particle *second) {
auto iteratorFound = std::find(currentNeighborList[first].begin(), currentNeighborList[first].end(), second);
return iteratorFound != currentNeighborList[first].end();
}
private:
/**
 * The internal AoS neighbor list. For format, see getInternalNeighborList().
 */
// NOTE(review): the literal 8 here and in _soaNeighborList presumably mirrors
// the number of traversal colors (numColors in generateSoAFromAoS()) — keep
// the three in sync, ideally via one named constant.
std::array<ColorNeighborList, 8> _neighborList;
/**
 * The LinkedCells object this neighbor list should use to build.
 */
LinkedCells<Particle, typename VerletListHelpers<Particle>::VerletListParticleCellType,
            typename VerletListHelpers<Particle>::SoAArraysType> *_baseLinkedCells;
/**
 * The internal SoA neighbor list. For format, see getInternalSoANeighborList().
 */
std::array<SoAColorNeighborList, 8> _soaNeighborList;
/**
 * The SoA used.
 */
SoA<typename Particle::SoAArraysType> _soa;
/**
 * If the SoA is valid, see isSoAListValid().
 */
bool _soaListIsValid;
/**
 * The current color in the traversal during the build of the neighbor list.
 * Thread-private under OpenMP so concurrent builds do not interfere.
 */
static int _currentColor;
#if defined(AUTOPAS_OPENMP)
#pragma omp threadprivate(_currentColor)
#endif
/**
 * Used in checkNeighborListValidity(). Set to false in the pair generating functor.
 */
std::atomic<bool> _allPairsPresent;
};
// Out-of-line definition of the threadprivate static member; traversal starts at color 0.
template <class Particle>
int VerletNeighborListAsBuild<Particle>::_currentColor = 0;
} // namespace autopas
|
core_clag2z.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions mixed zc -> ds
*
**/
#include <plasma_core_blas.h>
#include "core_lapack.h"
#include "plasma_types.h"
/***************************************************************************//**
*
* @ingroup core_lag2
*
* Converts m-by-n matrix A from single complex to double complex precision.
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the matrix As.
* m >= 0.
*
* @param[in] n
* The number of columns of the matrix As.
* n >= 0.
*
* @param[in] As
* The ldas-by-n matrix in single complex precision to convert.
*
* @param[in] ldas
* The leading dimension of the matrix As.
* ldas >= max(1,m).
*
* @param[out] A
* On exit, the converted lda-by-n matrix in double complex precision.
*
* @param[in] lda
* The leading dimension of the matrix A.
* lda >= max(1,m).
*
******************************************************************************/
// Declared weak so an optimized, architecture-specific implementation can
// override this default at link time.
__attribute__((weak))
void plasma_core_clag2z(int m, int n,
                        plasma_complex32_t *As, int ldas,
                        plasma_complex64_t *A, int lda)
{
    // Thin wrapper: LAPACKE performs the actual single- to double-complex copy.
    LAPACKE_clag2z_work(LAPACK_COL_MAJOR, m, n, As, ldas, A, lda);
}
/******************************************************************************/
/**
 * OpenMP-task variant of plasma_core_clag2z(): enqueues the conversion as a
 * task that reads the As tile and writes the A tile, so the runtime can order
 * it against other tasks touching the same tiles.
 */
void plasma_core_omp_clag2z(int m, int n,
                            plasma_complex32_t *As, int ldas,
                            plasma_complex64_t *A, int lda,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Dependency ranges cover the full ldas*n / lda*n tiles, not just the
    // m-by-n submatrix — conservative but safe.
    #pragma omp task depend(in:As[0:ldas*n]) \
                     depend(out:A[0:lda*n])
    {
        // Skip the work (but still run the task) if the sequence already failed.
        if (sequence->status == PlasmaSuccess)
            plasma_core_clag2z(m, n, As, ldas, A, lda);
    }
}
|
ex05.c | /* Copyright (c) 2019 CSC Training */
/* Copyright (c) 2021 ENCCS */
#include <stdio.h>
#include <math.h>
#define NX 102400
int main(void)
{
    /* NOTE(review): three 102400-element double arrays (~2.4 MB total) are
     * allocated on the stack; this can exceed small default stack limits —
     * consider static or heap storage. */
    double vecA[NX],vecB[NX],vecC[NX];
    double r=0.2;
    /* Initialization of vectors: vecA holds the geometric series r^i, vecB all ones */
    for (int i = 0; i < NX; i++) {
        vecA[i] = pow(r, i);
        vecB[i] = 1.0;
    }
    /* Element-wise products of two vectors on the device (the final reduction
     * happens on the host below). vecC stays device-resident for the whole
     * data region and is copied back only when the region ends. */
    #pragma omp target data map(from:vecC[0:NX])
    {
        #pragma omp target map(to:vecA[0:NX],vecB[0:NX])
        for (int i = 0; i < NX; i++) {
            vecC[i] = vecA[i] * vecB[i];
        }
        /* Initialization of vectors again (on the host) */
        for (int i = 0; i < NX; i++) {
            vecA[i] = 0.5;
            vecB[i] = 2.0;
        }
        /* Accumulate the second element-wise product into device-resident vecC */
        #pragma omp target map(to:vecA[0:NX],vecB[0:NX])
        for (int i = 0; i < NX; i++) {
            vecC[i] = vecC[i] + vecA[i] * vecB[i];
        }
    }
    double sum = 0.0;
    /* calculate the sum on the host */
    for (int i = 0; i < NX; i++) {
        sum += vecC[i];
    }
    printf("The sum is: %8.6f \n", sum);
    return 0;
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y, storing the result in RESULT.
 *
 * Note: Y is normalized in place as a scratch value, exactly as in the
 * classic glibc-manual implementation.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize y so that x->tv_usec - y->tv_usec lands in [0, 1000000). */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec of the result is now certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec; /* 1 if the difference is negative */
}
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    // BUG FIX: Nx..Nt were read uninitialized when fewer command-line
    // arguments were given (undefined behavior). Provide defaults:
    // interior 32^3 grid, 8 time steps; overridden by argv[1..4].
    int Nx = 32 + 2, Ny = 32 + 2, Nz = 32 + 2, Nt = 8;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);
    // A[2][Nz][Ny][Nx]: two time planes for ping-pong buffering.
    double ****A = (double ****) malloc(sizeof(double***)*2);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }
    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 8;
    tile_size[1] = 8;
    tile_size[2] = 24;
    tile_size[3] = 512;
    tile_size[4] = -1;
    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;
    const int BASE = 1024;
    const double alpha = 0.0876;
    const double beta = 0.0765;
    // initialize variables
    srand(42);
    // BUG FIX: initialize the full grid (indices 0..N-1) in both time planes.
    // The stencil reads the i-1/j-1/k-1 == 0 boundary planes and the boundary
    // of the *other* time plane, which were previously never written
    // (reads of uninitialized memory).
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = A[0][i][j][k];
            }
        }
    }
#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
    #pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
        #pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif
    int num_threads = 1;
#if defined(_OPENMP)
    // NOTE(review): omp_get_max_threads() is used without #include <omp.h> in
    // this file — relies on an implicit declaration; confirm the build adds it.
    num_threads = omp_get_max_threads();
#endif
    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
                            + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                                      A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        // BUG FIX: was `min(...)`, which is undefined — this file defines MIN.
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
    #pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif
    // Free allocated arrays (Causing performance degradation
    /* for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    */
    return 0;
}
|
GB_unop__identity_fc32_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc32_uint16)
// op(A') function: GB (_unop_tran__identity_fc32_uint16)
// C type: GxB_FC32_t
// A type: uint16_t
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Auto-generated kernel: Cx [p] = (GxB_FC32_t)(Ax [p]) for all anz entries,
// parallelized with a static schedule over nthreads.
GrB_Info GB (_unop_apply__identity_fc32_uint16)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // sparse/hyper/full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Auto-generated kernel: C = (GxB_FC32_t) A' — transpose and typecast.
// The loop body lives in the shared template, specialized via the GB_* macros above.
GrB_Info GB (_unop_tran__identity_fc32_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__iseq_fc32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_fc32)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_fc32)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_fc32)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_fc32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_fc32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_fc32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_fc32)
// C=scalar+B GB (_bind1st__iseq_fc32)
// C=scalar+B' GB (_bind1st_tran__iseq_fc32)
// C=A+scalar GB (_bind2nd__iseq_fc32)
// C=A'+scalar GB (_bind2nd_tran__iseq_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// A pattern? 0
// B type: GxB_FC32_t
// B pattern? 0
// BinaryOp: cij = GB_FC32_iseq (aij, bij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC32_iseq (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_FC32 || GxB_NO_ISEQ_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Auto-generated kernel: C = A+B where C, A, and B are all dense;
// body is the shared template specialized by the GB_* macros above.
void GB (_Cdense_ewise3_noaccum__iseq_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated kernel: C += B, accumulating a sparse B into a dense C.
GrB_Info GB (_Cdense_accumB__iseq_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated kernel: C += b, accumulating a scalar into a dense C.
GrB_Info GB (_Cdense_accumb__iseq_fc32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return left by the code generator; harmless.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Auto-generated kernel: eWiseAdd C=A+B (optionally masked), cij = GB_FC32_iseq.
GrB_Info GB (_AaddB__iseq_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    GxB_FC32_t alpha_scalar ;
    GxB_FC32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion supplies "missing entry" scalars for A and B
        alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((GxB_FC32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Auto-generated kernel: eWiseMult C=A.*B (masked variants) where C is sparse/hyper.
GrB_Info GB (_AemultB_08__iseq_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Auto-generated kernel: eWiseMult C<#> = A.*B, A sparse/hyper, B bitmap/full.
// GB_BINOP_FLIP selects at compile time whether a flipped operand order must
// be handled explicitly; for iseq it is 0 (commutative), so only the second
// branch is compiled.
GrB_Info GB (_AemultB_02__iseq_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Auto-generated kernel: eWiseMult C<M> = A.*B, M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__iseq_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Auto-generated kernel: eWiseMult C=A.*B (masked variants) where C is bitmap.
GrB_Info GB (_AemultB_bitmap__iseq_fc32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Auto-generated kernel: Cx [p] = iseq (x, Bx [p]) with the scalar bound to
// the first operand; entries absent from the bitmap Bb are skipped.
GrB_Info GB (_bind1st__iseq_fc32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        GxB_FC32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_FC32_iseq (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Auto-generated kernel: Cx [p] = iseq (Ax [p], y) with the scalar bound to
// the second operand; entries absent from the bitmap Ab are skipped.
GrB_Info GB (_bind2nd__iseq_fc32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        GxB_FC32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_FC32_iseq (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template: cij = op (x, aij),
// no typecasting (in spite of the macro name).
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                                   \
{                                                           \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;                  \
    Cx [pC] = GB_FC32_iseq (x, aij) ;                       \
}
// Auto-generated kernel: C = iseq (x, A') — transpose with the scalar bound
// to the first operand.
GrB_Info GB (_bind1st_tran__iseq_fc32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template: cij = op (aij, y),
// no typecasting (in spite of the macro name).
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                                   \
{                                                           \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;                  \
    Cx [pC] = GB_FC32_iseq (aij, y) ;                       \
}
// Auto-generated kernel: C = iseq (A', y) — transpose with the scalar bound
// to the second operand.
GrB_Info GB (_bind2nd_tran__iseq_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
owl_ndarray_conv_impl.h | /*
* OWL - OCaml Scientific Computing
* Copyright (c) 2016-2022 Liang Wang <liang@ocaml.xyz>
*/
#ifndef OWL_CORE_CONV_IMPL
#define OWL_CORE_CONV_IMPL
/*
* Calculate the block sizes for convolution operations.
* Code heavily inspired by Eigen (http://eigen.tuxfamily.org/).
*/
#define IM2COL_THRESHOLD 512 * 1024
#define ALIGN_SIZE 32 // for AVX address alignment
// The effect of calculating block size according to cache sizes is yet to be
// proved here since we use OpenBLAS GEMM directly; also, note that we
// calculate `InputMatrix x KernelMatrix`, not the other way around.
// Compute GEBP-style cache-blocking factors (in/out parameters kp, mp, np) for
// the `InputMatrix x KernelMatrix` GEMM, from the measured L1/L2/L3 sizes.
// Heuristics follow Eigen's computeProductBlockingSizes; see the comment above.
void compute_block_sizes(int* kp, int* mp, int* np, int typesize) {
    // l1/l2/l3 are cache sizes in bytes, presumably per core — set them to
    // small numbers when debugging.
    int l1, l2, l3;
    query_cache_sizes(&l1, &l2, &l3);
    int k = *kp;
    int m = *mp;
    int n = *np;
    // Small problems: any blocking would only add overhead; keep sizes as-is.
    if (fmaxf(k, fmaxf(m, n)) < 50) {
        return;
    }
    // Register-tile parameters: nr x mr micro-kernel footprint.
    // NOTE(review): num_reg = 16 assumes 16 vector registers (e.g. SSE/AVX on
    // x86-64) — confirm for other targets.
    int nr = 4;
    int num_reg = 16;
    int mr = num_reg / (2 * nr) * typesize;
    int k_strip = 8;
    int k_div = (mr + nr) * typesize;
    int k_sub = mr * nr * typesize;
    // Largest kc such that a panel fits in L1, rounded down to k_strip.
    const int max_kc = fmaxf(((l1 - k_sub) / k_div) & (~(k_strip - 1)), 1);
    const int old_k = k;
    if (k > max_kc) {
        // Split k into roughly equal slices no larger than max_kc.
        k = (k % max_kc) == 0 ? max_kc
            : max_kc - k_strip * ((max_kc - 1 - (k % max_kc)) / (k_strip * (k / max_kc + 1)));
        //assert (old_k / k == old_k / max_kc);
    }
    int max_nc;
    const int actual_l2 = 1572864; // l3 for debug; otherwise 1572864
    const int lhs_bytes = m * k * typesize;
    const int rest_l1 = l1 - k_sub - lhs_bytes;
    if (rest_l1 >= nr * k * typesize) {
        // The LHS block fits in L1 together with an RHS panel.
        max_nc = rest_l1 / (k * typesize);
    } else {
        // Otherwise size the RHS panel against (3/4 of) L2.
        max_nc = (3 * actual_l2) / (4 * max_kc * typesize);
    }
    // nc: RHS block width, multiple of nr.
    int nc = (int) (fminf(actual_l2 / (2 * k * typesize), max_nc)) & (~(nr - 1));
    if (n > nc) {
        // Split n into roughly equal slices no larger than nc.
        n = (n % nc == 0) ? nc : (nc - nr * ((nc - (n % nc)) / (nr * (n / nc + 1))));
    } else if (old_k == k) {
        // k and n fit; choose mc so an LHS block stays cache-resident.
        int kn_size = k * n * typesize;
        int actual_lm = actual_l2;
        int max_mc = m;
        if (kn_size < 1024) {
            actual_lm = l1;
        } else if (l3 != 0 && kn_size <= 32768) {
            actual_lm = l2;
            max_mc = fminf(576, max_mc);
        }
        int mc = fminf(actual_lm / (3 * k * typesize), max_mc);
        if (mc > mr) {
            mc -= mc % mr;   // round down to a multiple of the register tile
        }
        else if (mc == 0) {
            // Degenerate case: no room to block m; return what we have.
            *kp = k; *mp = m; *np = n;
            return;
        }
        // Split m into roughly equal slices no larger than mc.
        m = (m % mc == 0) ? mc : (mc - mr * ((mc - (m % mc)) / (mr * (m / mc + 1))));
    }
    *kp = k; *mp = m; *np = n;
    return;
}
#endif /* OWL_CORE_CONV_IMPL */
#ifdef OWL_ENABLE_TEMPLATE
#ifdef AVX_PSIZE
/*
* Fill in temporary input matrix from input tensor with vectorisation.
* Currently only support AVX instruction set.
*/
// Copy one kc-slice of the im2col input matrix with AVX, advancing *cmk_ptr by
// AVX_PSIZE per step. reverse_mode == 0 gathers input -> output (im2col);
// otherwise it accumulates output back into input (col2im).
// NOTE(review): out-of-bounds taps only advance *cmk_ptr without writing —
// assumes output_ptr was pre-zeroed by the caller; confirm.
void ACX_FUN_LOAD (load_sub_matrix_fast, spatial) (
    TYPE* input_ptr, TYPE* output_ptr, int* cmk_ptr, int kc_strip, int k,
    int kernel_ri, int input_ri, int in_channel, int idx_base, int cstart,
    int rstart, int input_cols, int input_rows, short reverse_mode
) {
    // assume output_ptr is aligned; if in_channel % AVX_PSIZE == 0, the input
    // matrix can always be loaded consecutively by a step of AVX_PSIZE
    for (int ik = 0; ik < kc_strip; ik += AVX_PSIZE) {
        // Decompose the flat index k+ik into (column, row, channel) offsets
        // within the kernel window.
        int kc = (k + ik) / kernel_ri;
        int kri = (k + ik) - kc * kernel_ri;
        int kr = kri / in_channel;
        int ki = kri - kr * in_channel;
        int input_col = kc + cstart;
        int input_row = kr + rstart;
        if (input_col < input_cols && input_col >= 0 &&
            input_row < input_rows && input_row >= 0) {
            int input_index = idx_base + input_col * input_ri
                + input_row * in_channel + ki;
            if (reverse_mode == 0) {
                // im2col: unaligned load from input, aligned store to output
                AVX_TYPE v = AVX_LOADU(input_ptr + input_index);
                AVX_STOREA(output_ptr + (*cmk_ptr), v);
            }
            else {
                // col2im: accumulate the column entry back into the input
                AVX_TYPE v1 = AVX_LOADA(output_ptr + (*cmk_ptr));
                AVX_TYPE v2 = AVX_LOADU(input_ptr + input_index);
                AVX_TYPE v = AVX_ADD(v1, v2);
                AVX_STOREU(input_ptr + input_index, v);
            }
        }
        *cmk_ptr += AVX_PSIZE;
    }
    return;
}
// General variant of load_sub_matrix_fast for the case where consecutive
// AVX_PSIZE elements may straddle window boundaries: copies actual_kc entries
// of one im2col slice, using vector loads where a run is provably contiguous
// and falling back to scalar element-by-element copies otherwise.
// reverse_mode == 0 gathers input -> output (im2col); otherwise it
// accumulates output back into input (col2im). *cmk_ptr is advanced by
// actual_kc in total.
// NOTE(review): skipped (out-of-bounds) runs only advance *cmk_ptr — assumes
// output_ptr was pre-zeroed by the caller; confirm.
void ACX_FUN_LOAD (load_sub_matrix, spatial) (
    TYPE* input_ptr, TYPE* output_ptr, int* cmk_ptr, int kc_strip, int actual_kc,
    int k, int kernel_ri, int input_ri, int in_channel, int idx_base,
    int cstart, int rstart, int input_cols, int input_rows,
    int kernel_rows, short reverse_mode
){
    int ik = 0;
    // first, load `kc_strip` numbers with a step of AVX_PSIZE;
    // assume `kc_strip % AVX_PSIZE == 0`
    for ( ; ik < kc_strip; ik += AVX_PSIZE) {
        // (column, row) coordinates of the first and last element of this run
        const int cr_set[2] = {(k + ik) / in_channel,
                               (k + ik + AVX_PSIZE - 1) / in_channel};
        const int c_set[2] = {cr_set[0] / kernel_rows,
                              cr_set[1] / kernel_rows};
        const int cols[2] = {cstart + c_set[0], cstart + c_set[1]};
        // out of bounds; set the next AVX_PSIZE numbers to 0
        if (cols[0] >= input_cols || cols[1] < 0) {
            *cmk_ptr += AVX_PSIZE;
            continue;
        }
        else if (cols[0] == cols[1]) {
            // Whole run lies in one input column; check the row range.
            const int r_set[2] = {cr_set[0] - c_set[0] * kernel_rows,
                                  cr_set[1] - c_set[1] * kernel_rows};
            const int rows[2] = {rstart + r_set[0], rstart + r_set[1]};
            // out of bounds; set the next AVX_PSIZE numbers to 0
            if (rows[0] >= input_rows || rows[1] < 0) {
                *cmk_ptr += AVX_PSIZE;
                continue;
            }
            // next AVX_PSIZE numbers can be loaded consecutively
            else if (rows[0] >= 0 && rows[1] < input_rows) {
                int ki = k + ik - cr_set[0] * in_channel;
                int input_index = idx_base + cols[0] * input_ri
                    + rows[0] * in_channel + ki;
                if (reverse_mode == 0) {
                    AVX_TYPE v = AVX_LOADU(input_ptr + input_index);
                    AVX_STOREU(output_ptr + (*cmk_ptr), v);
                }
                else {
                    AVX_TYPE v1 = AVX_LOADU(output_ptr + (*cmk_ptr));
                    AVX_TYPE v2 = AVX_LOADU(input_ptr + input_index);
                    AVX_TYPE v = AVX_ADD(v1, v2);
                    AVX_STOREU(input_ptr + input_index, v);
                }
                *cmk_ptr += AVX_PSIZE;
                continue;
            }
        }
        // previous special cases do not apply; calculate input index one by one
        for (int ip = 0; ip < AVX_PSIZE; ip++) {
            int kc = (k + ik + ip) / kernel_ri;
            int kri = (k + ik + ip) - kc * kernel_ri;
            int kr = kri / in_channel;
            int ki = kri - kr * in_channel;
            int input_col = kc + cstart;
            int input_row = kr + rstart;
            if (input_col < input_cols && input_col >= 0 &&
                input_row < input_rows && input_row >= 0) {
                int input_index = idx_base + input_col * input_ri
                    + input_row * in_channel + ki;
                if (reverse_mode == 0)
                    output_ptr[*cmk_ptr] = input_ptr[input_index];
                else
                    input_ptr[input_index] += output_ptr[*cmk_ptr];
            }
            *cmk_ptr += 1;
        }
    }
    // second, load the rest `actual_kc - kc_strip` numbers (scalar tail)
    for (; ik < actual_kc; ik++) {
        int kc = (k + ik) / kernel_ri;
        int kri = (k + ik) - kc * kernel_ri;
        int kr = kri / in_channel;
        int ki = kri - kr * in_channel;
        int input_col = kc + cstart;
        int input_row = kr + rstart;
        if (input_col < input_cols && input_col >= 0 &&
            input_row < input_rows && input_row >= 0) {
            int input_index = idx_base + input_col * input_ri
                + input_row * in_channel + ki;
            if (reverse_mode == 0)
                output_ptr[*cmk_ptr] = input_ptr[input_index];
            else
                input_ptr[input_index] += output_ptr[*cmk_ptr];
        }
        *cmk_ptr += 1;
    }
    return;
}
#endif /* AVX_PSIZE */
/*
* GEBP-based implementation. See Goto et.al [08] for detail.
*/
/*
 * Forward 2-D convolution, NHWC layout.
 *
 * Small problems fall back to a one-shot im2col + GEMM; larger ones use the
 * blocked GEBP scheme: pack an (mc x kc) slice of the virtual im2col matrix
 * and a (kc x nc) slice of the kernel, multiply, and accumulate into output.
 *
 * Fixes vs. previous revision:
 *  - the kernel-block base offset is now recomputed as `k*out_channel + n`
 *    every n-iteration; the old `idx_kn_base += n` accumulated offsets across
 *    n-blocks (base, base+nc, base+3*nc, ...) and read the wrong kernel
 *    columns whenever out_channel was split into more than two nc blocks;
 *  - block-edge sizes use integer min instead of fminf (float math loses
 *    precision for indices above 2^24).
 * NOTE(review): argument order of compute_block_sizes here (&kc,&nc,&mc)
 * differs from the backward passes (&mc,&kc,&nc) — confirm intended.
 */
CAMLprim value FUN_NATIVE (spatial) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride,  value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr  = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches       = Long_val(vBatches);
  int input_cols    = Long_val(vInput_cols);
  int input_rows    = Long_val(vInput_rows);
  int in_channel    = Long_val(vIn_channel);
  int kernel_cols   = Long_val(vKernel_cols);
  int kernel_rows   = Long_val(vKernel_rows);
  int output_cols   = Long_val(vOutput_cols);
  int output_rows   = Long_val(vOutput_rows);
  int out_channel   = Long_val(vOut_channel);
  int row_stride    = Long_val(vRow_stride);
  int col_stride    = Long_val(vCol_stride);
  int padding       = Long_val(vPadding);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  const int input_cri  = in_channel  * input_rows  * input_cols;
  const int input_ri   = in_channel  * input_rows;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_cr  = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int kernel_cr  = kernel_cols * kernel_rows;
  const int kernel_ri  = kernel_rows * in_channel;
  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
  INIT;
  // SAME padding: centre the kernel window; pr/pc are the top/left pads
  int pr = 0, pc = 0;
  if (padding != 1) {
    pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
    pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }
  // if generated input matrix is small enough, use im2col implementation
  // (the division guards against int overflow in kernel_cri * output_crb)
  int mat_size = kernel_cri * output_crb;
  if (mat_size / kernel_cri == output_crb && mat_size < IM2COL_THRESHOLD) {
    TYPE *inpt2d = (TYPE *) calloc(mat_size, sizeof(TYPE));
    if (inpt2d == NULL) exit(1);
    for (int i = 0; i < output_crb; ++i) {
      int bt = i / output_cr;
      int cr = i % output_cr;
      int c = cr / output_rows;
      int r = cr % output_rows;
      const int cstart = c * col_stride - pc;
      const int rstart = r * row_stride - pr;
      const int cend = cstart + kernel_cols;
      const int rend = rstart + kernel_rows;
      const int input_idx_base = bt * input_cri;
      int cnt = 0;
      for (int a = cstart; a < cend; ++a) {
        for (int b = rstart; b < rend; ++b) {
          for (int h = 0; h < in_channel; ++h) {
            if (a < input_cols && a >= 0 &&
                b < input_rows && b >= 0) {
              int input_idx =
                input_idx_base + a * input_ri + b * in_channel + h;
              inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
    GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
         output_crb, out_channel, kernel_cri, ALPHA,
         inpt2d, kernel_cri, kernel_ptr, out_channel,
         BETA, output_ptr, out_channel);
    free(inpt2d);
    return Val_unit;
  }
  // GEBP blocked path
  int mc = output_crb;
  int kc = kernel_cri;
  int nc = out_channel;
  compute_block_sizes(&kc, &nc, &mc, sizeof(TYPE));
#if defined(AVX_PSIZE) && defined(_WIN32)
  int fast_flag = (in_channel % AVX_PSIZE == 0);
  TYPE *temp_mk = _aligned_malloc(mc * kc * sizeof(TYPE), ALIGN_SIZE);
  if (temp_mk == NULL) exit(1);
#elif defined(AVX_PSIZE)
  int fast_flag = (in_channel % AVX_PSIZE == 0);
  TYPE *temp_mk = NULL;
  if (posix_memalign((void**) &temp_mk, ALIGN_SIZE, mc * kc * sizeof(TYPE)))
    exit(1);
#else
  TYPE *temp_mk = (TYPE *) calloc(mc * kc, sizeof(TYPE));
  if (temp_mk == NULL) exit(1);
#endif
  TYPE *temp_kn = (TYPE *) calloc(nc * kc, sizeof(TYPE));
  if (temp_kn == NULL) exit(1);
  TYPE *temp_mn = (TYPE *) calloc(mc * nc, sizeof(TYPE));
  if (temp_mn == NULL) exit(1);
  for (int m = 0; m < output_crb; m += mc) {
    // integer min: avoids fminf's float round-trip on large indices
    int actual_mc = (m + mc < output_crb ? m + mc : output_crb) - m;
    for (int k = 0; k < kernel_cri; k += kc) {
      memset(temp_mk, 0, mc * kc * sizeof(TYPE));
      int actual_kc = (k + kc < kernel_cri ? k + kc : kernel_cri) - k;
#ifdef AVX_PSIZE
      int kc_strip = (actual_kc / AVX_PSIZE) * AVX_PSIZE;
#endif
      // iterate along each row of the generated input matrix; processing four
      // rows in parallel with the help of e.g. OpenMP should be possible
      int cmk = 0;
      for (int im = 0; im < actual_mc; im += 1) {
        int b = (m + im) / output_cr;
        int cr = (m + im) - b * output_cr;
        int c = cr / output_rows;
        int r = cr - c * output_rows;
        const int cstart = c * col_stride - pc;
        const int rstart = r * row_stride - pr;
        const int idx_base = b * input_cri;
        // fill in the sub input matrix
#ifdef AVX_PSIZE
        if (fast_flag) {
          ACX_FUN_LOAD (load_sub_matrix_fast, spatial) (
            input_ptr, temp_mk, &cmk, kc_strip, k, kernel_ri, input_ri,
            in_channel, idx_base, cstart, rstart, input_cols, input_rows, 0);
        }
        else {
          ACX_FUN_LOAD (load_sub_matrix, spatial) (
            input_ptr, temp_mk, &cmk, kc_strip, actual_kc,
            k, kernel_ri, input_ri, in_channel, idx_base,
            cstart, rstart, input_cols, input_rows, kernel_rows, 0);
        }
#else
        for (int ik = 0; ik < actual_kc; ik += 1) {
          int kc = (k + ik) / kernel_ri;
          int kri = (k + ik) - kc * kernel_ri;
          int kr = kri / in_channel;
          int ki = kri - kr * in_channel;
          int input_col = kc + cstart;
          int input_row = kr + rstart;
          if (input_col < input_cols && input_col >= 0 &&
              input_row < input_rows && input_row >= 0) {
            int input_index = idx_base + input_col * input_ri
              + input_row * in_channel + ki;
            temp_mk[cmk] = input_ptr[input_index];
          }
          cmk++;
        }
#endif
      }
      for (int n = 0; n < out_channel; n += nc) {
        int actual_nc = (n + nc < out_channel ? n + nc : out_channel) - n;
        // FIX: recompute the kernel-block base each iteration; the former
        // `idx_kn_base += n` accumulated across n-blocks and mis-addressed
        // the kernel when the n loop runs more than twice
        const int idx_kn = k * out_channel + n;
        // fill in the kernel matrix
        int cnk = 0;
        for (int ik = 0; ik < actual_kc; ik++) {
          for (int jn = 0; jn < actual_nc; jn++) {
            int index_kn = idx_kn + ik * out_channel + jn;
            temp_kn[cnk++] = kernel_ptr[index_kn];
          }
        }
        GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
             actual_mc, actual_nc, actual_kc, ALPHA,
             temp_mk, actual_kc, temp_kn, actual_nc,
             BETA, temp_mn, actual_nc);
        // accumulate the (mc x nc) product tile into the output
        int cmn = 0;
        for (int ix = 0; ix < actual_mc; ix++) {
          for (int iy = 0; iy < actual_nc; iy++) {
            int index_mn = (ix + m) * out_channel + (iy + n);
            output_ptr[index_mn] += temp_mn[cmn++];
          }
        }
      }
    }
  }
  free(temp_mk);
  free(temp_kn);
  free(temp_mn);
  return Val_unit;
}
/* Bytecode stub: unpacks the 17 boxed arguments and forwards to the
 * native-code entry point. */
CAMLprim value FUN_BYTE (spatial) (value * argv, int argn) {
  return FUN_NATIVE (spatial) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15],
    argv[16]);
}
/*
 * Backward pass w.r.t. the input (input gradient), NHWC layout.
 *
 * Small problems: one GEMM (dOut x K^T) into a scratch im2col matrix, then
 * col2im scatter-add into input_ptr. Large problems: blocked GEBP where the
 * product tile is scattered back via the reverse-mode sub-matrix loaders.
 *
 * Fixes vs. previous revision:
 *  - per-block offsets `idx_kn`/`idx_mn` are now recomputed from k, m and n
 *    each n-iteration; the old `idx_kn_base += n` / `idx_mn_base += n`
 *    accumulated across n-blocks (and idx_mn_base, declared at m-level, even
 *    accumulated across k-blocks), corrupting indices whenever those loops
 *    ran more than once;
 *  - block-edge sizes use integer min instead of fminf (float precision).
 */
CAMLprim value FUN_NATIVE (spatial_backward_input) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride,  value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr  = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches       = Long_val(vBatches);
  int input_cols    = Long_val(vInput_cols);
  int input_rows    = Long_val(vInput_rows);
  int in_channel    = Long_val(vIn_channel);
  int kernel_cols   = Long_val(vKernel_cols);
  int kernel_rows   = Long_val(vKernel_rows);
  int output_cols   = Long_val(vOutput_cols);
  int output_rows   = Long_val(vOutput_rows);
  int out_channel   = Long_val(vOut_channel);
  int row_stride    = Long_val(vRow_stride);
  int col_stride    = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  const int input_cri  = in_channel  * input_rows * input_cols;
  const int input_ri   = in_channel  * input_rows;
  const int output_ri  = out_channel * output_rows;
  const int output_cr  = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int kernel_ri  = kernel_rows * in_channel;
  // SAME-padding offsets (clamped at zero)
  int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  // gradient accumulates, so the destination must start from zero
  memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
  INIT;
  int mat_size = kernel_cri * output_crb;
  if (mat_size / kernel_cri == output_crb && mat_size < IM2COL_THRESHOLD) {
    TYPE *inpt2d = (TYPE *) calloc(mat_size, sizeof(TYPE));
    if (inpt2d == NULL) exit(1);
    GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
         output_crb, kernel_cri, out_channel, ALPHA,
         output_ptr, out_channel, kernel_ptr, out_channel,
         BETA, inpt2d, kernel_cri);
    // col2im: scatter-add each im2col row back into the input gradient
    for (int i = 0; i < output_crb; ++i) {
      int bt = i / output_cr;
      int cr = i % output_cr;
      int c = cr / output_rows;
      int r = cr % output_rows;
      const int cstart = c * col_stride - pc;
      const int rstart = r * row_stride - pr;
      const int cend = cstart + kernel_cols;
      const int rend = rstart + kernel_rows;
      const int input_idx_base = bt * input_cri;
      int cnt = 0;
      for (int a = cstart; a < cend; ++a) {
        for (int b = rstart; b < rend; ++b) {
          for (int h = 0; h < in_channel; ++h) {
            if (a < input_cols && a >= 0 &&
                b < input_rows && b >= 0) {
              int input_idx =
                input_idx_base + a * input_ri + b * in_channel + h;
              input_ptr[input_idx] += inpt2d[i * kernel_cri + cnt];
            }
            ++cnt;
          }
        }
      }
    }
    free(inpt2d);
    return Val_unit;
  }
  // GEBP blocked path
  int mc = output_crb;
  int kc = kernel_cri;
  int nc = out_channel;
  compute_block_sizes(&mc, &kc, &nc, sizeof(TYPE));
#if defined(AVX_PSIZE) && defined(_WIN32)
  int fast_flag = (in_channel % AVX_PSIZE == 0);
  TYPE *temp_mk = _aligned_malloc(mc * kc * sizeof(TYPE), ALIGN_SIZE);
  if (temp_mk == NULL) exit(1);
#elif defined(AVX_PSIZE)
  int fast_flag = (in_channel % AVX_PSIZE == 0);
  TYPE *temp_mk = NULL;
  if (posix_memalign((void**) &temp_mk, ALIGN_SIZE, mc * kc * sizeof(TYPE)))
    exit(1);
#else
  TYPE *temp_mk = (TYPE *) calloc(mc * kc, sizeof(TYPE));
  if (temp_mk == NULL) exit(1);
#endif
  TYPE *temp_kn = (TYPE *) calloc(nc * kc, sizeof(TYPE));
  if (temp_kn == NULL) exit(1);
  TYPE *temp_mn = (TYPE *) calloc(mc * nc, sizeof(TYPE));
  if (temp_mn == NULL) exit(1);
  for (int m = 0; m < output_crb; m += mc) {
    // integer min: avoids fminf's float round-trip on large indices
    int actual_mc = (m + mc < output_crb ? m + mc : output_crb) - m;
    for (int k = 0; k < kernel_cri; k += kc) {
      int actual_kc = (k + kc < kernel_cri ? k + kc : kernel_cri) - k;
#ifdef AVX_PSIZE
      int kc_strip = (actual_kc / AVX_PSIZE) * AVX_PSIZE;
#endif
      for (int n = 0; n < out_channel; n += nc) {
        int actual_nc = (n + nc < out_channel ? n + nc : out_channel) - n;
        // FIX: fresh per-block offsets; the former `+= n` on mutable bases
        // accumulated across loop iterations and mis-addressed both the
        // kernel and the output-gradient tiles
        const int idx_kn = k * out_channel + n;
        const int idx_mn = m * out_channel + n;
        // pack the (kc x nc) kernel tile
        int cnk = 0;
        for (int ik = 0; ik < actual_kc; ik++) {
          for (int jn = 0; jn < actual_nc; jn++) {
            int index_kn = idx_kn + ik * out_channel + jn;
            temp_kn[cnk++] = kernel_ptr[index_kn];
          }
        }
        // pack the (mc x nc) output-gradient tile
        int cmn = 0;
        for (int ix = 0; ix < actual_mc; ix++) {
          for (int iy = 0; iy < actual_nc; iy++) {
            int index_mn = idx_mn + ix * out_channel + iy;
            temp_mn[cmn++] = output_ptr[index_mn];
          }
        }
        GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
             actual_mc, actual_kc, actual_nc, ALPHA,
             temp_mn, actual_nc, temp_kn, actual_nc,
             BETA, temp_mk, actual_kc);
        // scatter the product tile back into the input gradient (reverse mode)
        int cmk = 0;
        for (int im = 0; im < actual_mc; im += 1) {
          int b = (m + im) / output_cr;
          int cr = (m + im) - b * output_cr;
          int c = cr / output_rows;
          int r = cr - c * output_rows;
          const int cstart = c * col_stride - pc;
          const int rstart = r * row_stride - pr;
          int idx_mk_base = b * input_cri;
#ifdef AVX_PSIZE
          if (fast_flag) {
            ACX_FUN_LOAD (load_sub_matrix_fast, spatial) (
              input_ptr, temp_mk, &cmk, kc_strip, k, kernel_ri, input_ri,
              in_channel, idx_mk_base, cstart, rstart, input_cols, input_rows, 1);
          }
          else {
            ACX_FUN_LOAD (load_sub_matrix, spatial) (
              input_ptr, temp_mk, &cmk, kc_strip, actual_kc,
              k, kernel_ri, input_ri, in_channel, idx_mk_base,
              cstart, rstart, input_cols, input_rows, kernel_rows, 1);
          }
#else
          for (int ik = 0; ik < actual_kc; ik += 1) {
            int kc = (k + ik) / kernel_ri;
            int kri = (k + ik) - kc * kernel_ri;
            int kr = kri / in_channel;
            int ki = kri - kr * in_channel;
            int input_col = kc + cstart;
            int input_row = kr + rstart;
            if (input_col < input_cols && input_col >= 0 &&
                input_row < input_rows && input_row >= 0) {
              int input_index = idx_mk_base + input_col * input_ri
                + input_row * in_channel + ki;
              input_ptr[input_index] += temp_mk[cmk];
            }
            cmk++;
          }
#endif
        }
      }
    }
  }
  free(temp_mk);
  free(temp_kn);
  free(temp_mn);
  return Val_unit;
}
/* Bytecode stub: unpacks the 16 boxed arguments and forwards to the
 * native-code entry point. */
CAMLprim value FUN_BYTE (spatial_backward_input) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_input) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]);
}
/*
 * Backward pass w.r.t. the kernel (weight gradient), NHWC layout.
 *
 * Small problems: im2col then one GEMM (dOut^T x im2col) and a transpose into
 * kernel_ptr. Large problems: blocked GEBP accumulating kernel-gradient tiles.
 *
 * Fixes vs. previous revision:
 *  - per-block offsets `idx_mn`/`idx_kn` are recomputed from m, k and n each
 *    n-iteration; the old `idx_mn_base += n` / `idx_kn_base += n` accumulated
 *    across n-blocks (idx_mn_base, declared at m-level, even accumulated
 *    across k-blocks), corrupting indices whenever those loops ran more than
 *    once;
 *  - block-edge sizes use integer min instead of fminf (float precision).
 */
CAMLprim value FUN_NATIVE (spatial_backward_kernel) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride,  value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr  = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches       = Long_val(vBatches);
  int input_cols    = Long_val(vInput_cols);
  int input_rows    = Long_val(vInput_rows);
  int in_channel    = Long_val(vIn_channel);
  int kernel_cols   = Long_val(vKernel_cols);
  int kernel_rows   = Long_val(vKernel_rows);
  int output_cols   = Long_val(vOutput_cols);
  int output_rows   = Long_val(vOutput_rows);
  int out_channel   = Long_val(vOut_channel);
  int row_stride    = Long_val(vRow_stride);
  int col_stride    = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  const int input_cri  = in_channel  * input_rows * input_cols;
  const int input_ri   = in_channel  * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int output_ri  = out_channel * output_rows;
  const int output_cr  = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int kernel_ri  = kernel_rows * in_channel;
  // SAME-padding offsets (clamped at zero)
  int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  // gradient accumulates, so the destination must start from zero
  memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
  INIT;
  int mat_size = kernel_cri * output_crb;
  if (mat_size / kernel_cri == output_crb && mat_size < IM2COL_THRESHOLD) {
    TYPE *inpt2d = (TYPE *) calloc(mat_size, sizeof(TYPE));
    if (inpt2d == NULL) exit(1);
    TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
    if (kern2d == NULL) exit(1);
    for (int i = 0; i < output_crb; ++i) {
      int bt = i / output_cr;
      int cr = i % output_cr;
      int c = cr / output_rows;
      int r = cr % output_rows;
      const int cstart = c * col_stride - pc;
      const int rstart = r * row_stride - pr;
      const int cend = cstart + kernel_cols;
      const int rend = rstart + kernel_rows;
      const int input_idx_base = bt * input_cri;
      int cnt = 0;
      for (int a = cstart; a < cend; ++a) {
        for (int b = rstart; b < rend; ++b) {
          for (int h = 0; h < in_channel; ++h) {
            if (a < input_cols && a >= 0 &&
                b < input_rows && b >= 0) {
              int input_idx =
                input_idx_base + a * input_ri + b * in_channel + h;
              inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
    GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
         out_channel, kernel_cri, output_crb, ALPHA,
         output_ptr, out_channel, inpt2d, kernel_cri,
         BETA, kern2d, kernel_cri);
    // transpose (out_channel x kernel_cri) into the kernel's storage order
    int cnt = 0;
    for (int j = 0; j < kernel_cri; ++j) {
      for (int i = 0; i < out_channel; ++i) {
        kernel_ptr[cnt++] = kern2d[i * kernel_cri + j];
      }
    }
    free(inpt2d);
    free(kern2d);
    return Val_unit;
  }
  // GEBP blocked path
  int mc = output_crb;
  int kc = kernel_cri;
  int nc = out_channel;
  compute_block_sizes(&mc, &kc, &nc, sizeof(TYPE));
#if defined(AVX_PSIZE) && defined(_WIN32)
  int fast_flag = (in_channel % AVX_PSIZE == 0);
  TYPE *temp_mk = _aligned_malloc(mc * kc * sizeof(TYPE), ALIGN_SIZE);
  if (temp_mk == NULL) exit(1);
#elif defined(AVX_PSIZE)
  int fast_flag = (in_channel % AVX_PSIZE == 0);
  TYPE *temp_mk = NULL;
  if (posix_memalign((void**) &temp_mk, ALIGN_SIZE, mc * kc * sizeof(TYPE)))
    exit(1);
#else
  TYPE *temp_mk = (TYPE *) calloc(mc * kc, sizeof(TYPE));
  if (temp_mk == NULL) exit(1);
#endif
  TYPE *temp_kn = (TYPE *) calloc(nc * kc, sizeof(TYPE));
  if (temp_kn == NULL) exit(1);
  TYPE *temp_mn = (TYPE *) calloc(mc * nc, sizeof(TYPE));
  if (temp_mn == NULL) exit(1);
  for (int m = 0; m < output_crb; m += mc) {
    // integer min: avoids fminf's float round-trip on large indices
    int actual_mc = (m + mc < output_crb ? m + mc : output_crb) - m;
    for (int k = 0; k < kernel_cri; k += kc) {
      int actual_kc = (k + kc < kernel_cri ? k + kc : kernel_cri) - k;
      memset(temp_mk, 0, mc * kc * sizeof(TYPE));
#ifdef AVX_PSIZE
      int kc_strip = (actual_kc / AVX_PSIZE) * AVX_PSIZE;
#endif
      // pack the (mc x kc) slice of the virtual im2col input matrix
      int cmk = 0;
      for (int im = 0; im < actual_mc; im += 1) {
        int b = (m + im) / output_cr;
        int cr = (m + im) - b * output_cr;
        int c = cr / output_rows;
        int r = cr - c * output_rows;
        const int cstart = c * col_stride - pc;
        const int rstart = r * row_stride - pr;
        const int idx_mk_base = b * input_cri;
#ifdef AVX_PSIZE
        if (fast_flag) {
          ACX_FUN_LOAD (load_sub_matrix_fast, spatial) (
            input_ptr, temp_mk, &cmk, kc_strip, k, kernel_ri, input_ri,
            in_channel, idx_mk_base, cstart, rstart, input_cols, input_rows, 0);
        }
        else {
          ACX_FUN_LOAD (load_sub_matrix, spatial) (
            input_ptr, temp_mk, &cmk, kc_strip, actual_kc,
            k, kernel_ri, input_ri, in_channel, idx_mk_base,
            cstart, rstart, input_cols, input_rows, kernel_rows, 0);
        }
#else
        for (int ik = 0; ik < actual_kc; ik += 1) {
          int kc = (k + ik) / kernel_ri;
          int kri = (k + ik) - kc * kernel_ri;
          int kr = kri / in_channel;
          int ki = kri - kr * in_channel;
          int input_col = kc + cstart;
          int input_row = kr + rstart;
          if (input_col < input_cols && input_col >= 0 &&
              input_row < input_rows && input_row >= 0) {
            int input_index = idx_mk_base + input_col * input_ri
              + input_row * in_channel + ki;
            temp_mk[cmk] = input_ptr[input_index];
          }
          cmk++;
        }
#endif
      }
      for (int n = 0; n < out_channel; n += nc) {
        int actual_nc = (n + nc < out_channel ? n + nc : out_channel) - n;
        // FIX: fresh per-block offsets; the former `+= n` on mutable bases
        // accumulated across loop iterations and mis-addressed both the
        // output-gradient and kernel-gradient tiles
        const int idx_mn = m * out_channel + n;
        const int idx_kn = k * out_channel + n;
        // pack the (mc x nc) output-gradient tile
        int cmn = 0;
        for (int ix = 0; ix < actual_mc; ix++) {
          for (int iy = 0; iy < actual_nc; iy++) {
            int index_mn = idx_mn + ix * out_channel + iy;
            temp_mn[cmn++] = output_ptr[index_mn];
          }
        }
        memset(temp_kn, 0, nc * kc * sizeof(TYPE));
        GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
             actual_nc, actual_kc, actual_mc, ALPHA,
             temp_mn, actual_nc, temp_mk, actual_kc,
             BETA, temp_kn, actual_kc);
        // accumulate the (nc x kc) product tile into the kernel gradient
        int cnk = 0;
        for (int jn = 0; jn < actual_nc; jn++) {
          for (int ik = 0; ik < actual_kc; ik++) {
            int index_kn = idx_kn + ik * out_channel + jn;
            kernel_ptr[index_kn] += temp_kn[cnk++];
          }
        }
      }
    }
  }
  free(temp_mk);
  free(temp_kn);
  free(temp_mn);
  return Val_unit;
}
/* Bytecode stub: unpacks the 16 boxed arguments and forwards to the
 * native-code entry point. */
CAMLprim value FUN_BYTE (spatial_backward_kernel) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_kernel) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]);
}
/*
* im2col implementation
*/
/*
 * Forward 2-D convolution via explicit im2col, NHWC layout.
 *
 * Expands the input into a (output_crb x kernel_cri) matrix `inpt2d` — one
 * row per output position, one column per (kernel col, kernel row, channel)
 * triple — then performs a single GEMM against the kernel matrix.
 * Out-of-window (padding) entries stay 0 from calloc.
 *
 * The im2col expansion loop is parallelized with OpenMP when available; each
 * iteration writes a disjoint row of inpt2d, so no synchronization is needed.
 *
 * NOTE(review): row_in_stride/col_in_stride are decoded but unused here —
 * input dilation appears unsupported in this path; confirm against callers.
 * NOTE(review): unlike the GEBP path, kernel_cri * output_crb is not checked
 * for int overflow before the calloc.
 */
CAMLprim value FUN_NATIVE (spatial_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride,  value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr  = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches       = Long_val(vBatches);
  int input_cols    = Long_val(vInput_cols);
  int input_rows    = Long_val(vInput_rows);
  int in_channel    = Long_val(vIn_channel);
  int kernel_cols   = Long_val(vKernel_cols);
  int kernel_rows   = Long_val(vKernel_rows);
  int output_cols   = Long_val(vOutput_cols);
  int output_rows   = Long_val(vOutput_rows);
  int out_channel   = Long_val(vOut_channel);
  int row_stride    = Long_val(vRow_stride);
  int col_stride    = Long_val(vCol_stride);
  int padding       = Long_val(vPadding);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  const int input_cri  = in_channel  * input_rows  * input_cols;
  const int input_ri   = in_channel  * input_rows;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_cr  = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
  INIT;
  // SAME padding: centre the kernel window; pr/pc are the top/left pads
  int pr = 0, pc = 0;
  if (padding != 1) {
    pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
    pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }
#ifdef _OPENMP
  #pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < output_crb; ++i) {
    // decode flat index i -> (batch, output col, output row)
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - pc;
    const int rstart = r * row_stride - pr;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    // copy the kernel-sized window (cols x rows x channels) into row i
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
          }
          ++cnt;
        }
      }
    }
  }
  // output = inpt2d (output_crb x kernel_cri) * kernel (kernel_cri x out_channel)
  GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
       output_crb, out_channel, kernel_cri, ALPHA,
       inpt2d, kernel_cri, kernel_ptr, out_channel,
       BETA, output_ptr, out_channel);
  free(inpt2d);
  return Val_unit;
}
/* Bytecode stub: unpacks the 17 boxed arguments and forwards to the
 * native-code entry point. */
CAMLprim value FUN_BYTE (spatial_im2col) (value * argv, int argn) {
  return FUN_NATIVE (spatial_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15],
    argv[16]);
}
/*
 * Kernel-gradient pass via explicit im2col, NHWC layout.
 *
 * Builds the same (output_crb x kernel_cri) im2col matrix as the forward
 * pass, computes kern2d = dOut^T * inpt2d with one GEMM, then transposes
 * kern2d (out_channel x kernel_cri) into the kernel's natural storage order
 * (kernel position major, out_channel minor).
 *
 * Padding here is always computed as SAME-style clamped offsets; the caller
 * is expected to pass output dims consistent with the padding mode used in
 * the forward pass.
 * NOTE(review): row_in_stride/col_in_stride are decoded but unused here.
 */
CAMLprim value FUN_NATIVE (spatial_backward_kernel_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride,  value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr  = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches       = Long_val(vBatches);
  int input_cols    = Long_val(vInput_cols);
  int input_rows    = Long_val(vInput_rows);
  int in_channel    = Long_val(vIn_channel);
  int kernel_cols   = Long_val(vKernel_cols);
  int kernel_rows   = Long_val(vKernel_rows);
  int output_cols   = Long_val(vOutput_cols);
  int output_rows   = Long_val(vOutput_rows);
  int out_channel   = Long_val(vOut_channel);
  int row_stride    = Long_val(vRow_stride);
  int col_stride    = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  const int input_cri  = in_channel  * input_rows * input_cols;
  const int input_ri   = in_channel  * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int output_ri  = out_channel * output_rows;
  const int output_cr  = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  INIT;
  TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  // gradient destination must start from zero
  memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
  // SAME-padding offsets (clamped at zero)
  int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
#ifdef _OPENMP
  #pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < output_crb; ++i) {
    // decode flat index i -> (batch, output col, output row)
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - pc;
    const int rstart = r * row_stride - pr;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    // copy the kernel-sized window (cols x rows x channels) into row i
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
          }
          ++cnt;
        }
      }
    }
  }
  // kern2d = dOut^T (out_channel x output_crb) * inpt2d (output_crb x kernel_cri)
  GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
       out_channel, kernel_cri, output_crb, ALPHA,
       output_ptr, out_channel, inpt2d, kernel_cri,
       BETA, kern2d, kernel_cri);
  // transpose into kernel storage order: position-major, out_channel-minor
  int cnt = 0;
  for (int j = 0; j < kernel_cri; ++j) {
    for (int i = 0; i < out_channel; ++i) {
      kernel_ptr[cnt++] = kern2d[i * kernel_cri + j];
    }
  }
  free(inpt2d);
  free(kern2d);
  return Val_unit;
}
/* Bytecode stub: unpacks the 16 boxed arguments and forwards to the
 * native-code entry point. */
CAMLprim value FUN_BYTE (spatial_backward_kernel_im2col) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_kernel_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]);
}
/*
 * Input-gradient pass via explicit im2col, NHWC layout.
 *
 * Computes inpt2d = dOut * K^T with one GEMM, giving the gradient of every
 * im2col cell, then performs col2im: scatter-adds each row of inpt2d back
 * into the (zeroed) input gradient. Overlapping kernel windows naturally
 * accumulate via `+=`, which is why this loop is NOT parallelized (unlike
 * the im2col loops elsewhere): different rows may target the same input
 * element.
 * NOTE(review): row_in_stride/col_in_stride are decoded but unused here.
 */
CAMLprim value FUN_NATIVE (spatial_backward_input_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride,  value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr  = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches       = Long_val(vBatches);
  int input_cols    = Long_val(vInput_cols);
  int input_rows    = Long_val(vInput_rows);
  int in_channel    = Long_val(vIn_channel);
  int kernel_cols   = Long_val(vKernel_cols);
  int kernel_rows   = Long_val(vKernel_rows);
  int output_cols   = Long_val(vOutput_cols);
  int output_rows   = Long_val(vOutput_rows);
  int out_channel   = Long_val(vOut_channel);
  int row_stride    = Long_val(vRow_stride);
  int col_stride    = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  const int input_cri  = in_channel  * input_rows * input_cols;
  const int input_ri   = in_channel  * input_rows;
  const int output_ri  = out_channel * output_rows;
  const int output_cr  = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  // gradient accumulates, so the destination must start from zero
  memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
  INIT;
  // SAME-padding offsets (clamped at zero)
  int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  // inpt2d = dOut (output_crb x out_channel) * K^T (out_channel x kernel_cri)
  GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
       output_crb, kernel_cri, out_channel, ALPHA,
       output_ptr, out_channel, kernel_ptr, out_channel,
       BETA, inpt2d, kernel_cri);
  // col2im: scatter-add each im2col row back into the input gradient
  for (int i = 0; i < output_crb; ++i) {
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - pc;
    const int rstart = r * row_stride - pr;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            input_ptr[input_idx] += inpt2d[i * kernel_cri + cnt];
          }
          ++cnt;
        }
      }
    }
  }
  free(inpt2d);
  return Val_unit;
}
/* Bytecode stub: unpacks the 16 boxed arguments and forwards to the
 * native-code entry point. */
CAMLprim value FUN_BYTE (spatial_backward_input_im2col) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_input_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]);
}
/* 3D (cuboid) convolution forward pass via classic im2col + one GEMM.
 *
 * Each (kernel_cols x kernel_rows x kernel_dpts x in_channel) input patch is
 * unfolded into one row of the dense matrix inpt2d
 * (output_drcb rows x kernel_idrc cols); the whole convolution is then a
 * single row-major GEMM: output = inpt2d * kernel.
 * TYPE / GEMM / ALPHA / BETA / INIT are supplied per element type by the
 * including file.  Out-of-bounds (padded) patch cells stay 0 from calloc.
 *
 * padding == 1 selects VALID (no padding); otherwise SAME padding offsets
 * pc/pr/pd are derived from the output geometry below.
 *
 * Fix vs. previous revision: allocation/zeroing byte counts are computed in
 * size_t; the old `int * int` products could overflow before the implicit
 * conversion to size_t for large tensors (UB / short allocation).
 */
CAMLprim value FUN_NATIVE (cuboid_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vPadding
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);

  /* Strides of the [batch][col][row][depth][channel] input layout. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;

  /* Cast to size_t BEFORE multiplying: avoids int overflow for big tensors. */
  TYPE *inpt2d = (TYPE *) calloc((size_t) kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  memset(output_ptr, 0, (size_t) batches * output_crdo * sizeof(TYPE));
  INIT;

  /* SAME padding amounts (0 for VALID); clamped at 0 when the kernel fits. */
  int pd = 0, pr = 0, pc = 0;
  if (padding != 1) {
    pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
    pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
    pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
    if (pc < 0) pc = 0;
    if (pr < 0) pr = 0;
    if (pd < 0) pd = 0;
  }

  /* Unfold: one independent inpt2d row per (batch, out-col, out-row, out-dpt);
   * safe to parallelize since iterations never share destinations. */
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < output_drcb; ++i) {
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int c = dstart; c < dend; ++c) {
          for (int h = 0; h < in_channel; ++h) {
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
            }
            ++cnt;  /* padded cells advance too; they remain zero */
          }
        }
      }
    }
  }

  /* output[output_drcb x out_channel] = inpt2d * kernel (row-major). */
  GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
       output_drcb, out_channel, kernel_idrc, ALPHA,
       inpt2d, kernel_idrc, kernel_ptr, out_channel,
       BETA, output_ptr, out_channel);

  free(inpt2d);
  return Val_unit;
}
/* Bytecode-interpreter trampoline for the 19-argument native cuboid_im2col. */
CAMLprim value FUN_BYTE (cuboid_im2col) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17], argv[18]
  );
}
/* 3D convolution backward pass w.r.t. the KERNEL, via im2col + GEMM.
 *
 * Rebuilds the same im2col matrix as the forward pass, then computes
 *   kern2d = inpt2d^T-style product:  GEMM(output^T, inpt2d)
 * giving the kernel gradient in [out_channel x kernel_idrc] layout, which is
 * finally transposed back into the caller's kernel layout.
 * Here vOutput carries the incoming output gradient; vKernel receives the
 * result.  Padding is always computed as SAME (clamped at 0).
 *
 * Fix vs. previous revision: allocation/zeroing byte counts are computed in
 * size_t to avoid int overflow for large tensors.
 */
CAMLprim value FUN_NATIVE (cuboid_backward_kernel_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);

  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;

  INIT;
  /* Cast to size_t BEFORE multiplying: avoids int overflow for big tensors. */
  TYPE *inpt2d = (TYPE *) calloc((size_t) kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc((size_t) kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  memset(kernel_ptr, 0, (size_t) kernel_cols * kernel_rdio * sizeof(TYPE));

  /* SAME padding, clamped at 0 (matches the forward pass). */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;

  /* Unfold input patches exactly as in the forward pass; iterations are
   * independent, so the loop is safe to parallelize. */
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < output_drcb; ++i) {
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int c = dstart; c < dend; ++c) {
          for (int h = 0; h < in_channel; ++h) {
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
  }

  /* kern2d[out_channel x kernel_idrc] = output_grad^T * inpt2d. */
  GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
       out_channel, kernel_idrc, output_drcb, ALPHA,
       output_ptr, out_channel, inpt2d, kernel_idrc,
       BETA, kern2d, kernel_idrc);

  /* Transpose kern2d back into the caller's kernel layout. */
  int cnt = 0;
  for (int j = 0; j < kernel_idrc; ++j) {
    for (int i = 0; i < out_channel; ++i) {
      kernel_ptr[cnt++] = kern2d[i * kernel_idrc + j];
    }
  }

  free(inpt2d);
  free(kern2d);
  return Val_unit;
}
/* Bytecode-interpreter trampoline for the 18-argument native implementation. */
CAMLprim value FUN_BYTE (cuboid_backward_kernel_im2col) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_kernel_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17]
  );
}
/* 3D convolution backward pass w.r.t. the INPUT (col2im), via GEMM + scatter.
 *
 * First computes the unfolded input gradient
 *   inpt2d = output_grad * kernel^T
 * then scatters/accumulates each inpt2d row back into the (overlapping)
 * input patches.  vOutput carries the incoming output gradient; vInput
 * receives the result.  Padding is always computed as SAME (clamped at 0).
 *
 * The scatter loop is intentionally NOT OpenMP-parallelized: patches from
 * neighbouring output positions overlap, so concurrent `+=` on input_ptr
 * would race.
 *
 * Fix vs. previous revision: allocation/zeroing byte counts are computed in
 * size_t to avoid int overflow for large tensors.
 */
CAMLprim value FUN_NATIVE (cuboid_backward_input_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);

  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;

  /* Cast to size_t BEFORE multiplying: avoids int overflow for big tensors. */
  TYPE *inpt2d = (TYPE *) calloc((size_t) kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  memset(input_ptr, 0, (size_t) batches * input_crdi * sizeof(TYPE));
  INIT;

  /* SAME padding, clamped at 0 (matches the forward pass). */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;

  /* inpt2d[output_drcb x kernel_idrc] = output_grad * kernel^T. */
  GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
       output_drcb, kernel_idrc, out_channel, ALPHA,
       output_ptr, out_channel, kernel_ptr, out_channel,
       BETA, inpt2d, kernel_idrc);

  /* col2im: accumulate each unfolded row back into the input gradient. */
  for (int i = 0; i < output_drcb; ++i) {
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int c = dstart; c < dend; ++c) {
          for (int h = 0; h < in_channel; ++h) {
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              input_ptr[input_idx] += inpt2d[i * kernel_idrc + cnt];
            }
            ++cnt;  /* padded cells are skipped but still advance */
          }
        }
      }
    }
  }

  free(inpt2d);
  return Val_unit;
}
/* Bytecode-interpreter trampoline for the 18-argument native implementation. */
CAMLprim value FUN_BYTE (cuboid_backward_input_im2col) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_input_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17]
  );
}
/*
 * Memory-efficient convolution (MEC) implementations: instead of
 * materialising the full im2col matrix, only a kernel-width strip of the
 * input is unfolded and one GEMM is issued per output row over an
 * overlapping window of that strip.
 */
/* 2D convolution forward pass using the memory-efficient (MEC) scheme.
 *
 * inpt2d holds, column-major, one kernel_cols-wide strip of the padded input
 * per (batch, output-column) pair, spanning all padded_input_rows rows.
 * For each output row i, a GEMM is run on the window starting at
 * inpt2d + inpt2d_step * i (windows overlap when row_stride < kernel_rows),
 * so only the strip matrix — much smaller than full im2col — is allocated.
 * TYPE / GEMM / ALPHA / BETA / INIT come from the including file.
 *
 * NOTE(review): row_in_stride / col_in_stride are decoded but never used —
 * input dilation appears unsupported here; confirm against callers.
 */
CAMLprim value FUN_NATIVE (spatial_mec) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  int row_in_stride = Long_val(vRow_in_stride);   /* currently unused */
  int col_in_stride = Long_val(vCol_in_stride);   /* currently unused */

  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = input_rows * in_channel;
  const int output_cri = out_channel * output_rows * output_cols;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int kernel_rio = kernel_rows * in_channel * out_channel;
  const int kernel_io = in_channel * out_channel;
  /* Rows of padded input that a full column of outputs touches. */
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bco = out_channel * output_cols * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * in_channel;
  const int inpt2d_rows = batches * output_cols;
  /* Column-major offset between consecutive output-row GEMM windows. */
  const int inpt2d_step = inpt2d_rows * kernel_cols * in_channel * row_stride;

  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(batches * output_cri, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
  INIT;

  /* SAME padding (padding != 1); VALID leaves pr = pc = 0. */
  int pr = 0, pc = 0;
  if (padding != 1) {
    pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
    pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }

  /* Repack kernel into [kernel_cri x out_channel], GEMM-friendly order. */
  int cnt = 0;
  int kidx = 0;
  for (int o = 0; o < out_channel; ++o) {
    for (int r = 0; r < kernel_rows; ++r) {
      for (int c = 0; c < kernel_cols; ++c) {
        for (int i = 0; i < in_channel; ++i) {
          kidx = c * kernel_rio + r * kernel_io + i * out_channel + o;
          kern2d[cnt++] = kernel_ptr[kidx];
        }
      }
    }
  }

  /* Unfold one kernel-width column strip per (batch, output-column);
   * out-of-bounds (padded) cells stay zero from calloc. */
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / output_cols;
    int c = i % output_cols;
    const int cstart = c * col_stride - pc;
    const int cend = cstart + kernel_cols;
    const int rstart = 0 - pr;
    const int rend = rstart + padded_input_rows;
    int counter = 0;
    for (int a = rstart; a < rend; ++a) {
      for (int b = cstart; b < cend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          if (b < input_cols && b >= 0 &&
              a < input_rows && a >= 0) {
            int input_idx = bt * input_cri + b * input_ri + a * in_channel + h;
            inpt2d[counter * inpt2d_rows + i] = input_ptr[input_idx];
          }
          counter++;
        }
      }
    }
  }

  /* One column-major GEMM per output row on an overlapping strip window. */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasNoTrans, CblasNoTrans,
         inpt2d_rows, out_channel, kernel_cri, ALPHA,
         inpt2d + inpt2d_step * i, inpt2d_rows, kern2d, kernel_cri,
         BETA, output2d + output_bco * i, inpt2d_rows);
  }

  /* Transpose output2d back into the caller's output layout. */
  cnt = 0;
  for (int j = 0; j < inpt2d_rows; ++j) {
    for (int i = 0; i < output_rows * out_channel; ++i) {
      output_ptr[cnt++] = output2d[i * inpt2d_rows + j];
    }
  }

  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode-interpreter trampoline for the 17-argument native spatial_mec. */
CAMLprim value FUN_BYTE (spatial_mec) (value * argv, int argn) {
  return FUN_NATIVE (spatial_mec) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16]
  );
}
/* 2D convolution backward pass w.r.t. the KERNEL, MEC variant.
 *
 * Rebuilds the strip matrix inpt2d as in the forward pass, transposes the
 * incoming output gradient (vOutput_ptr) into output2d, then runs one
 * transposed GEMM per output row.  Those GEMMs use beta = ALPHA so the
 * per-row kernel-gradient contributions ACCUMULATE into kern2d, which is
 * finally scattered back into the caller's kernel layout (vKernel_ptr).
 * Padding is always computed as SAME here (clamped at 0).
 *
 * NOTE(review): output_ri, output_cr, row_in_stride and col_in_stride are
 * computed but never used — presumably kept for symmetry with siblings.
 */
CAMLprim value FUN_NATIVE (spatial_backward_kernel_mec) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);   /* currently unused */
  int col_in_stride = Long_val(vCol_in_stride);   /* currently unused */

  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_ri = out_channel * output_rows;    /* unused */
  const int output_cr = output_rows * output_cols;    /* unused */
  const int output_ro = output_rows * out_channel;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_io = in_channel * out_channel;
  const int kernel_rio = kernel_rows * in_channel * out_channel;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bco = out_channel * output_cols * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * in_channel;
  const int inpt2d_rows = batches * output_cols;
  /* Column-major offset between consecutive output-row GEMM windows. */
  const int inpt2d_step = batches * output_cols * kernel_cols * in_channel * row_stride;

  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(output_crb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
  INIT;

  /* SAME padding, clamped at 0 (matches the forward pass). */
  int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;

  /* Unfold one kernel-width column strip per (batch, output-column). */
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / output_cols;
    int c = i % output_cols;
    const int cstart = c * col_stride - pc;
    const int cend = cstart + kernel_cols;
    const int rstart = 0 - pr;
    const int rend = rstart + padded_input_rows;
    int counter = 0;
    for (int a = rstart; a < rend; ++a) {
      for (int b = cstart; b < cend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          if (b < input_cols && b >= 0 &&
              a < input_rows && a >= 0) {
            int input_idx =
              bt * input_cri + b * input_ri + a * in_channel + h;
            inpt2d[counter * inpt2d_rows + i] = input_ptr[input_idx];
          }
          counter++;
        }
      }
    }
  }

  /* Transpose the output gradient into column-major output2d. */
  int cnt = 0;
  for (int j = 0; j < inpt2d_rows; ++j) {
    for (int i = 0; i < output_ro; ++i) {
      output2d[i * inpt2d_rows + j] = output_ptr[cnt++];
    }
  }

  /* Per output row: kern2d += output2d_window^T * inpt2d_window
   * (beta = ALPHA makes the rows accumulate). */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasTrans, CblasNoTrans,
         out_channel, kernel_cri, inpt2d_rows, ALPHA,
         output2d + output_bco * i, inpt2d_rows,
         inpt2d + inpt2d_step * i, inpt2d_rows,
         ALPHA, kern2d, out_channel);
  }

  /* Scatter kern2d back into the caller's kernel layout. */
  cnt = 0;
  int kidx = 0;
  for (int r = 0; r < kernel_rows; ++r) {
    for (int c = 0; c < kernel_cols; ++c) {
      for (int i = 0; i < in_channel; ++i) {
        for (int o = 0; o < out_channel; ++o) {
          kidx = c * kernel_rio + r * kernel_io + i * out_channel + o;
          kernel_ptr[kidx] = kern2d[cnt++];
        }
      }
    }
  }

  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode-interpreter trampoline for the 16-argument native implementation. */
CAMLprim value FUN_BYTE (spatial_backward_kernel_mec) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_kernel_mec) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
  );
}
/* 2D convolution backward pass w.r.t. the INPUT, MEC variant.
 *
 * Transposes the output gradient into output2d and repacks the kernel, then
 * runs one GEMM per output row writing into OVERLAPPING windows of inpt2d
 * (beta = ALPHA so overlapping rows accumulate), and finally scatters
 * inpt2d back into vInput_ptr with `+=` (neighbouring patches overlap).
 * Padding is always computed as SAME here (clamped at 0).
 *
 * NOTE(review): output_ri, output_cr, row_in_stride and col_in_stride are
 * computed but never used — presumably kept for symmetry with siblings.
 */
CAMLprim value FUN_NATIVE (spatial_backward_input_mec) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);   /* currently unused */
  int col_in_stride = Long_val(vCol_in_stride);   /* currently unused */

  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_ri = out_channel * output_rows;    /* unused */
  const int output_cr = output_rows * output_cols;    /* unused */
  const int output_ro = output_rows * out_channel;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_io = in_channel * out_channel;
  const int kernel_rio = kernel_rows * in_channel * out_channel;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bco = out_channel * output_cols * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * in_channel;
  const int inpt2d_rows = batches * output_cols;
  /* Column-major offset between consecutive output-row GEMM windows. */
  const int inpt2d_step = batches * output_cols * kernel_cols * in_channel * row_stride;

  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(output_crb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
  INIT;

  /* SAME padding, clamped at 0 (matches the forward pass). */
  int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;

  /* Transpose the output gradient into column-major output2d. */
  int cnt = 0;
  for (int j = 0; j < inpt2d_rows; ++j) {
    for (int i = 0; i < output_ro; ++i) {
      output2d[i * inpt2d_rows + j] = output_ptr[cnt++];
    }
  }

  /* Repack kernel into [kernel_cri x out_channel], GEMM-friendly order. */
  cnt = 0;
  int kidx = 0;
  for (int o = 0; o < out_channel; ++o) {
    for (int r = 0; r < kernel_rows; ++r) {
      for (int c = 0; c < kernel_cols; ++c) {
        for (int i = 0; i < in_channel; ++i) {
          kidx = c * kernel_rio + r * kernel_io + i * out_channel + o;
          kern2d[cnt++] = kernel_ptr[kidx];
        }
      }
    }
  }

  /* Per output row: inpt2d_window += output2d_window * kern2d^T
   * (beta = ALPHA: overlapping windows must accumulate). */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasNoTrans, CblasTrans,
         inpt2d_rows, kernel_cri, out_channel, ALPHA,
         output2d + output_bco * i, inpt2d_rows,
         kern2d, kernel_cri, ALPHA,
         inpt2d + inpt2d_step * i, inpt2d_rows);
  }

  /* Scatter the strip gradients back into the input gradient. */
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / output_cols;
    int c = i % output_cols;
    const int cstart = c * col_stride - pc;
    const int cend = cstart + kernel_cols;
    const int rstart = 0 - pr;
    const int rend = rstart + padded_input_rows;
    const int input_idx_base = bt * input_cri;
    int counter = 0;
    for (int a = rstart; a < rend; ++a) {
      for (int b = cstart; b < cend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          if (b < input_cols && b >= 0 &&
              a < input_rows && a >= 0) {
            int input_idx = input_idx_base + b * input_ri + a * in_channel + h;
            input_ptr[input_idx] += inpt2d[counter * inpt2d_rows + i];
          }
          counter++;
        }
      }
    }
  }

  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode-interpreter trampoline for the 16-argument native implementation. */
CAMLprim value FUN_BYTE (spatial_backward_input_mec) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_input_mec) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
  );
}
/* 3D (cuboid) convolution forward pass, MEC variant.
 *
 * The strip matrix inpt2d holds, column-major, one
 * (kernel_cols x kernel_dpts x in_channel)-wide strip per
 * (batch, output-column, output-depth), spanning all padded_input_rows rows;
 * one GEMM per output row is run over an overlapping window offset by
 * inpt2d_step, then output2d is permuted back to the caller's layout.
 * padding == 1 selects VALID; otherwise SAME offsets pc/pr/pd are used.
 *
 * NOTE(review): the strip fill uses `+=` where each cell is written at most
 * once (inpt2d is calloc'd), so it is equivalent to plain assignment —
 * spatial_mec uses `=` for the same step.
 */
CAMLprim value FUN_NATIVE (cuboid_mec) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vPadding
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);

  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  const int kernel_rdio = kernel_rows * kernel_dpts * in_channel * out_channel;
  const int kernel_dio = kernel_dpts * in_channel * out_channel;
  const int kernel_io = in_channel * out_channel;
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bcdo = out_channel * output_cols * output_dpts * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * kernel_dpts * in_channel;
  const int inpt2d_rows = batches * output_cols * output_dpts;
  /* Column-major offset between consecutive output-row GEMM windows. */
  const int inpt2d_step = inpt2d_rows * kernel_cols * kernel_dpts * in_channel * row_stride;

  INIT;

  /* SAME padding (padding != 1); VALID leaves pd = pr = pc = 0. */
  int pd = 0, pr = 0, pc = 0;
  if (padding != 1) {
    pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
    pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
    pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
    if (pc < 0) pc = 0;
    if (pr < 0) pr = 0;
    if (pd < 0) pd = 0;
  }

  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(output_drcb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  memset(output_ptr, 0, output_drcb * out_channel * sizeof(TYPE));

  /* Repack kernel into [kernel_idrc x out_channel], GEMM-friendly order. */
  int cnt = 0;
  int kidx = 0;
  for (int o = 0; o < out_channel; ++o) {
    for (int r = 0; r < kernel_rows; ++r) {
      for (int c = 0; c < kernel_cols; ++c) {
        for (int d = 0; d < kernel_dpts; ++d) {
          for (int i = 0; i < in_channel; ++i) {
            kidx = c * kernel_rdio + r * kernel_dio +
                   d * kernel_io + i * out_channel + o;
            kern2d[cnt++] = kernel_ptr[kidx];
          }
        }
      }
    }
  }

  /* Unfold one strip per (batch, output-col, output-dpt); padded cells
   * stay zero from calloc.  Row range is shared by all strips. */
  const int rstart = 0 - pr;
  const int rend = rstart + padded_input_rows;
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / (output_cols * output_dpts);
    int cd = i % (output_cols * output_dpts);
    int ct = cd / output_dpts;
    int dt = cd % output_dpts;
    const int cstart = ct * col_stride - pc;
    const int dstart = dt * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;  /* shadows outer cnt on purpose: per-strip cell counter */
    for (int r = rstart; r < rend; ++r) {
      for (int c = cstart; c < cend; ++c) {
        for (int d = dstart; d < dend; ++d) {
          for (int h = 0; h < in_channel; ++h) {
            if (c >= 0 && c < input_cols &&
                r >= 0 && r < input_rows &&
                d >= 0 && d < input_dpts) {
              int input_idx = input_idx_base + c * input_rdi +
                              r * input_di + d * in_channel + h;
              inpt2d[cnt * inpt2d_rows + i] += input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
  }

  /* One column-major GEMM per output row on an overlapping strip window. */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasNoTrans, CblasNoTrans,
         inpt2d_rows, out_channel, kernel_idrc, ALPHA,
         inpt2d + inpt2d_step * i, inpt2d_rows, kern2d, kernel_idrc,
         BETA, output2d + output_bcdo * i, inpt2d_rows);
  }

  /* Permute output2d back into the caller's output layout. */
  cnt = 0;
  int oidx = 0;
  for (int r = 0; r < output_rows; ++r) {
    for (int o = 0; o < out_channel; ++o) {
      for (int b = 0; b < batches; ++b) {
        for (int c = 0; c < output_cols; ++c) {
          for (int d = 0; d < output_dpts; ++d) {
            oidx = b * output_crdo + c * output_rdo +
                   r * output_dpts * out_channel + d * out_channel + o;
            output_ptr[oidx] = output2d[cnt++];
          }
        }
      }
    }
  }

  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode-interpreter trampoline for the 19-argument native cuboid_mec. */
CAMLprim value FUN_BYTE (cuboid_mec) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_mec) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17], argv[18]
  );
}
/* 3D convolution backward pass w.r.t. the KERNEL, MEC variant.
 *
 * Rebuilds the strip matrix inpt2d as in cuboid_mec, permutes the incoming
 * output gradient (vOutput) into output2d, then runs one transposed GEMM per
 * output row with beta = ALPHA so per-row contributions ACCUMULATE into
 * kern2d; kern2d is finally scattered back into the caller's kernel layout
 * (vKernel).  Padding is always computed as SAME here (clamped at 0).
 */
CAMLprim value FUN_NATIVE (cuboid_backward_kernel_mec) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);

  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_dr = output_dpts * output_rows;     /* unused here */
  const int output_drc = output_dpts * output_rows * output_cols;  /* unused here */
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  const int kernel_rdio = kernel_rows * kernel_dpts * in_channel * out_channel;
  const int kernel_dio = kernel_dpts * in_channel * out_channel;
  const int kernel_io = in_channel * out_channel;
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bcdo = out_channel * output_cols * output_dpts * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * kernel_dpts * in_channel;
  const int inpt2d_rows = batches * output_cols * output_dpts;
  /* Column-major offset between consecutive output-row GEMM windows. */
  const int inpt2d_step = inpt2d_rows * kernel_cols * kernel_dpts * in_channel * row_stride;

  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(output_drcb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  memset(kernel_ptr, 0, kernel_idrc * out_channel * sizeof(TYPE));
  INIT;

  /* SAME padding, clamped at 0 (matches the forward pass). */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;

  /* Unfold one strip per (batch, output-col, output-dpt). */
  int cnt;
  const int rstart = 0 - pr;
  const int rend = rstart + padded_input_rows;
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / (output_cols * output_dpts);
    int cd = i % (output_cols * output_dpts);
    int ct = cd / output_dpts;
    int dt = cd % output_dpts;
    const int cstart = ct * col_stride - pc;
    const int dstart = dt * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    cnt = 0;
    for (int r = rstart; r < rend; ++r) {
      for (int c = cstart; c < cend; ++c) {
        for (int d = dstart; d < dend; ++d) {
          for (int h = 0; h < in_channel; ++h) {
            if (c >= 0 && c < input_cols &&
                r >= 0 && r < input_rows &&
                d >= 0 && d < input_dpts) {
              int input_idx = input_idx_base + c * input_rdi +
                              r * input_di + d * in_channel + h;
              inpt2d[cnt * inpt2d_rows + i] += input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
  }

  /* Permute the output gradient into column-major output2d. */
  cnt = 0;
  int oidx = 0;
  for (int r = 0; r < output_rows; ++r) {
    for (int o = 0; o < out_channel; ++o) {
      for (int b = 0; b < batches; ++b) {
        for (int c = 0; c < output_cols; ++c) {
          for (int d = 0; d < output_dpts; ++d) {
            oidx = b * output_crdo + c * output_rdo +
                   r * output_dpts * out_channel + d * out_channel + o;
            output2d[cnt++] = output_ptr[oidx];
          }
        }
      }
    }
  }

  /* Per output row: kern2d += output2d_window^T * inpt2d_window
   * (beta = ALPHA makes the rows accumulate). */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasTrans, CblasNoTrans,
         out_channel, kernel_idrc, inpt2d_rows, ALPHA,
         output2d + output_bcdo * i, inpt2d_rows,
         inpt2d + inpt2d_step * i, inpt2d_rows,
         ALPHA, kern2d, out_channel);
  }

  /* Scatter kern2d back into the caller's kernel layout. */
  cnt = 0;
  int kidx = 0;
  for (int r = 0; r < kernel_rows; ++r) {
    for (int c = 0; c < kernel_cols; ++c) {
      for (int d = 0; d < kernel_dpts; ++d) {
        for (int i = 0; i < in_channel; ++i) {
          for (int o = 0; o < out_channel; ++o) {
            kidx = c * kernel_rdio + r * kernel_dio +
                   d * kernel_io + i * out_channel + o;
            kernel_ptr[kidx] = kern2d[cnt++];
          }
        }
      }
    }
  }

  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode-interpreter entry point: OCaml boxes all 18 arguments into an
 * array when a stub has more than 5 parameters; unpack and delegate to
 * the native stub. */
CAMLprim value FUN_BYTE (cuboid_backward_kernel_mec) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_kernel_mec) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/*
 * 3D (cuboid) convolution, backward pass w.r.t. the input, MEC variant
 * (Memory-Efficient Convolution: one GEMM per output row against a
 * row-overlapping, partially materialised im2col buffer, followed by a
 * col2im-style scatter back into the input gradient).
 *
 * Memory layouts (last index varies fastest):
 *   input  : [batches][input_cols][input_rows][input_dpts][in_channel]
 *   kernel : [kernel_cols][kernel_rows][kernel_dpts][in_channel][out_channel]
 *   output : [batches][output_cols][output_rows][output_dpts][out_channel]
 *
 * The input-gradient bigarray (vInput) is zeroed, then accumulated into.
 * Returns Val_unit.
 *
 * NOTE(review): exit(1) on allocation failure aborts the whole OCaml
 * runtime instead of raising — presumably accepted policy for these stubs.
 */
CAMLprim value FUN_NATIVE (cuboid_backward_input_mec) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* Flat-index strides for each tensor (suffix letters name the dims
   * folded into the stride). output_dr / output_drc are not used below. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  const int kernel_rdio = kernel_rows * kernel_dpts * in_channel * out_channel;
  const int kernel_dio = kernel_dpts * in_channel * out_channel;
  const int kernel_io = in_channel * out_channel;
  /* Row extent the kernel sweeps over all output rows combined; the
   * intermediate buffer covers this padded range so consecutive output
   * rows can share (overlap) their slabs. */
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bcdo = out_channel * output_cols * output_dpts * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * kernel_dpts * in_channel;
  const int inpt2d_rows = batches * output_cols * output_dpts;
  /* Offset between the slabs of consecutive output rows inside inpt2d;
   * includes row_stride, so slabs overlap when row_stride < kernel_rows. */
  const int inpt2d_step = inpt2d_rows * kernel_cols * kernel_dpts * in_channel * row_stride;
  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(output_drcb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  /* The input gradient is accumulated below, so start from zero. */
  memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE));
  INIT;
  /* SAME-style symmetric padding, clamped at zero (VALID case). */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  /* Repack the output gradient as output2d: one contiguous
   * (inpt2d_rows x out_channel) panel per output row r. */
  int cnt = 0;
  int oidx = 0;
  for (int r = 0; r < output_rows; ++r) {
    for (int o = 0; o < out_channel; ++o) {
      for (int b = 0; b < batches; ++b) {
        for (int c = 0; c < output_cols; ++c) {
          for (int d = 0; d < output_dpts; ++d) {
            oidx = b * output_crdo + c * output_rdo +
              r * output_dpts * out_channel + d * out_channel + o;
            output2d[cnt++] = output_ptr[oidx];
          }
        }
      }
    }
  }
  /* Repack the kernel as kern2d (out_channel-major, kernel_idrc minor)
   * so each GEMM can consume it as a (kernel_idrc x out_channel) panel. */
  cnt = 0;
  int kidx = 0;
  for (int o = 0; o < out_channel; ++o) {
    for (int r = 0; r < kernel_rows; ++r) {
      for (int c = 0; c < kernel_cols; ++c) {
        for (int d = 0; d < kernel_dpts; ++d) {
          for (int i = 0; i < in_channel; ++i) {
            kidx = c * kernel_rdio + r * kernel_dio +
              d * kernel_io + i * out_channel + o;
            kern2d[cnt++] = kernel_ptr[kidx];
          }
        }
      }
    }
  }
  /* One GEMM per output row: inpt2d_slab += output2d_panel * kern2d^T.
   * Note beta is ALPHA here (presumably 1 — TODO confirm), so overlapping
   * slabs of consecutive rows accumulate rather than overwrite. */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasNoTrans, CblasTrans,
      inpt2d_rows, kernel_idrc, out_channel, ALPHA,
      output2d + output_bcdo * i, inpt2d_rows,
      kern2d, kernel_idrc, ALPHA,
      inpt2d + inpt2d_step * i, inpt2d_rows);
  }
  /* col2im scatter: walk the same tap order used to build the buffer and
   * accumulate each in-bounds tap back into the input gradient. cnt must
   * advance for out-of-bounds taps too, to stay aligned with the columns. */
  const int rstart = 0 - pr;
  const int rend = rstart + padded_input_rows;
  for (int i = 0; i < inpt2d_rows; ++i) {
    /* Decompose the row index i into (batch, output col, output depth). */
    int bt = i / (output_cols * output_dpts);
    int cd = i % (output_cols * output_dpts);
    int ct = cd / output_dpts;
    int dt = cd % output_dpts;
    const int cstart = ct * col_stride - pc;
    const int dstart = dt * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int r = rstart; r < rend; ++r) {
      for (int c = cstart; c < cend; ++c) {
        for (int d = dstart; d < dend; ++d) {
          for (int h = 0; h < in_channel; ++h) {
            if (c >= 0 && c < input_cols &&
                r >= 0 && r < input_rows &&
                d >= 0 && d < input_dpts) {
              int input_idx = input_idx_base + c * input_rdi +
                r * input_di + d * in_channel + h;
              input_ptr[input_idx] += inpt2d[cnt * inpt2d_rows + i];
            }
            ++cnt;
          }
        }
      }
    }
  }
  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode-interpreter entry point: OCaml boxes all 18 arguments into an
 * array when a stub has more than 5 parameters; unpack and delegate to
 * the native stub. */
CAMLprim value FUN_BYTE (cuboid_backward_input_mec) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_input_mec) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/*
* naive implementation
*/
/*
 * 2D (spatial) convolution, forward pass, naive direct implementation
 * (no im2col / GEMM; serves as the reference path).
 *
 * Memory layouts (last index varies fastest):
 *   input  : [batches][input_cols][input_rows][in_channel]
 *   kernel : [kernel_cols][kernel_rows][in_channel][out_channel]
 *   output : [batches][output_cols][output_rows][out_channel]
 *
 * padding == 1 means VALID (no padding); any other value selects
 * SAME-style symmetric padding computed below. Out-of-bounds taps
 * contribute zero. Writes into vOutput_ptr's data; returns Val_unit.
 *
 * Changes vs. previous revision: removed dead locals (output_cr,
 * output_crb, kernel_cri, ksize — computed but never read) and moved
 * caml_input_val/kernel_val into the innermost scope.
 */
CAMLprim value FUN_NATIVE (spatial_naive) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  /* Dilation factors are part of the shared stub interface but are not
   * used by the naive (undilated) kernel. */
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Flat-index strides for each tensor. */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_ri = out_channel * output_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int kernel_io = out_channel * in_channel;
  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
  INIT;
  /* SAME padding: split the total padding symmetrically, clamped at 0. */
  int pr = 0, pc = 0;
  if (padding != 1) {
    pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
    pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_cri;
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        const int output_idx_base = i * output_cri + j * output_ri + k * out_channel;
        /* Receptive field of output element (j, k), in input coords. */
        const int cstart = j * col_stride - pc;
        const int rstart = k * row_stride - pr;
        const int cend = cstart + kernel_cols;
        const int rend = rstart + kernel_rows;
        for (int l = 0; l < out_channel; ++l) {
          TYPE sum = 0.;
          for (int h = 0; h < in_channel; ++h) {
            for (int a = cstart; a < cend; ++a) {
              for (int b = rstart; b < rend; ++b) {
                /* Zero-pad: taps outside the input contribute nothing. */
                TYPE caml_input_val = 0.;
                if (a >= 0 && a < input_cols &&
                    b >= 0 && b < input_rows) {
                  int input_idx =
                    input_idx_base + a * input_ri + b * in_channel + h;
                  caml_input_val = *(input_ptr + input_idx);
                }
                int kernel_index =
                  (a - cstart) * kernel_rio + (b - rstart) * kernel_io + h * out_channel + l;
                TYPE kernel_val = *(kernel_ptr + kernel_index);
                sum += caml_input_val * kernel_val;
              }
            }
          }
          int output_idx = output_idx_base + l;
          *(output_ptr + output_idx) = sum;
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode-interpreter entry point: OCaml boxes all 17 arguments into an
 * array when a stub has more than 5 parameters; unpack and delegate to
 * the native stub. */
CAMLprim value FUN_BYTE (spatial_naive) (value * argv, int argn) {
  return FUN_NATIVE (spatial_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16]
  );
}
/*
 * 2D (spatial) convolution, backward pass w.r.t. the kernel, naive
 * direct implementation. For every output element, accumulates
 * (output gradient x input tap) into the corresponding kernel-gradient
 * cell; out-of-bounds input taps contribute zero.
 *
 * Layouts match spatial_naive:
 *   input  : [batches][input_cols][input_rows][in_channel]
 *   kernel : [kernel_cols][kernel_rows][in_channel][out_channel]
 *   output : [batches][output_cols][output_rows][out_channel]
 *
 * The kernel-gradient bigarray (vKernel_ptr) is zeroed then accumulated.
 * Returns Val_unit. No padding-mode argument: SAME-style symmetric
 * padding is always derived from the shapes (clamped at zero).
 */
CAMLprim value FUN_NATIVE (spatial_backward_kernel_naive) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* Dilation factors: accepted for interface symmetry, unused here. */
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Flat-index strides for each tensor. */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int kernel_io = out_channel * in_channel;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_ri = out_channel * output_rows;
  /* Gradient is accumulated with +=, so start from zero. */
  memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
  INIT;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  for (int i = 0; i < batches; ++i) {
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        /* Receptive field of output element (j, k), in input coords. */
        const int cstart = j * col_stride - pc;
        const int rstart = k * row_stride - pr;
        const int cend = cstart + kernel_cols;
        const int rend = rstart + kernel_rows;
        for (int l = 0; l < out_channel; ++l) {
          int output_idx =
            i * output_cri + j * output_ri + k * out_channel + l;
          TYPE caml_output_val = *(output_ptr + output_idx);
          for (int h = 0; h < in_channel; ++h) {
            TYPE caml_input_val = 0.;
            for (int a = cstart; a < cend; ++a) {
              for (int b = rstart; b < rend; ++b) {
                /* Zero-pad: out-of-bounds taps read as zero. */
                if (a >= 0 && a < input_cols &&
                    b >= 0 && b < input_rows) {
                  int input_idx =
                    i * input_cri + a * input_ri + b * in_channel + h;
                  caml_input_val = *(input_ptr + input_idx);
                } else {
                  caml_input_val = 0.;
                }
                int kernel_index =
                  (a - cstart) * kernel_rio + (b - rstart) * kernel_io + h * out_channel + l;
                *(kernel_ptr + kernel_index) += caml_output_val * caml_input_val;
              }
            }
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode-interpreter entry point: OCaml boxes all 16 arguments into an
 * array when a stub has more than 5 parameters; unpack and delegate to
 * the native stub. */
CAMLprim value FUN_BYTE (spatial_backward_kernel_naive) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_kernel_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/*
 * 2D (spatial) convolution, backward pass w.r.t. the input, naive direct
 * implementation. For every output element, scatters
 * (output gradient x kernel weight) back onto each in-bounds input tap
 * of its receptive field.
 *
 * Layouts match spatial_naive:
 *   input  : [batches][input_cols][input_rows][in_channel]
 *   kernel : [kernel_cols][kernel_rows][in_channel][out_channel]
 *   output : [batches][output_cols][output_rows][out_channel]
 *
 * The input-gradient bigarray (vInput_ptr) is zeroed then accumulated.
 * SAME-style symmetric padding is always derived from the shapes
 * (clamped at zero). Returns Val_unit.
 */
CAMLprim value FUN_NATIVE (spatial_backward_input_naive) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* Dilation factors: accepted for interface symmetry, unused here. */
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Flat-index strides for each tensor. */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int kernel_io = out_channel * in_channel;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_ri = out_channel * output_rows;
  /* Gradient is accumulated with +=, so start from zero. */
  memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
  INIT;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  for (int i = 0; i < batches; ++i) {
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        /* Receptive field of output element (j, k), in input coords. */
        const int cstart = j * col_stride - pc;
        const int rstart = k * row_stride - pr;
        const int cend = cstart + kernel_cols;
        const int rend = rstart + kernel_rows;
        for (int l = 0; l < out_channel; ++l) {
          int output_idx =
            i * output_cri + j * output_ri + k * out_channel + l;
          TYPE caml_output_val = *(output_ptr + output_idx);
          for (int h = 0; h < in_channel; ++h) {
            TYPE kernel_val = 0.;
            for (int a = cstart; a < cend; ++a) {
              for (int b = rstart; b < rend; ++b) {
                int kernel_index =
                  (a - cstart) * kernel_rio + (b - rstart) * kernel_io + h * out_channel + l;
                kernel_val = *(kernel_ptr + kernel_index);
                /* Only in-bounds input positions receive gradient;
                 * padded taps are simply dropped. */
                if (a >= 0 && a < input_cols &&
                    b >= 0 && b < input_rows) {
                  int input_idx =
                    i * input_cri + a * input_ri + b * in_channel + h;
                  *(input_ptr + input_idx) += caml_output_val * kernel_val;
                }
              }
            }
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode-interpreter entry point: OCaml boxes all 16 arguments into an
 * array when a stub has more than 5 parameters; unpack and delegate to
 * the native stub. */
CAMLprim value FUN_BYTE (spatial_backward_input_naive) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_input_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/*
 * 3D (cuboid) convolution, forward pass, naive direct implementation.
 *
 * Memory layouts (last index varies fastest):
 *   input  : [batches][input_cols][input_rows][input_dpts][in_channel]
 *   kernel : [kernel_cols][kernel_rows][kernel_dpts][in_channel][out_channel]
 *   output : [batches][output_cols][output_rows][output_dpts][out_channel]
 *
 * padding == 1 means VALID (no padding); any other value selects
 * SAME-style symmetric padding. Out-of-bounds taps contribute zero.
 * Every output element is fully overwritten (= sum), so no memset of
 * the output is needed. Returns Val_unit.
 */
CAMLprim value FUN_NATIVE (cuboid_naive) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vPadding
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  /* Flat-index strides for each tensor. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int kernel_dio = out_channel * in_channel * kernel_dpts;
  const int kernel_io = out_channel * in_channel;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_do = out_channel * output_dpts;
  INIT;
  /* SAME padding: split symmetrically over each axis, clamped at 0. */
  int pd = 0, pr = 0, pc = 0;
  if (padding != 1) {
    pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
    pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
    pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
    if (pc < 0) pc = 0;
    if (pr < 0) pr = 0;
    if (pd < 0) pd = 0;
  }
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_crdi;
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        for (int d = 0; d < output_dpts; ++d) {
          const int output_idx_base =
            i * output_crdo +
            j * output_rdo +
            k * output_do +
            d * out_channel;
          /* Receptive field of output element (j, k, d), input coords. */
          const int cstart = j * col_stride - pc;
          const int rstart = k * row_stride - pr;
          const int dstart = d * dpt_stride - pd;
          const int cend = cstart + kernel_cols;
          const int rend = rstart + kernel_rows;
          const int dend = dstart + kernel_dpts;
          for (int l = 0; l < out_channel; ++l) {
            TYPE sum = 0.;
            int output_idx = output_idx_base + l;
            for (int h = 0; h < in_channel; ++h) {
              for (int a = cstart; a < cend; ++a) {
                for (int b = rstart; b < rend; ++b) {
                  for (int c = dstart; c < dend; ++c) {
                    TYPE caml_input_val, kernel_val;
                    /* Zero-pad: out-of-bounds taps read as zero. */
                    if (a >= 0 && a < input_cols &&
                        b >= 0 && b < input_rows &&
                        c >= 0 && c < input_dpts) {
                      int input_idx =
                        input_idx_base + a * input_rdi + b * input_di +
                        c * in_channel + h;
                      caml_input_val = *(input_ptr + input_idx);
                    } else {
                      caml_input_val = 0.;
                    }
                    int kernel_index =
                      (a - cstart) * kernel_rdio +
                      (b - rstart) * kernel_dio +
                      (c - dstart) * kernel_io +
                      h * out_channel + l;
                    kernel_val = *(kernel_ptr + kernel_index);
                    sum += caml_input_val * kernel_val;
                  }
                }
              }
            }
            *(output_ptr + output_idx) = sum;
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode-interpreter entry point: OCaml boxes all 19 arguments into an
 * array when a stub has more than 5 parameters; unpack and delegate to
 * the native stub. */
CAMLprim value FUN_BYTE (cuboid_naive) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17],
    argv[18]
  );
}
/*
 * 3D (cuboid) convolution, backward pass w.r.t. the kernel, naive direct
 * implementation. For every output element, accumulates
 * (output gradient x input tap) into the corresponding kernel-gradient
 * cell; out-of-bounds input taps contribute zero.
 *
 * Layouts match cuboid_naive. The kernel-gradient bigarray (vKernel) is
 * zeroed then accumulated. SAME-style symmetric padding is always
 * derived from the shapes (clamped at zero). Returns Val_unit.
 */
CAMLprim value FUN_NATIVE (cuboid_backward_kernel_naive) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* Flat-index strides for each tensor. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int kernel_dio = out_channel * in_channel * kernel_dpts;
  const int kernel_io = out_channel * in_channel;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_do = out_channel * output_dpts;
  /* Gradient is accumulated with +=, so start from zero. */
  memset(kernel_ptr, 0, kernel_cols * kernel_rdio * sizeof(TYPE));
  INIT;
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_crdi;
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        for (int d = 0; d < output_dpts; ++d) {
          const int output_idx_base =
            i * output_crdo +
            j * output_rdo +
            k * output_do +
            d * out_channel;
          /* Receptive field of output element (j, k, d), input coords. */
          const int cstart = j * col_stride - pc;
          const int rstart = k * row_stride - pr;
          const int dstart = d * dpt_stride - pd;
          const int cend = cstart + kernel_cols;
          const int rend = rstart + kernel_rows;
          const int dend = dstart + kernel_dpts;
          for (int l = 0; l < out_channel; ++l) {
            int output_idx = output_idx_base + l;
            TYPE caml_output_val = *(output_ptr + output_idx);
            for (int h = 0; h < in_channel; ++h) {
              for (int a = cstart; a < cend; ++a) {
                for (int b = rstart; b < rend; ++b) {
                  for (int c = dstart; c < dend; ++c) {
                    /* Zero-pad: out-of-bounds taps read as zero. */
                    TYPE caml_input_val = 0.;
                    if (a >= 0 && a < input_cols &&
                        b >= 0 && b < input_rows &&
                        c >= 0 && c < input_dpts) {
                      int input_idx =
                        input_idx_base + a * input_rdi + b * input_di +
                        c * in_channel + h;
                      caml_input_val = *(input_ptr + input_idx);
                    }
                    int kernel_index =
                      (a - cstart) * kernel_rdio +
                      (b - rstart) * kernel_dio +
                      (c - dstart) * kernel_io +
                      h * out_channel + l;
                    *(kernel_ptr + kernel_index) += caml_output_val * caml_input_val;
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode-interpreter entry point: OCaml boxes all 18 arguments into an
 * array when a stub has more than 5 parameters; unpack and delegate to
 * the native stub. */
CAMLprim value FUN_BYTE (cuboid_backward_kernel_naive) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_kernel_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/*
 * 3D (cuboid) convolution, backward pass w.r.t. the input, naive direct
 * implementation. For every output element, scatters
 * (output gradient x kernel weight) back onto each in-bounds input tap
 * of its receptive field; padded taps are dropped.
 *
 * Layouts match cuboid_naive. The input-gradient bigarray (vInput) is
 * zeroed then accumulated. SAME-style symmetric padding is always
 * derived from the shapes (clamped at zero). Returns Val_unit.
 */
CAMLprim value FUN_NATIVE (cuboid_backward_input_naive) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* Flat-index strides for each tensor. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int kernel_dio = out_channel * in_channel * kernel_dpts;
  const int kernel_io = out_channel * in_channel;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_do = out_channel * output_dpts;
  /* Gradient is accumulated with +=, so start from zero. */
  memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE));
  INIT;
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_crdi;
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        for (int d = 0; d < output_dpts; ++d) {
          const int output_idx_base =
            i * output_crdo +
            j * output_rdo +
            k * output_do +
            d * out_channel;
          /* Receptive field of output element (j, k, d), input coords. */
          const int cstart = j * col_stride - pc;
          const int rstart = k * row_stride - pr;
          const int dstart = d * dpt_stride - pd;
          const int cend = cstart + kernel_cols;
          const int rend = rstart + kernel_rows;
          const int dend = dstart + kernel_dpts;
          for (int l = 0; l < out_channel; ++l) {
            int output_idx = output_idx_base + l;
            TYPE caml_output_val = *(output_ptr + output_idx);
            for (int h = 0; h < in_channel; ++h) {
              TYPE kernel_val;
              for (int a = cstart; a < cend; ++a) {
                for (int b = rstart; b < rend; ++b) {
                  for (int c = dstart; c < dend; ++c) {
                    int kernel_index =
                      (a - cstart) * kernel_rdio +
                      (b - rstart) * kernel_dio +
                      (c - dstart) * kernel_io +
                      h * out_channel + l;
                    kernel_val = *(kernel_ptr + kernel_index);
                    /* Only in-bounds input positions receive gradient. */
                    if (a >= 0 && a < input_cols &&
                        b >= 0 && b < input_rows &&
                        c >= 0 && c < input_dpts) {
                      int input_idx =
                        input_idx_base + a * input_rdi + b * input_di +
                        c * in_channel + h;
                      *(input_ptr + input_idx) += caml_output_val * kernel_val;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode-interpreter entry point: OCaml boxes all 18 arguments into an
 * array when a stub has more than 5 parameters; unpack and delegate to
 * the native stub. */
CAMLprim value FUN_BYTE (cuboid_backward_input_naive) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_input_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/*
* dilated convolution
*/
/*
 * 2D (spatial) dilated convolution, forward pass, im2col + single GEMM.
 * Each output position's (dilated) receptive field is flattened into a
 * row of inpt2d; one GEMM against the kernel matrix then produces the
 * whole output: output = inpt2d (output_crb x kernel_cri)
 *                        x kernel (kernel_cri x out_channel).
 *
 * Layouts (last index varies fastest):
 *   input  : [batches][input_cols][input_rows][in_channel]
 *   kernel : [kernel_cols][kernel_rows][in_channel][out_channel]
 *   output : [batches][output_cols][output_rows][out_channel]
 *
 * row_in_stride / col_in_stride are the dilation factors. padding == 1
 * means VALID; otherwise SAME-style symmetric padding is computed from
 * the dilated kernel extent. Padded taps stay zero because inpt2d is
 * calloc'd and only in-bounds taps are written. Returns Val_unit.
 */
CAMLprim value FUN_NATIVE (dilated_spatial_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Flat-index strides for each tensor. */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  INIT;
  TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
  /* Effective ("dilated") kernel extent after inserting the in-strides. */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  /* SAME padding based on the dilated extent, clamped at 0. */
  int pr = 0, pc = 0;
  if (padding != 1) {
    pr = (row_stride * ( output_rows - 1) + kernel_rows_up - input_rows) / 2;
    pc = (col_stride * ( output_cols - 1) + kernel_cols_up - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }
  /* im2col: each iteration fills one independent row of inpt2d, so the
   * loop is safely parallelisable. */
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < output_crb; ++i) {
    /* Decompose i into (batch, output col, output row). */
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - pc;
    const int rstart = r * row_stride - pr;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int input_idx_base = bt * input_cri;
    /* cnt advances for out-of-bounds taps too, keeping column alignment;
     * those slots keep their calloc'd zero (the padding value). */
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
          }
          ++cnt;
        }
      }
    }
  }
  GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
    output_crb, out_channel, kernel_cri, ALPHA,
    inpt2d, kernel_cri, kernel_ptr, out_channel,
    BETA, output_ptr, out_channel);
  free(inpt2d);
  return Val_unit;
}
/* Bytecode-interpreter entry point: OCaml boxes all 17 arguments into an
 * array when a stub has more than 5 parameters; unpack and delegate to
 * the native stub. */
CAMLprim value FUN_BYTE (dilated_spatial_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_spatial_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16]
  );
}
/*
 * Gradient of a 2-D dilated convolution with respect to the kernel,
 * computed via im2col + GEMM.  OCaml FFI stub; FUN_NATIVE, TYPE, GEMM,
 * ALPHA, BETA and INIT are macros supplied by the including template.
 *
 * Steps:
 *   1. gather the input into an im2col matrix inpt2d
 *      (output_crb rows x kernel_cri columns),
 *   2. kern2d = output^T * inpt2d  (GEMM with the first operand transposed),
 *   3. transpose kern2d into the caller's kernel layout.
 *
 * NOTE(review): the index expression a * input_ri + b * in_channel + h
 * suggests a [batch][col][row][channel] layout with channel fastest --
 * confirm against the OCaml caller.
 * NOTE(review): kernel_cri * output_crb is computed in int and could
 * overflow for very large tensors -- verify upstream size limits.
 */
CAMLprim value FUN_NATIVE (dilated_spatial_backward_kernel_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  /* unwrap the bigarray payloads */
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  /* unbox all scalar arguments */
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* precomputed strides / sizes */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int output_ri = out_channel * output_rows;  /* NOTE(review): unused in this function */
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  INIT;
  /* im2col scratch; calloc zero-fills so taps that fall in the padding
     contribute 0 to the GEMM below */
  TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  /* the kernel gradient is overwritten below; clear it first */
  memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
  /* effective ("upsampled") kernel extent under input dilation */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  /* symmetric padding needed so the output grid covers the input */
  int pad_rows = row_stride * (output_rows - 1) + kernel_rows_up - input_rows;
  int pad_cols = col_stride * (output_cols - 1) + kernel_cols_up - input_cols;
  int p_top = pad_rows / 2;
  int p_left = pad_cols / 2;
  if (p_top < 0) p_top = 0;
  if (p_left < 0) p_left = 0;
  /* each output position i writes only its own row of inpt2d, so the
     gather is safe to parallelize */
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < output_crb; ++i) {
    /* decode flat index i into (batch, out-col, out-row) */
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - p_left;
    const int rstart = r * row_stride - p_top;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
          }
          ++cnt;  /* advance even when the tap falls in the padding */
        }
      }
    }
  }
  /* kern2d (out_channel x kernel_cri) = output^T * inpt2d */
  GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
       out_channel, kernel_cri, output_crb, ALPHA,
       output_ptr, out_channel, inpt2d, kernel_cri,
       BETA, kern2d, kernel_cri);
  /* transpose kern2d into the caller's kernel-gradient layout */
  int cnt = 0;
  for (int j = 0; j < kernel_cri; ++j) {
    for (int i = 0; i < out_channel; ++i) {
      kernel_ptr[cnt++] = kern2d[i * kernel_cri + j];
    }
  }
  free(inpt2d);
  free(kern2d);
  return Val_unit;
}
/* Bytecode-calling-convention wrapper: unpack the 16 arguments from argv
   and forward to the native stub in the exact declared order. */
CAMLprim value FUN_BYTE (dilated_spatial_backward_kernel_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_spatial_backward_kernel_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
  );
}
/*
 * Gradient of a 2-D dilated convolution with respect to the input.
 * Runs the forward im2col mapping in reverse: first GEMM produces the
 * gradient of the im2col matrix (inpt2d = output * kernel^T), then the
 * gather loop is inverted into a scatter-add back into input_ptr.
 *
 * NOTE(review): unlike the gather loops in the siblings, this scatter
 * loop has no OpenMP pragma -- presumably because overlapping receptive
 * fields make different i write the same input cell (a data race).
 */
CAMLprim value FUN_NATIVE (dilated_spatial_backward_input_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  /* unbox all scalar arguments */
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_ri = out_channel * output_rows;  /* NOTE(review): unused in this function */
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  INIT;
  TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  /* input gradient is accumulated with += below; start from zero */
  memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
  /* effective kernel extent under input dilation, and symmetric padding */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  int pad_rows = row_stride * (output_rows - 1) + kernel_rows_up - input_rows;
  int pad_cols = col_stride * (output_cols - 1) + kernel_cols_up - input_cols;
  int p_top = pad_rows / 2;
  int p_left = pad_cols / 2;
  if (p_top < 0) p_top = 0;
  if (p_left < 0) p_left = 0;
  /* inpt2d (output_crb x kernel_cri) = output * kernel^T */
  GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
       output_crb, kernel_cri, out_channel, ALPHA,
       output_ptr, out_channel, kernel_ptr, out_channel,
       BETA, inpt2d, kernel_cri);
  /* scatter-add each im2col row back to its receptive field */
  for (int i = 0; i < output_crb; ++i) {
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - p_left;
    const int rstart = r * row_stride - p_top;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            input_ptr[input_idx] += inpt2d[i * kernel_cri + cnt];
          }
          ++cnt;  /* padding taps are simply skipped */
        }
      }
    }
  }
  free(inpt2d);
  return Val_unit;
}
/* Bytecode-calling-convention wrapper: unpack the 16 arguments from argv
   and forward to the native stub in the exact declared order. */
CAMLprim value FUN_BYTE (dilated_spatial_backward_input_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_spatial_backward_input_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
  );
}
/*
 * Forward 3-D (cuboid) dilated convolution via im2col + GEMM.
 * Gathers each output position's receptive field into a row of inpt2d,
 * then computes output = inpt2d * kernel with one GEMM.
 *
 * NOTE(review): padding == 1 skips the pad computation entirely (pads
 * stay 0) -- presumably 1 encodes VALID and anything else SAME; confirm
 * against the OCaml caller.
 */
CAMLprim value FUN_NATIVE (dilated_cuboid_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vDpt_in_stride, value vRow_in_stride, value vCol_in_stride,
  value vPadding
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  /* unbox all scalar arguments */
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int dpt_in_stride = Long_val(vDpt_in_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  int padding = Long_val(vPadding);
  /* precomputed strides: channel fastest, then depth, row, column */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  /* zero-filled so padding taps contribute 0 to the GEMM */
  TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  memset(output_ptr, 0, batches * output_crdo * sizeof(TYPE));
  INIT;
  /* effective kernel extent under input dilation */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  int kernel_dpts_up = kernel_dpts + (kernel_dpts - 1) * (dpt_in_stride - 1);
  int pd = 0, pr = 0, pc = 0;
  if (padding != 1) {
    /* symmetric padding per dimension, clamped at 0 */
    pc = (col_stride * (output_cols - 1) + kernel_cols_up - input_cols) / 2;
    pr = (row_stride * (output_rows - 1) + kernel_rows_up - input_rows) / 2;
    pd = (dpt_stride * (output_dpts - 1) + kernel_dpts_up - input_dpts) / 2;
    if (pc < 0) pc = 0;
    if (pr < 0) pr = 0;
    if (pd < 0) pd = 0;
  }
  /* each i owns one disjoint row of inpt2d -> safe to parallelize */
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < output_drcb; ++i) {
    /* decode flat index i into (batch, col j, row k, depth d) */
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int dend = dstart + kernel_dpts_up;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int c = dstart; c < dend; c += dpt_in_stride) {
          for (int h = 0; h < in_channel; ++h) {
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
            }
            ++cnt;  /* advance even when the tap falls in the padding */
          }
        }
      }
    }
  }
  /* output (output_drcb x out_channel) = inpt2d * kernel */
  GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
       output_drcb, out_channel, kernel_idrc, ALPHA,
       inpt2d, kernel_idrc, kernel_ptr, out_channel,
       BETA, output_ptr, out_channel);
  free(inpt2d);
  return Val_unit;
}
/* Bytecode-calling-convention wrapper: unpack the 22 arguments from argv
   and forward to the native stub in the exact declared order. */
CAMLprim value FUN_BYTE (dilated_cuboid_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_cuboid_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17], argv[18], argv[19], argv[20], argv[21]
  );
}
/*
 * Gradient of a 3-D (cuboid) dilated convolution with respect to the
 * kernel, via im2col + GEMM: gather input into inpt2d, compute
 * kern2d = output^T * inpt2d, then transpose kern2d into kernel_ptr.
 * Unlike the forward cuboid stub there is no `padding` argument; the
 * symmetric padding is always computed here.
 */
CAMLprim value FUN_NATIVE (dilated_cuboid_backward_kernel_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vDpt_in_stride, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  /* unbox all scalar arguments */
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int dpt_in_stride = Long_val(vDpt_in_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  INIT;
  /* zero-filled so padding taps contribute 0 to the GEMM */
  TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  /* the kernel gradient is overwritten below; clear it first */
  memset(kernel_ptr, 0, kernel_cols * kernel_rdio * sizeof(TYPE));
  /* effective kernel extent under input dilation, and symmetric padding */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  int kernel_dpts_up = kernel_dpts + (kernel_dpts - 1) * (dpt_in_stride - 1);
  int pc = (col_stride * (output_cols - 1) + kernel_cols_up - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows_up - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts_up - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  /* each i owns one disjoint row of inpt2d -> safe to parallelize */
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < output_drcb; ++i) {
    /* decode flat index i into (batch, col j, row k, depth d) */
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int dend = dstart + kernel_dpts_up;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int c = dstart; c < dend; c += dpt_in_stride) {
          for (int h = 0; h < in_channel; ++h) {
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
            }
            ++cnt;  /* advance even when the tap falls in the padding */
          }
        }
      }
    }
  }
  /* kern2d (out_channel x kernel_idrc) = output^T * inpt2d */
  GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
       out_channel, kernel_idrc, output_drcb, ALPHA,
       output_ptr, out_channel, inpt2d, kernel_idrc,
       BETA, kern2d, kernel_idrc);
  /* transpose kern2d into the caller's kernel-gradient layout */
  int cnt = 0;
  for (int j = 0; j < kernel_idrc; ++j) {
    for (int i = 0; i < out_channel; ++i) {
      kernel_ptr[cnt++] = kern2d[i * kernel_idrc + j];
    }
  }
  free(inpt2d);
  free(kern2d);
  return Val_unit;
}
/* Bytecode-calling-convention wrapper: unpack the 21 arguments from argv
   and forward to the native stub in the exact declared order. */
CAMLprim value FUN_BYTE (dilated_cuboid_backward_kernel_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_cuboid_backward_kernel_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17], argv[18], argv[19], argv[20]
  );
}
/*
 * Gradient of a 3-D (cuboid) dilated convolution with respect to the
 * input.  First GEMM produces the im2col-matrix gradient
 * (inpt2d = output * kernel^T), then the rows are scatter-added back
 * into input_ptr.
 *
 * NOTE(review): the scatter loop has no OpenMP pragma -- presumably
 * because overlapping receptive fields make different i write the same
 * input cell (a data race).
 */
CAMLprim value FUN_NATIVE (dilated_cuboid_backward_input_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vDpt_in_stride, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  /* unbox all scalar arguments */
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int dpt_in_stride = Long_val(vDpt_in_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  /* input gradient is accumulated with += below; start from zero */
  memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE));
  INIT;
  /* effective kernel extent under input dilation, and symmetric padding */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  int kernel_dpts_up = kernel_dpts + (kernel_dpts - 1) * (dpt_in_stride - 1);
  int pc = (col_stride * (output_cols - 1) + kernel_cols_up - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows_up - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts_up - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  /* inpt2d (output_drcb x kernel_idrc) = output * kernel^T */
  GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
       output_drcb, kernel_idrc, out_channel, ALPHA,
       output_ptr, out_channel, kernel_ptr, out_channel,
       BETA, inpt2d, kernel_idrc);
  /* scatter-add each im2col row back to its receptive field */
  for (int i = 0; i < output_drcb; ++i) {
    /* decode flat index i into (batch, col j, row k, depth d) */
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int dend = dstart + kernel_dpts_up;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int c = dstart; c < dend; c += dpt_in_stride) {
          for (int h = 0; h < in_channel; ++h) {
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              input_ptr[input_idx] += inpt2d[i * kernel_idrc + cnt];
            }
            ++cnt;  /* padding taps are simply skipped */
          }
        }
      }
    }
  }
  free(inpt2d);
  return Val_unit;
}
/* Bytecode-calling-convention wrapper: unpack the 21 arguments from argv
   and forward to the native stub in the exact declared order. */
CAMLprim value FUN_BYTE (dilated_cuboid_backward_input_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_cuboid_backward_input_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17], argv[18], argv[19], argv[20]
  );
}
#endif /* OWL_ENABLE_TEMPLATE */
|
reduction-clauseModificado.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
/*
 * OpenMP reduction demo: sums a[0..n-1] (with a[i] = i) into `suma`,
 * which starts at 10, using a `reduction(+:suma)` clause, then prints
 * the result.
 *
 * argv[1]: iteration count n, clamped to the array capacity (20).
 * Returns 0 on success; exits with status 1 if the argument is missing.
 */
int main(int argc, char **argv) {
  enum { CAP = 20 };          /* fixed array capacity */
  int a[CAP];
  int suma = 10;              /* reduction seed (kept from original demo) */

  if (argc < 2) {
    fprintf(stderr, "Falta iteraciones\n");
    exit(1);                  /* was exit(-1): -1 yields status 255; use 1 */
  }

  int n = atoi(argv[1]);
  if (n < 0)                  /* negative input: nothing to sum */
    n = 0;
  if (n > CAP) {
    n = CAP;
    printf("n=%d", n);
  }

  for (int i = 0; i < n; i++)
    a[i] = i;

  /* each thread accumulates a private copy of suma; copies are combined
     (and added to the initial 10) when the region ends */
  #pragma omp parallel for reduction(+:suma)
  for (int i = 0; i < n; i++)
    suma += a[i];

  printf("Tras 'parallel' suma=%d\n", suma);
  return 0;
}
|
GB_unaryop__minv_uint8_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint8_fp64
// op(A') function: GB_tran__minv_uint8_fp64
// C type: uint8_t
// A type: double
// cast: uint8_t cij ; GB_CAST_UNSIGNED(cij,aij,8)
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 8)
#define GB_ATYPE \
double
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 8) ;
// casting
#define GB_CASTING(z, x) \
uint8_t z ; GB_CAST_UNSIGNED(z,x,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = minv (cast (Ax [p])) for all p: cast double to uint8_t, then
   apply the 8-bit unsigned multiplicative-inverse op (see GB_CAST_OP
   above).  Cx and Ax are dense arrays of anz entries; every iteration is
   independent, so the loop is statically parallelized over nthreads. */
GrB_Info GB_unop__minv_uint8_fp64
(
    uint8_t *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    /* this operator/type combination was disabled via GB_control.h */
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = minv (cast (A')): transpose A, typecast double -> uint8_t, and apply
   the unary operator.  The actual loop body is the shared template in
   GB_unaryop_transpose.c, instantiated here for phase 2 of 2 with the
   GB_* macros defined above. */
GrB_Info GB_tran__minv_uint8_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    /* this operator/type combination was disabled via GB_control.h */
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
utils.c | // This program is free software: you can use, modify and/or redistribute it
// under the terms of the simplified BSD License. You should have received a
// copy of this license along this program. If not, see
// <http://www.opensource.org/licenses/bsd-license.html>.
//
// Copyright (C) 2012, Coloma Ballester <coloma.ballester@upf.edu>
// Copyright (C) 2013-2014 J. F. Garamendi <jf.garamendi@upf.edu>
// All rights reserved.
#include <stdlib.h>
#include <math.h>
#include <stdio.h>
#include "utils.h"
double *me_sgauss(double std, int n) {
int i, shift;
double sum, v;
double *out;
v = (0.5 * (double) (n - 1)) / (double) (std);
v = 0.5 * v * v / log(10.);
out = (double *) malloc(sizeof(double) * n);
if (!out) {
printf("Not enough memory for out in sgauss.\n");
exit(1);
}
shift = -0.5 * (double) (n - 1);
if (n == 1) {
out[0] = 1.0;
} else {
/* store Gaussian signal */
for (i = (n + 1) / 2; i--;) {
v = ((double) i + (double) shift) / (double) std;
out[i] = out[n - 1 - i] = exp(-0.5 * v * v);
}
/* normalize to get unit mass */
for (sum = 0.0, i = n; i--;)
sum += (double) out[i];
for (i = n; i--;)
out[i] /= sum;
}
return (out);
}
/*
 * Separable 2-D convolution: convolve `in` (nx x ny, row-major) with
 * filter_x along x, then filter_y along y, writing to `out`.
 * Out-of-range samples are folded back by mirror symmetry at the borders.
 * `out` may alias `in`: the x pass writes only to the temporary image,
 * and `out` is written after `in` is no longer read.
 *
 * Fix: the intermediate malloc was unchecked; on failure we now report
 * and exit, matching me_sgauss in this file.
 */
void me_sepconvol(double *in, double *out, int nx, int ny, double *filter_x,
    double *filter_y, int size_x, int size_y) {
  const int nx1 = nx - 1;
  const int ny1 = ny - 1;

  /* temporary image holding the result of the x pass */
  double *tmp = (double *) malloc(sizeof(double) * nx * ny);
  if (!tmp) {
    printf("Not enough memory for tmp in sepconvol.\n");
    exit(1);
  }

  /* convolution along x axis; org centers the filter on the pixel */
  int org = (size_x - 1) >> 1;
  for (int y = 0; y <= ny1; y++) {
    for (int x = 0; x <= nx1; x++) {
      double sum = 0.0;
      for (int i = size_x; i--;) {
        int s = x - (i - org);
        /* mirror out-of-range indices back into [0, nx1] */
        while ((s < 0) || (s > nx1)) {
          if (s < 0)
            s = 0 - s - 1;
          if (s > nx1)
            s = nx1 - (s - nx1) + 1;
        }
        sum += filter_x[i] * in[y * nx + s];
      }
      tmp[y * nx + x] = sum;
    }
  }

  /* convolution along y axis */
  org = (size_y - 1) >> 1;
  for (int y = 0; y <= ny1; y++) {
    for (int x = 0; x <= nx1; x++) {
      double sum = 0.0;
      for (int i = size_y; i--;) {
        int s = y - (i - org);
        /* mirror out-of-range indices back into [0, ny1] */
        while ((s < 0) || (s > ny1)) {
          if (s < 0)
            s = 0 - s - 1;
          if (s > ny1)
            s = ny1 - (s - ny1) + 1;
        }
        sum += filter_y[i] * tmp[s * nx + x];
      }
      out[y * nx + x] = sum;
    }
  }

  free(tmp);
}
/*
 * Dump an nx-by-ny image of doubles to "image.bin" as raw native doubles.
 * Fixes: binary mode ("wb" -- matters on Windows), and fopen/fwrite/fclose
 * results are now checked; errors are reported to stderr and the function
 * returns without aborting the program.
 */
void me_save_image(double *in, int nx, int ny) {
  FILE *fp = fopen("image.bin", "wb");
  if (!fp) {
    fprintf(stderr, "me_save_image: cannot open image.bin for writing\n");
    return;
  }
  const size_t count = (size_t) nx * (size_t) ny;
  if (fwrite(in, sizeof(double), count, fp) != count) {
    fprintf(stderr, "me_save_image: short write to image.bin\n");
    fclose(fp);
    return;
  }
  /* fclose flushes buffered data; a failure here means data may be lost */
  if (fclose(fp) != 0) {
    fprintf(stderr, "me_save_image: error closing image.bin\n");
    return;
  }
  printf("File has been written to image.bin\n");
}
/* qsort comparator for doubles: negative, zero, or positive according to
   whether *i is less than, equal to, or greater than *j. */
int me_median_compare(const void *i, const void *j) {
  const double a = *(const double *) i;
  const double b = *(const double *) j;
  return (a > b) - (a < b);
}
/* Ascending comparator for doubles, private to the median filter below
   (same ordering as me_median_compare, but self-contained). */
static int me_median_cmp_(const void *i, const void *j) {
  const double a = *(const double *) i;
  const double b = *(const double *) j;
  return (a > b) - (a < b);
}

/*
 * In-place wsize x wsize median filtering of `in` (nx columns, ny rows,
 * row-major).  Borders are handled by mirror symmetry.  wsize should be
 * odd; `border` is wsize/2 pixels on each side.
 *
 * Fixes: the output buffer allocation is now checked (report + exit,
 * consistent with me_sgauss), and the element-by-element copy-back loop
 * is replaced with memcpy.
 */
void me_median_filtering(double *in, int nx, int ny, int wsize) {
  double median_vector[wsize * wsize]; /* VLA: fine for small windows */
  const int border = wsize >> 1;
  const int nrow = ny, ncol = nx;

  double *out = (double *) malloc(sizeof(double) * nx * ny);
  if (!out) {
    printf("Not enough memory for out in median_filtering.\n");
    exit(1);
  }

  for (int x = 0; x < ncol; x++) {
    for (int y = 0; y < nrow; y++) {
      int i = 0;
      for (int yy = y - border; yy <= y + border; yy++)
        for (int xx = x - border; xx <= x + border; xx++) {
          int xx0 = xx;
          int yy0 = yy;
          /* mirror out-of-range indices back inside the image */
          if (xx0 < 0)
            xx0 = -xx0 - 1;
          if (xx0 >= ncol)
            xx0 = 2 * ncol - xx0 - 1;
          if (yy0 < 0)
            yy0 = -yy0 - 1;
          if (yy0 >= nrow)
            yy0 = 2 * nrow - yy0 - 1;
          median_vector[i++] = in[yy0 * ncol + xx0];
        }
      /* sort the window and pick the middle element */
      qsort(median_vector, i, sizeof(double), me_median_cmp_);
      out[y * ncol + x] = median_vector[i / 2];
    }
  }

  /* write the filtered image back over the input */
  memcpy(in, out, sizeof(double) * nx * ny);
  free(out);
}
/* TO DELETE: unused draft of warping(), not compared against anything
void warping(
const double *input, const double *u, const double *v,
double *output, const int nx, const int ny
)
{
#pragma omp parallel
{
#pragma omp for schedule(dynamic) nowait
for(int j = 0; j < ny; j++)
for(int i = 0; i < nx; i++)
{
const double uu = (double) (i + u[i + nx * j]);
const double vv = (double) (j + v[i + nx * j]);
if ((uu < 0) || (uu > (nx - 1)) ||
(vv < 0) || (vv > (ny - 1)))
{
output[i + nx * j] = 0;
}
else
{
output[i + nx * j] = me_interpolate_bicubic(input, nx, ny, uu, vv);
}
}
}
}
*/
/**
*
* Function to normalize the images between 0 and 255
*
**/
/*
 * Jointly normalize three images in place to [0, 255], using the common
 * min/max over all of them.
 *
 * Fix: the original divided by (max - min) unconditionally, producing
 * NaN/inf for constant images; like image_normalization_4, we now leave
 * the images unchanged when max == min.
 */
void image_normalization_3(double *I0, double *I1, double *I2, int size) {
  double max0, min0, max1, min1, max2, min2;
  getminmax(&min0, &max0, I0, size);
  getminmax(&min1, &max1, I1, size);
  getminmax(&min2, &max2, I2, size);

  /* global extrema over the three images */
  double max = max0;
  double min = min0;
  if (max1 > max)
    max = max1;
  if (min1 < min)
    min = min1;
  if (max2 > max)
    max = max2;
  if (min2 < min)
    min = min2;

  const double den = max - min;
  if (den > 0) {
    for (int i = 0; i < size; i++) {
      I0[i] = 255.0 * (I0[i] - min) / den;
      I1[i] = 255.0 * (I1[i] - min) / den;
      I2[i] = 255.0 * (I2[i] - min) / den;
    }
  }
  /* den == 0: all pixels equal; keep the images as-is */
}
/*
 * Jointly normalize four images to [0, 255], using the common min/max
 * over all of them; constant images (max == min) are copied through
 * unchanged.  Inputs and outputs are separate buffers.
 */
void image_normalization_4(const double *I_1, // input image-1
    const double *I0, // input image0
    const double *I1, // input image1
    const double *filtI0, // smoothed version of I0
    double *I_1n, // normalized output image -1
    double *I0n, // normalized output image0
    double *I1n, // normalized output image1
    double *filtI0n, // normalized output image filtI0
    int size // size of the image
    ) {
  double max_1, max0, max1, maxf0, min_1, min0, minf0, min1;
  // obtain the max and min of each image
  getminmax(&min_1, &max_1, I_1, size);
  getminmax(&min0, &max0, I0, size);
  getminmax(&min1, &max1, I1, size);
  getminmax(&minf0, &maxf0, filtI0, size);
  // obtain the global max and min over all four images
  double max = (max_1 > max0) ? max_1 : max0;
  max = (max > max1) ? max : max1;
  max = (max > maxf0) ? max : maxf0;
  double min = (min_1 < min0) ? min_1 : min0;
  min = (min < min1) ? min : min1;
  min = (min < minf0) ? min : minf0;
  const double den = max - min;
  if (den > 0)
    // normalize the images to [0, 255]
    for (int i = 0; i < size; i++) {
      I_1n[i] = 255.0 * (I_1[i] - min) / den;
      I0n[i] = 255.0 * (I0[i] - min) / den;
      I1n[i] = 255.0 * (I1[i] - min) / den;
      filtI0n[i] = 255.0 * (filtI0[i] - min) / den;
    }
  else
    // constant images: copy the originals through unchanged
    for (int i = 0; i < size; i++) {
      I_1n[i] = I_1[i];
      I0n[i] = I0[i];
      I1n[i] = I1[i];
      filtI0n[i] = filtI0[i];
    }
}
/**
*
* Compute the max and min of an array
*
**/
/* Scan the n-element array x once, writing its extrema through min/max.
   n must be >= 1 (x[0] seeds both accumulators). */
void getminmax(double *min, double *max, const double *x, int n) {
  double lo = x[0];
  double hi = x[0];
  for (int i = 1; i < n; i++) {
    const double v = x[i];
    if (v < lo)
      lo = v;
    if (v > hi)
      hi = v;
  }
  *min = lo;
  *max = hi;
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* NOTE: *y is used as scratch space -- the carry normalization below
   modifies it.  Callers must not rely on y's value afterwards. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
  {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }
  /* Compute the time remaining to wait.
   * tv_usec is certainly positive after the normalization above.
   */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver: order-1 3D 7-point stencil with variable coefficients.
 *
 * argv[1..3]: interior grid size per dimension (2 halo layers are added);
 * argv[4]:    number of time steps Nt.
 *
 * Fixes vs. original:
 *  - Nx/Ny/Nz/Nt now have defaults; the original read them uninitialized
 *    (undefined behavior) when arguments were omitted.
 *  - `min(...)` (undefined; only MIN is #defined in this file) replaced
 *    with an explicit comparison.
 *  - Initialization loops start at 0 and also fill A[1]; the original
 *    left index-0 planes and all of A[1] uninitialized although the
 *    stencil reads them.
 *  - tile_size, A and coef top-level pointers are now freed.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;  /* defaults: 32^3 interior, 10 steps */
  if (argc > 3) {
    Nx = atoi(argv[1]) + 2;
    Ny = atoi(argv[2]) + 2;
    Nz = atoi(argv[3]) + 2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* allocate the two time planes A[0..1][Nz][Ny][Nx] */
  double ****A = (double ****) malloc(sizeof(double***) * 2);
  for (m = 0; m < 2; m++) {
    A[m] = (double ***) malloc(sizeof(double**) * Nz);
    for (i = 0; i < Nz; i++) {
      A[m][i] = (double**) malloc(sizeof(double*) * Ny);
      for (j = 0; j < Ny; j++) {
        A[m][i][j] = (double*) malloc(sizeof(double) * Nx);
      }
    }
  }
  /* allocate the 7 variable-coefficient grids */
  double ****coef = (double ****) malloc(sizeof(double***) * 7);
  for (m = 0; m < 7; m++) {
    coef[m] = (double ***) malloc(sizeof(double**) * Nz);
    for (i = 0; i < Nz; i++) {
      coef[m][i] = (double**) malloc(sizeof(double*) * Ny);
      for (j = 0; j < Ny; j++) {
        coef[m][i][j] = (double*) malloc(sizeof(double) * Nx);
      }
    }
  }

  /* tile size information, including extra element to decide the list
     length; consumed by source-to-source transformation tools */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  /* The list is modified here before source-to-source transformations */
  tile_size = (int*) realloc((void *)tile_size, sizeof(int) * 5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 24;
  tile_size[3] = 32;
  tile_size[4] = -1;

  /* for timekeeping */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;
  const int BASE = 1024;

  /* Initialize every cell of BOTH time planes: the stencil reads the
     index-0 halos of A[0] at t = 0 and the halos of A[1] at t >= 1, so
     all of them must be defined. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
      }
    }
  }
  for (m = 0; m < 7; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;  /* reported by the PRINT_RESULTS macro */
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    /* serial execution - Addition: 6 && Multiplication: 2 */
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* BUG FIX: original called lowercase `min`, which is not defined */
    if (tdiff < min_tdiff)
      min_tdiff = tdiff;
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays (now including the top-level pointers) */
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for (m = 0; m < 7; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
TransformController.h | //
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
//
#pragma once
#include <set>
#include "Transformer.h"
#include "SequenceEnumerator.h"
#include "ExceptionCapture.h"
namespace Microsoft { namespace MSR { namespace CNTK {
// A pair of a transformer and the stream name to which the transformer should be applied.
struct Transformation
{
TransformerPtr m_transformer;
std::wstring m_streamName;
};
// A class responsible for applying a list of transformers to sequences and stream descriptions.
// Delegates retrieval of sequences to another sequence provider (such as a randomizer) and applies transformations after retrieving.
// Usually used by the packer to get next set of sequences.
class TransformController : public SequenceEnumerator
{
public:
TransformController(const std::vector<Transformation>& transformations, SequenceEnumeratorPtr sequenceProvider)
: m_sequenceProvider(sequenceProvider)
{
// Applying transformations to stream descriptions,
// i.e. a transformation can change a stream from dense to sparse.
std::vector<StreamDescriptionPtr> transformedStreams = m_sequenceProvider->GetStreamDescriptions();
for (auto& t : transformations)
{
size_t streamId = GetStreamId(t.m_streamName, transformedStreams);
m_transformations.push_back(std::make_pair(t, streamId));
transformedStreams[streamId] = std::make_shared<StreamDescription>(t.m_transformer->Transform(*transformedStreams[streamId]));
}
m_outputStreams = transformedStreams;
}
// Sets configuration for the current epoch.
// Some transformers can change their config based on the epoch.
virtual void StartEpoch(const EpochConfiguration &config) override
{
assert(m_sequenceProvider != nullptr);
for (auto& t : m_transformations)
{
t.first.m_transformer->StartEpoch(config);
}
m_sequenceProvider->StartEpoch(config);
}
// Description of streams that the transformer provides.
virtual std::vector<StreamDescriptionPtr> GetStreamDescriptions() const override
{
return m_outputStreams;
}
// Gets next sequences up to a maximum count of samples,
// applying transformers to particular streams.
virtual Sequences GetNextSequences(size_t sampleCount) override
{
assert(m_sequenceProvider != nullptr);
Sequences sequences = m_sequenceProvider->GetNextSequences(sampleCount);
if (sequences.m_data.empty())
{
return sequences;
}
ExceptionCapture capture;
#pragma omp parallel for schedule(dynamic)
for (int j = 0; j < sequences.m_data.front().size(); ++j)
{
capture.SafeRun([this, &sequences](int sequenceId)
{
for (auto& t : m_transformations)
{
sequences.m_data[t.second][sequenceId] = t.first.m_transformer->Transform(sequences.m_data[t.second][sequenceId]);
}
}, j);
}
capture.RethrowIfHappened();
return sequences;
}
private:
size_t GetStreamId(const std::wstring streamName, const std::vector<StreamDescriptionPtr>& streams) const
{
for (const auto& s : streams)
{
if (s->m_name == streamName)
{
return s->m_id;
}
}
assert(false);
LogicError("Unexpected stream specified for transformation.");
}
SequenceEnumeratorPtr m_sequenceProvider;
std::vector<StreamDescriptionPtr> m_outputStreams;
std::vector<std::pair<Transformation, size_t>> m_transformations;
};
}}}
|
skel-rhf-calc.c | /*
* Interface routines to ofmo_*() function
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include "ofmo-parallel.h"
#include "ofmo-prof.h"
#include "ofmo-scf.h"
#include "ofmo-integ.h"
#include "ofmo-twoint.h"
#include "ofmo-mat.h"
#include "skel-rhf-main.h"
#include "skel-rhf-data.h"
#include "skel-rhf-calc.h"
// ----------------
int skel_rhf_init_array(
skel_rhf_config_t* config, skel_rhf_data_t* data,
skel_rhf_array_t* array )
{
int nao = data->nao;
int nao2 = nao*(nao+1)/2;
int nat = data->natom;
double *D, *G, *TEMP, *S, *H, *F, *C, *moe;
double *aopop, *atpop;
D = (double*)malloc(sizeof(double) * nao2);
if (D==NULL) return -1;
S = (double*)malloc(sizeof(double) * nao2);
if (S==NULL) return -1;
H = (double*)malloc(sizeof(double) * nao2);
if (H==NULL) return -1;
C = (double*)malloc(sizeof(double) * nao*nao);
if (C==NULL) return -1;
moe = (double*)malloc(sizeof(double) * nao);
if (moe==NULL) return -1;
aopop = (double*)malloc(sizeof(double) * nao);
if (aopop==NULL) return -1;
atpop = (double*)malloc(sizeof(double) * nat);
if (atpop==NULL) return -1;
array->D = D;
array->S = S;
array->H = H;
array->C = C;
array->moe = moe;
array->aopop = aopop;
array->atpop = atpop;
return 0;
}
// ----------------
void skel_rhf_fin_array( skel_rhf_array_t* array )
{
Free(array->D);
Free(array->S);
Free(array->H);
Free(array->C);
Free(array->moe);
Free(array->aopop);
Free(array->atpop);
}
// ----------------
int skel_rhf_oneint(
skel_rhf_config_t* config, skel_rhf_data_t* data,
skel_rhf_array_t* array )
{
int nao = data->nao;
int nao2 = nao*(nao+1)/2;
double *S = array->S;
double *H = array->H;
memset( S, '\0', sizeof(double)*nao2 );
memset( H, '\0', sizeof(double)*nao2 );
#pragma omp parallel
{
int nworkers, workerid;
nworkers = omp_get_num_threads();
workerid = omp_get_thread_num();
ofmo_integ_oneint_sorted(
nworkers, workerid,
data->maxlqn, data->leading_cs,
data->shel_tem, data->shel_atm, data->shel_add, data->shel_ini,
data->atom_x, data->atom_y, data->atom_z,
data->prim_exp, data->prim_coe,
data->natom, data->atomic_number, S, H );
}
ofmo_scale_diag( nao, 0.5e0, H );
return 0;
}
// ----------------
int skel_rhf_init_density(
skel_rhf_config_t* config, skel_rhf_data_t* data,
skel_rhf_array_t* array )
{
int nao = data->nao;
int nao2 = nao*(nao+1)/2;
double *D = array->D;
if (strlen(config->density) > 0) {
if (config->myrank == 0) {
read_packed_matrix_binary( nao, config->density, D );
}
MPI_Bcast( D, nao2, MPI_DOUBLE, 0, config->comm );
} else {
ofmo_scf_init_density_ehuckel(
data->natom, data->ncs, data->nao, data->maxlqn, data->nocc,
data->atomic_number, data->leading_cs,
data->shel_atm, data->shel_ini,
array->S, array->D, array->aopop, array->atpop );
}
return 0;
}
// ----------------
int skel_rhf_twoint_first(
skel_rhf_config_t* config, skel_rhf_data_t* data,
skel_rhf_array_t* array )
{
#pragma omp parallel
{
int incore2 = -1;
int workerid, nworkers;
int nthreads, mythread;
size_t my_buffer_size = 0;
// for profiling
double et0, et1;
nthreads = omp_get_num_threads();
mythread = omp_get_thread_num();
workerid = config->myrank * nthreads + mythread;
nworkers = config->nprocs * nthreads;
my_buffer_size = config->eribfsz/nthreads;
int offset = 0;
ofmo_integ_set_loop_offset( mythread, offset );
incore2 = ofmo_integ_twoint_first( nworkers, workerid, my_buffer_size,
data->maxlqn, data->shel_atm, data->shel_ini,
data->atom_x, data->atom_y, data->atom_z,
data->leading_cs_pair,
data->csp_schwarz, data->csp_ics, data->csp_jcs,
data->csp_leading_ps_pair,
data->psp_zeta, data->psp_dkps, data->psp_xiza );
}
return 0;
}
// ----------------
double skel_rhf_scf(
skel_rhf_config_t* config, skel_rhf_data_t* data,
skel_rhf_array_t* array )
{
double energy, Enuc;
#pragma omp parallel
{
int mythread = omp_get_thread_num();
int offset = 0;
ofmo_integ_set_loop_offset( mythread, offset );
}
// nuclear repulsion
Enuc = ofmo_calc_nuclear_repulsion( data->natom, data->atomic_number,
data->atom_x, data->atom_y, data->atom_z);
if ( config->myrank == 0 ) {
printf( "Enuc = %17.10f\n", Enuc );
printf( "scfd scfe eps_ps4 eps_sch eps_eri: %8.2e %8.2e %8.2e %8.2e %8.2e\n",
config->scfd, config->scfe, config->eps_ps4, config->eps_sch, config->eps_eri);
fflush(stdout);
}
// scf
ofmo_twoint_eps_ps4(config->eps_ps4);
ofmo_twoint_eps_eri(config->eps_eri);
ofmo_twoint_eps_sch(config->eps_sch);
if (!config->dryrun) {
ofmo_scf_rhf( config->comm,
data->maxlqn, Enuc, data->ncs, data->nao,
data->leading_cs,
data->shel_atm, data->shel_ini,
data->atom_x, data->atom_y, data->atom_z,
data->leading_cs_pair,
data->csp_schwarz, data->csp_ics, data->csp_jcs,
data->csp_leading_ps_pair,
data->psp_zeta, data->psp_dkps, data->psp_xiza,
data->natom, data->nocc,
array->S, array->H,
config->maxscfcyc, config->scfe, config->scfd,
array->D, array->C, array->moe, &energy );
}
return energy;
}
// ----------------
|
convolution.c | //--------------------------------------------------------------------------//
//
// convolution.c
//
// Created by Josep Lluis Lerida on 11/03/2015
// Modified by Didac Semente Fernandez on 04/04/2016
//
// This program calculates the convolution for PPM images.
// The program accepts an PPM image file, a text definition of the kernel
// matrix and the PPM file for storing the convolution results.
// The program allows to define image partitions for processing larger
// images (>500MB).
// The 2D image is represented by 1D vector for chanel R, G and B.
// The convolution is applied to each chanel separately.
//
//--------------------------------------------------------------------------//
//--------------------------------------------------------------------------//
// -- EXTERNAL LIBRARIES -------------------------------------------------- //
//--------------------------------------------------------------------------//
#include <ctype.h>
#include <omp.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>
//--------------------------------------------------------------------------//
#include "lib/convolution.h"
//--------------------------------------------------------------------------//
// -- MACRO DEFINITION -----------------------------------------------------//
//--------------------------------------------------------------------------//
#define F_MICROS_IN_SECOND 1000000.0
#define TRUE 1
#define FALSE 0
#define REALLOC_MARGIN 10
#define INCREASE_FACTOR 100
//--------------------------------------------------------------------------//
// -- AUXILIARY METHODS ----------------------------------------------------//
//--------------------------------------------------------------------------//
int validateParameters(char**);
double calculateExtraSize(int partitions);
double toSeconds(suseconds_t);
long checkForRealloc(void**, long, long, size_t, long);
long rebuildImage(ImageData, DataBucket*);
//--------------------------------------------------------------------------//
// -- LIBRARY IMPLEMENTATION ---------------------------------------------- //
//--------------------------------------------------------------------------//
// Read the corresponding chunk from the source Image
int readChunk(char* fileName, intmax_t *offset, intmax_t *limit,
DataBucket bucket) {
intmax_t pos = *offset;
int value = 0, mult = 10;
int newValue = FALSE;
int increase = INCREASE_FACTOR;
long k = bucket->offset, bucketMemSize;
char c;
FILE *fp;
int **temp = NULL;
temp = (int**) malloc(sizeof(int*)); // Avoid breaking strict aliasing
if((fp = openFile(fileName, "r")) == NULL) {
perror("Error: ");
return -1;
}
if(fseek(fp, pos, SEEK_SET)) {
perror("Error: ");
return -1;
}
while(pos < *limit) {
pos = ftell(fp);
c = fgetc(fp);
if(isdigit(c)) {
value = (value * mult) + (c - '0');
newValue = TRUE;
} else if(newValue) {
bucket->data[k] = value;
value = 0;
newValue = FALSE;
k++;
// CHECKING IF WE ARE ABOUT TO FILL THE BUCKET
*temp = bucket->data;
bucketMemSize = bucket->msize;
bucket->msize = checkForRealloc((void**) temp, bucket->msize,
k + REALLOC_MARGIN, sizeof(bucket->data[0]), increase);
bucket->data = *temp;
if(bucketMemSize < bucket->msize) {
increase *= 2;
} else if(bucket->msize == -1) {
perror("Error: ");
return -1;
}
}
}
bucket->bsize = k;
fclose(fp);
free(temp);
return 0;
}
// Duplicates the just-read source chunk
// into the destination image struct chunk
void* duplicateImageChunk(ImageData src, ImageData dst) {
int** temp = NULL;
temp = (int**) malloc(sizeof(int*)); // Avoid breaking strcit aliasing
*temp = dst->R;
dst->rsize = checkForRealloc((void**) temp, dst->rsize, src->rsize,
sizeof(dst->R[0]), src->rsize - dst->rsize);
dst->R = *temp;
*temp = dst->G;
dst->gsize = checkForRealloc((void**) temp, dst->gsize, src->gsize,
sizeof(dst->G[0]), src->gsize - dst->gsize);
dst->G = *temp;
*temp = dst->B;
dst->bsize = checkForRealloc((void**) temp, dst->bsize, src->bsize,
sizeof(dst->B[0]), src->bsize - dst->bsize);
dst->B = *temp;
free(temp);
if(dst->rsize == -1 || dst->bsize == -1 || dst->gsize == -1) {
return NULL;
}
if(memcpy((void*) dst->R, (void*) src->R,
dst->rsize * sizeof(dst->R[0])) == NULL) {
return NULL;
}
if(memcpy((void*) dst->G, (void*) src->G,
dst->gsize * sizeof(dst->G[0])) == NULL) {
return NULL;
}
return memcpy((void*) dst->B, (void*) src->B,
dst->bsize * sizeof(dst->B[0]));
}
// Open kernel file and reading kernel matrix.
// The kernel matrix 2D is stored in 1D format.
KernelData readKernel(char* fileName) {
FILE *fp;
int ksize = 0;
KernelData kern = NULL;
// Opening the kernel file
if((fp = openFile(fileName, "r")) == NULL) {
perror("Error: ");
} else {
// Memory allocation
kern = (KernelData) malloc(sizeof(struct structkernel));
// Reading kernel matrix dimensions
fscanf(fp, "%d,%d,", &kern->kernelX, &kern->kernelY);
ksize = (kern->kernelX * kern->kernelY);
kern->vkern = (float*) malloc(ksize * sizeof(float));
// Reading kernel matrix values
for(int i = 0; i < ksize; i++) {
fscanf(fp, "%f,", &kern->vkern[i]);
}
fclose(fp);
}
return kern;
}
// Open the image file with the convolution results
int initfilestore(ImageData img, FILE** fp, char* fileName, long *position) {
// File with the resulting image is created
if((*fp = openFile(fileName, "w")) == NULL) {
perror("Error: ");
return -1;
}
// Writing image header
fprintf(*fp, "P%d\n%s\n%d %d\n%d\n", img->P, img->comment, img->width,
img->height, img->maxcolor);
*position = ftell(*fp);
return 0;
}
// Writing the image chunk to the resulting file.
int savingChunk(ImageData img, FILE **fp, long *offset, long dataOffst,
long count){
// Writing image partition
fseek(*fp, *offset, SEEK_SET);
for(long i = dataOffst; i < count; i++) {
fprintf(*fp, "%d %d %d\n", img->R[i], img->G[i], img->B[i]);
}
*offset = ftell(*fp);
return 0;
}
// This function frees the space allocated for the image structure.
void freeImagestructure(ImageData *src) {
free((*src)->comment);
free((*src)->R);
free((*src)->G);
free((*src)->B);
free(*src);
}
//--------------------------------------------------------------------------//
// 2D convolution
// 2D data are usually stored in computer memory as contiguous 1D array.
// So, we are using 1D array for 2D data.
// 2D convolution assumes the kernel is center originated, which means, if
// kernel size 3 then, k[-1], k[0], k[1]. The middle of index is always 0.
// The following programming logics are somewhat complicated because of using
// pointer indexing in order to minimize the number of multiplications.
//
//
// signed integer (32bit) version:
//--------------------------------------------------------------------------//
int convolve2D(int* in, int* out, int dataSizeX, int dataSizeY, int dataOff,
float* kernel, int kernelSizeX, int kernelSizeY) {
int m, n;
int *inPtr = NULL, *inPtr2 = NULL, *outPtr = NULL;
float *kPtr = NULL;
int kCenterX, kCenterY;
int rowMin, rowMax; // to check boundary of input array
int colMin, colMax; //
float sum; // temp accumulation buffer
// Parameter validatin
if(!in || !out || !kernel || dataSizeX <= 0 || kernelSizeX <= 0) {
return -1;
}
// Find centeral position of kernel (half of kernel size)
kCenterX = (int) kernelSizeX / 2;
kCenterY = (int) kernelSizeY / 2;
// init working pointers
// note that it is shifted (kCenterX, kCenterY),
inPtr = inPtr2 = &in[(dataSizeX * kCenterY) + kCenterX];
outPtr = out;
kPtr = kernel;
// start convolution
// number of rows
for(register int i = 0; i < dataSizeY; ++i) {
// compute the range of convolution, the current row of kernel
// should be between these
rowMax = i + kCenterY;
rowMin = i - dataSizeY + kCenterY;
// number of columns
for(register int j = 0; j < dataSizeX; ++j) {
// compute the range of convolution, the current column of kernel
// should be between these
colMax = j + kCenterX;
colMin = j - dataSizeX + kCenterX;
sum = 0.0f; // set to 0 before accumulate
// flip the kernel and traverse all the kernel values
// multiply each kernel value with underlying input data
// kernel rows
for(m = 0; m < kernelSizeY; ++m) {
// check if the index is out of bound of input array
if(m <= rowMax && m > rowMin) {
for(n = 0; n < kernelSizeX; ++n) {
// check the boundary of array
if(n <= colMax && n > colMin) {
sum += *(inPtr - n) * (*kPtr);
}
++kPtr;// next kernel
}
} else {
// out of bound, move to next row of kernel
kPtr += kernelSizeX;
}
// move input data 1 raw up
inPtr -= dataSizeX;
}
// convert integer number
if(sum >= 0.0f) {
*outPtr = (int)(sum + 0.5f);
} else { // For using with image editors like GIMP or others...
*outPtr = (int)(sum - 0.5f);
}
kPtr = kernel; // reset kernel to (0,0)
inPtr = ++inPtr2; // next input
++outPtr; // next output
}
}
return 0;
}
//--------------------------------------------------------------------------//
// -- AUXILIARY METHODS IMPLEMENTATION ------------------------------------ //
//--------------------------------------------------------------------------//
int validateParameters(char **args) {
if(access(args[1], F_OK)) {
perror("Input image error");
return -1;
} else if(access(args[2], F_OK)) {
perror("Kernel file error");
return -1;
} else if(atoi(args[4]) < 1) {
printf("Partition number error: value less than 1\n");
return -1;
} else if(atoi(args[5]) < 1) {
printf("Thread number error: value less than 1\n");
return -1;
}
return 0;
}
double calculateExtraSize(int partitions) {
double x = (double) partitions;
return (x / (15 + 3*x)) - 0.058f;
}
double toSeconds(suseconds_t micros) {
return (micros / F_MICROS_IN_SECOND);
}
long checkForRealloc(void **ptr, long currSize, long margin, size_t posSize,
long reallocIncrement) {
long newSize = currSize;
void *temp = NULL;
if(newSize < margin) {
newSize = newSize + reallocIncrement;
if((temp = realloc(*ptr, newSize * posSize)) == NULL) {
free(*ptr);
return -1;
} else {
*ptr = temp;
}
}
return newSize;
}
// Method used to fill the ImageData structure using the data found in the
// DataBucket list.
long rebuildImage(ImageData img, DataBucket *bucks) {
long r, g, b, tsize;
long rasterR, rasterG, rasterB;
long increaseR, increaseG, increaseB;
int flip, **temp;
r = g = b = 0L;
flip = 0;
increaseR = increaseG = increaseB = INCREASE_FACTOR;
temp = (int**) malloc(sizeof(int*)); // Avoid breaking strict aliasing
for(int i = 0; i < 1; i++) {
for(int j = 0; j < bucks[i]->bsize; j++) {
switch(flip) {
case 0:
img->R[r] = bucks[i]->data[j];
r++;
rasterR = img->rsize;
*temp = img->R;
img->rsize = checkForRealloc((void**) temp, img->rsize,
(r + REALLOC_MARGIN), sizeof(img->R[0]), increaseR);
img->R = *temp;
if(rasterR < img->rsize) {
increaseR *= 2;
}
break;
case 1:
img->G[g] = bucks[i]->data[j];
g++;
rasterG = img->gsize;
*temp = img->G;
img->gsize = checkForRealloc((void**) temp, img->gsize,
(g + REALLOC_MARGIN), sizeof(img->G[0]), increaseG);
img->G = *temp;
if(rasterG < img->gsize) {
increaseG *= 2;
}
break;
case 2:
img->B[b] = bucks[i]->data[j];
b++;
rasterB = img->bsize;
*temp = img->B;
img->bsize = checkForRealloc((void**) temp, img->bsize,
(b + REALLOC_MARGIN), sizeof(img->B[0]), increaseB);
img->B = *temp;
if(rasterB < img->bsize) {
increaseB *= 2;
}
break;
}
flip = (flip + 1) % 3;
}
bucks[i]->offset = 0;
}
free(temp);
tsize = (r + g + b);
// Check for unaligned rasters
// Either 1 Blue is missing from the image or
// both 1 Green and 1 Blue.
switch(tsize % 3) {
case 0:
break;
case 2:
bucks[0]->offset += 1;
tsize -= 1;
case 1:
bucks[0]->offset += 1;
tsize -= 1;
break;
}
return (tsize / 3);
}
//--------------------------------------------------------------------------//
// - MAIN METHOD -----------------------------------------------------------//
//--------------------------------------------------------------------------//
int main(int argc, char **argv) {
int c, offset, nThreads;
int partitions, halo, haloSize;
int imgWidth, imgHeight;
int convOffset, convSize;
long position, chunkSize, iterSize, bucketSize;
double start, tstart, tend, tread, tcopy, tconv, tstore, treadk;
float extraSizeFactor;
struct timeval tim;
char *sourceFile, *outFile, *kernFile;
char cwd[1024];
FILE *fpsrc, *fpdst;
ImageData source, output;
KernelData kern;
ImageChunk *chunkLst;
DataBucket *buckets;
c = offset = 0;
position = 0L;
tstart = tend = tread = tcopy = tconv = tstore = treadk = 0.0;
sourceFile = outFile = kernFile = NULL;
fpsrc = fpdst = NULL;
source = output = NULL;
kern = NULL;
extraSizeFactor = 1.0f;
if(argc != 6) {
printf("Usage: %s <image-file> <kernel-file> <result-file> "
"<partitions> <threads>\n\n", argv[0]);
printf("- image_file : source image path (*.ppm)\n");
printf("- kernel_file: kernel path (text file with 1D "
"kernel matrix)\n");
printf("- result_file: result image path (*.ppm)\n");
printf("- partitions : Image partitions\n");
printf("- threads : num threads\n\n");
return -1;
}
if(validateParameters(argv) == -1) {
return -1;
}
//Storing parameters
sourceFile = argv[1];
kernFile = argv[2];
outFile = argv[3];
partitions = atoi(argv[4]);
nThreads = atoi(argv[5]);
omp_set_dynamic(FALSE);
omp_set_num_threads(nThreads);
getcwd(cwd, sizeof(cwd));
// READING IMAGE HEADERS, KERNEL Matrix, DUPLICATE IMAGE DATA,
// OPEN RESULTING IMAGE FILE
// Reading kernel matrix
gettimeofday(&tim, NULL);
start = tim.tv_sec + toSeconds(tim.tv_usec);
tstart = start;
if ((kern = readKernel(kernFile)) == NULL) {
return -1;
}
// The matrix kernel defines the halo size to use with the image.
// The halo is zero when the image is not partitioned.
if (partitions == 1) {
halo = 0;
} else {
halo = kern->kernelY;
}
gettimeofday(&tim, NULL);
treadk = treadk + (tim.tv_sec + toSeconds(tim.tv_usec) - start);
// Reading Image Header. Image properties: Magical number, comment,
// size and color resolution.
gettimeofday(&tim, NULL);
start = tim.tv_sec + toSeconds(tim.tv_usec);
// Calculating extra size for memory assignment in order to avoid
// calling realloc further in the execution
extraSizeFactor = extraSizeFactor + calculateExtraSize(partitions);
// Memory allocation based on number of partitions and halo size.
if((source = parseFileHeader(sourceFile, &fpsrc, partitions,
halo, extraSizeFactor)) == NULL) {
return -1;
}
imgWidth = source->width;
imgHeight = source->height;
gettimeofday(&tim, NULL);
tread = tread + (tim.tv_sec + toSeconds(tim.tv_usec) - start);
// Duplicate the image struct.
gettimeofday(&tim, NULL);
start = tim.tv_sec + toSeconds(tim.tv_usec);
if ((output = duplicateImageData(source, partitions, halo,
extraSizeFactor)) == NULL) {
return -1;
}
gettimeofday(&tim, NULL);
tcopy = tcopy + (tim.tv_sec + toSeconds(tim.tv_usec) - start);
// Initialize Image output file. Open the file and store the image header
gettimeofday(&tim, NULL);
start = tim.tv_sec + toSeconds(tim.tv_usec);
if (initfilestore(output, &fpdst, outFile, &position) != 0) {
perror("Error: ");
return -1;
}
gettimeofday(&tim, NULL);
tstore = tstore + (tim.tv_sec + toSeconds(tim.tv_usec) - start);
bucketSize = (imgWidth * imgHeight * 3) / (partitions * 1);
bucketSize = bucketSize + (imgWidth * halo);
bucketSize = (long) ((float) bucketSize * extraSizeFactor);
chunkLst = calculateChunkSections(&fpsrc, source, partitions);
if ((buckets = initializeBuckets(1, bucketSize)) == NULL) {
perror("Error: ");
return -1;
}
//----------------------------------------------------------------------//
// CHUNK PROCESSING LOOP
//----------------------------------------------------------------------//
while (c < partitions) {
// Reading chunk.
gettimeofday(&tim, NULL);
start = tim.tv_sec + toSeconds(tim.tv_usec);
if (readChunk(sourceFile, &(chunkLst[c]->start), &(chunkLst[c]->end),
buckets[0])) {
return -1;
}
// Copying data from the DataBucket into the ImageData arrays
iterSize = rebuildImage(source, buckets);
gettimeofday(&tim, NULL);
tread = tread + (tim.tv_sec + toSeconds(tim.tv_usec) - start);
// Discarding incomplete row.
convOffset = (iterSize % imgWidth);
convSize = iterSize - convOffset;
//Applying offset to bucket
buckets[0]->offset += (convOffset * 3);
// Rows to convolve needs to be bigger than kernel size, either way
// there'll be problems in pixel alignment.
haloSize = (halo / 2);
if(c < (partitions - 1)) {
chunkSize = (convSize / imgWidth) - haloSize;
buckets[0]->offset += (imgWidth * (halo-1) * 3);
if(c == 0) {
offset = 0;
} else {
offset = haloSize;
}
} else {
chunkSize = (convSize / imgWidth);
offset = haloSize;
}
// Duplicate the image chunk
gettimeofday(&tim, NULL);
start = tim.tv_sec + toSeconds(tim.tv_usec);
if (duplicateImageChunk(source, output) == NULL) {
perror("Error: ");
return -1;
}
gettimeofday(&tim, NULL);
tcopy = tcopy + (tim.tv_sec + toSeconds(tim.tv_usec) - start);
//------------------------------------------------------------------//
// - CHUNK CONVOLUTION ---------------------------------------------//
//------------------------------------------------------------------//
gettimeofday(&tim, NULL);
start = tim.tv_sec + toSeconds(tim.tv_usec);
#pragma omp parallel
{
#pragma omp sections
{
#pragma omp section
convolve2D(source->R, output->R, imgWidth, chunkSize,
offset, kern->vkern, kern->kernelX, kern->kernelY);
#pragma omp section
convolve2D(source->G, output->G, imgWidth, chunkSize,
offset, kern->vkern, kern->kernelX, kern->kernelY);
#pragma omp section
convolve2D(source->B, output->B, imgWidth, chunkSize,
offset, kern->vkern, kern->kernelX, kern->kernelY);
}
}
gettimeofday(&tim, NULL);
tconv = tconv + (tim.tv_sec + toSeconds(tim.tv_usec) - start);
//------------------------------------------------------------------//
// - CHUNK SAVING --------------------------------------------------//
//------------------------------------------------------------------//
gettimeofday(&tim, NULL);
start = tim.tv_sec + toSeconds(tim.tv_usec);
if (savingChunk(output, &fpdst, &position, (offset * imgWidth),
(chunkSize * imgWidth))) {
perror("Error: ");
return -1;
}
gettimeofday(&tim, NULL);
tstore = tstore + (tim.tv_sec + toSeconds(tim.tv_usec) - start);
// Moving previously discarded pixels to the beginning of the bucket
// for the next iteration
c++;
if(c < partitions) {
adjustBucketContents(buckets, 1);
}
}
fclose(fpsrc);
fclose(fpdst);
gettimeofday(&tim, NULL);
tend = tim.tv_sec + toSeconds(tim.tv_usec);
printf("-----------------------------------\n");
printf("| TYPE SIZES (BYTES) |\n");
printf("-----------------------------------\n");
printf("Size of short: ----> %ld\n", sizeof(short));
printf("Size of int: ------> %ld\n", sizeof(int));
printf("Size of long: -----> %ld\n", sizeof(long));
printf("Size of intmax_t: -> %ld\n", sizeof(intmax_t));
printf("Size of size_t: ---> %ld\n", sizeof(size_t));
printf("Size of float: ----> %ld\n", sizeof(float));
printf("Size of double: ---> %ld\n", sizeof(double));
printf("-----------------------------------\n");
printf("| IMAGE INFO |\n");
printf("-----------------------------------\n");
printf("Working directory: %s\n", cwd);
printf("File path: %s\n", sourceFile);
printf("File output: %s\n", outFile);
printf("Header size (bytes): %ld\n", source->headersize);
printf("Raster size (bytes): %jd\n", source->rastersize);
printf("ISizeX : %d\n", imgWidth);
printf("ISizeY : %d\n", imgHeight);
printf("kSizeX : %d\n", kern->kernelX);
printf("kSizeY : %d\n", kern->kernelY);
printf("-----------------------------------\n");
printf("| EXECUTION TIMES |\n");
printf("-----------------------------------\n");
printf("%.6lfs elapsed in reading image file.\n", tread);
printf("%.6lfs elapsed in copying image structure.\n", tcopy);
printf("%.6lfs elapsed in reading kernel matrix.\n", treadk);
printf("%.6lfs elapsed computing the convolution.\n", tconv);
printf("%.6lfs elapsed in writing the resulting image.\n", tstore);
printf("-----------------------------------\n");
printf("%.6lfs elapsed in total.\n", tend-tstart);
printf("-----------------------------------\n");
/*printf("%d %d %d %f\n", atoi(&sourceFile[strlen(sourceFile)-5]),
partitions, nThreads, tconv);*/
//----------------------------------------------------------------------//
// - MEMORY CLEANING --------------------------------------------------//
//----------------------------------------------------------------------//
freeImagestructure(&source);
freeImagestructure(&output);
freeDataBuckets(buckets, 1);
freeChunkList(chunkLst, partitions);
free(kern->vkern);
free(kern);
//----------------------------------------------------------------------//
return 0;
}
//--------------------------------------------------------------------------//
|
calculate_signed_distance_to_3d_condition_skin_process.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Pooyan Dadvand
// Daniel Baumgaertner
// Johannes Wolf
//
#if !defined(KRATOS_CALCULATE_DISTANCE_CONDITION_PROCESS_H_INCLUDED )
#define KRATOS_CALCULATE_DISTANCE_CONDITION_PROCESS_H_INCLUDED
// System includes
#include <string>
#include <iostream>
// External includes
// Project includes
#include "includes/define.h"
#include "processes/process.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "spatial_containers/octree_binary.h"
#include "utilities/spatial_containers_configure.h"
#include "utilities/timer.h"
#include "utilities/math_utils.h"
#include "utilities/geometry_utilities.h"
#include "geometries/triangle_3d_3.h"
#include "utilities/body_normal_calculation_utils.h"
#include "utilities/parallel_utilities.h"
namespace Kratos
{
/// Octree "configure" policy class: supplies the type definitions and the
/// static geometric predicates (bounding box, intersection tests) that the
/// spatial containers / octree need in order to locate the embedded skin
/// conditions with respect to the fluid mesh.
class DistanceSpatialContainersConditionConfigure
{
public:
/// Lightweight per-node payload stored in the octree cells:
/// a distance value, 3D coordinates and the node id.
class CellNodeData
{
double mDistance;
double mCoordinates[3];
std::size_t mId;
public:
double& Distance(){return mDistance;}
double& X() {return mCoordinates[0];}
double& Y() {return mCoordinates[1];}
double& Z() {return mCoordinates[2];}
double& operator[](int i) {return mCoordinates[i];}
std::size_t& Id(){return mId;}
};
///@name Type Definitions
///@{
enum { Dimension = 3,
DIMENSION = 3,
MAX_LEVEL = 12,
MIN_LEVEL = 2 // this cannot be less than 2!!!
};
typedef Point PointType; /// always the point 3D
typedef std::vector<double>::iterator DistanceIteratorType;
typedef PointerVectorSet<
GeometricalObject::Pointer,
IndexedObject,
std::less<typename IndexedObject::result_type>,
std::equal_to<typename IndexedObject::result_type>,
Kratos::shared_ptr<typename GeometricalObject::Pointer>,
std::vector< Kratos::shared_ptr<typename GeometricalObject::Pointer> >
> ContainerType;
typedef ContainerType::value_type PointerType;
typedef ContainerType::iterator IteratorType;
// Result container uses the same layout as the input container.
typedef PointerVectorSet<
GeometricalObject::Pointer,
IndexedObject,
std::less<typename IndexedObject::result_type>,
std::equal_to<typename IndexedObject::result_type>,
Kratos::shared_ptr<typename GeometricalObject::Pointer>,
std::vector< Kratos::shared_ptr<typename GeometricalObject::Pointer> >
> ResultContainerType;
typedef ResultContainerType::value_type ResultPointerType;
typedef ResultContainerType::iterator ResultIteratorType;
typedef GeometricalObject::Pointer pointer_type;
typedef CellNodeData cell_node_data_type;
typedef std::vector<CellNodeData*> data_type;
typedef std::vector<PointerType>::iterator PointerTypeIterator;
/// Pointer definition of DistanceSpatialContainersConditionConfigure
KRATOS_CLASS_POINTER_DEFINITION(DistanceSpatialContainersConditionConfigure);
///@}
///@name Life Cycle
///@{
/// Default constructor.
DistanceSpatialContainersConditionConfigure() {}
/// Destructor.
virtual ~DistanceSpatialContainersConditionConfigure() {}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/// Allocates the per-cell data vector: 27 slots (presumably one per
/// cell node of a 3x3x3 arrangement — TODO confirm), all initially NULL.
static data_type* AllocateData() {
return new data_type(27, (CellNodeData*)NULL);
}
/// Shallow copy of the cell data pointers (the pointed-to nodes are shared).
static void CopyData(data_type* source, data_type* destination) {
*destination = *source;
}
/// Deletes the data vector only; ownership of the CellNodeData objects
/// pointed to is handled elsewhere.
static void DeleteData(data_type* data) {
delete data;
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Axis-aligned bounding box of rObject's geometry, returned as two Points
/// (component-wise min / max over all geometry points).
static inline void CalculateBoundingBox(const PointerType& rObject, PointType& rLowPoint, PointType& rHighPoint)
{
rHighPoint = rObject->GetGeometry().GetPoint(0);
rLowPoint = rObject->GetGeometry().GetPoint(0);
for (unsigned int point = 0; point<rObject->GetGeometry().PointsNumber(); point++)
{
for(std::size_t i = 0; i<3; i++)
{
rLowPoint[i] = (rLowPoint[i] > rObject->GetGeometry().GetPoint(point)[i] ) ? rObject->GetGeometry().GetPoint(point)[i] : rLowPoint[i];
rHighPoint[i] = (rHighPoint[i] < rObject->GetGeometry().GetPoint(point)[i] ) ? rObject->GetGeometry().GetPoint(point)[i] : rHighPoint[i];
}
}
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Same bounding box computation as above, but writing into raw
/// double[3] arrays (the interface the octree uses).
static inline void GetBoundingBox(const PointerType rObject, double* rLowPoint, double* rHighPoint)
{
for(std::size_t i = 0; i<3; i++)
{
rLowPoint[i] = rObject->GetGeometry().GetPoint(0)[i];
rHighPoint[i] = rObject->GetGeometry().GetPoint(0)[i];
}
for (unsigned int point = 0; point<rObject->GetGeometry().PointsNumber(); point++)
{
for(std::size_t i = 0; i<3; i++)
{
rLowPoint[i] = (rLowPoint[i] > rObject->GetGeometry().GetPoint(point)[i] ) ? rObject->GetGeometry().GetPoint(point)[i] : rLowPoint[i];
rHighPoint[i] = (rHighPoint[i] < rObject->GetGeometry().GetPoint(point)[i] ) ? rObject->GetGeometry().GetPoint(point)[i] : rHighPoint[i];
}
}
}
///******************************************************************************************************************
///******************************************************************************************************************
/// True if the two objects' geometries intersect (delegates to HasIntersection).
static inline bool Intersection(const PointerType& rObj_1, const PointerType& rObj_2)
{
Element::GeometryType& geom_1 = rObj_1->GetGeometry();
Element::GeometryType& geom_2 = rObj_2->GetGeometry();
return geom_1.HasIntersection(geom_2);
}
///******************************************************************************************************************
///******************************************************************************************************************
/// True if rObject's geometry intersects the axis-aligned box [rLowPoint, rHighPoint].
static inline bool IntersectionBox(const PointerType& rObject, const PointType& rLowPoint, const PointType& rHighPoint)
{
return rObject->GetGeometry().HasIntersection(rLowPoint, rHighPoint);
}
///******************************************************************************************************************
///******************************************************************************************************************
/// NOT IMPLEMENTED: always throws. Kept to satisfy the configure-class
/// interface; the tolerance-expanded points are built but never used.
static inline bool IsIntersected(const Element::Pointer rObject, double Tolerance, const double* rLowPoint, const double* rHighPoint)
{
Point low_point(rLowPoint[0] - Tolerance, rLowPoint[1] - Tolerance, rLowPoint[2] - Tolerance);
Point high_point(rHighPoint[0] + Tolerance, rHighPoint[1] + Tolerance, rHighPoint[2] + Tolerance);
KRATOS_THROW_ERROR(std::logic_error, "Not Implemented method", "")
//return HasIntersection(rObject->GetGeometry(), low_point, high_point);
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
virtual std::string Info() const
{
return " Spatial Containers Configure";
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const {}
/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const {}
///@}
protected:
private:
/// Assignment operator (declared, not defined: class is non-assignable).
DistanceSpatialContainersConditionConfigure& operator=(DistanceSpatialContainersConditionConfigure const& rOther);
/// Copy constructor (declared, not defined: class is non-copyable).
DistanceSpatialContainersConditionConfigure(DistanceSpatialContainersConditionConfigure const& rOther);
}; // Class DistanceSpatialContainersConditionConfigure
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/** Detail class definition.
*/
class CalculateSignedDistanceTo3DConditionSkinProcess
: public Process
{
public:
///@name Type Definitions
///@{
/// Pointer definition of CalculateSignedDistanceTo3DConditionSkinProcess
KRATOS_CLASS_POINTER_DEFINITION(CalculateSignedDistanceTo3DConditionSkinProcess);
typedef DistanceSpatialContainersConditionConfigure ConfigurationType;
typedef OctreeBinaryCell<ConfigurationType> CellType;
typedef OctreeBinary<CellType> OctreeType;
typedef ConfigurationType::cell_node_data_type CellNodeDataType;
typedef Point PointType; /// always the point 3D
typedef OctreeType::cell_type::object_container_type object_container_type;
typedef struct{
array_1d<double,3> Coordinates;
array_1d<double,3> StructElemNormal;
}IntersectionNodeStruct;
typedef struct{
std::vector<IntersectionNodeStruct> IntNodes;
}TetEdgeStruct;
///@}
///@name Life Cycle
///@{
/// Constructor.
/// Constructor.
/// @param rThisModelPartStruc model part holding the structure skin conditions
/// @param rThisModelPartFluid fluid model part whose nodal distances are computed
/// NOTE(review): both mrSkinModelPart and mrBodyModelPart are initialized from
/// rThisModelPartStruc — looks intentional (skin == body here) but worth confirming.
CalculateSignedDistanceTo3DConditionSkinProcess(ModelPart& rThisModelPartStruc, ModelPart& rThisModelPartFluid)
: mrSkinModelPart(rThisModelPartStruc), mrBodyModelPart(rThisModelPartStruc), mrFluidModelPart(rThisModelPartFluid)
{
}
/// Destructor.
/// Destructor. Nothing to release: the process only holds references.
~CalculateSignedDistanceTo3DConditionSkinProcess() override
{
}
///@}
///@name Operators
///@{
/// Function-call operator: makes the process usable as a functor by
/// forwarding to Execute().
void operator()()
{
Execute();
}
///@}
///@name Operations
///@{
///******************************************************************************************************************
///******************************************************************************************************************
/// Main entry point of the process: builds the octree from the skin
/// conditions, computes the fluid/structure distances, then runs the
/// second distance-sweep (CalculateDistance2). The large commented-out
/// sections are retained development/debug history.
void Execute() override
{
KRATOS_TRY
/*
std::cout << "Clearing the list of correspondances between the FIXED MESH ELEMENTS and the EMBEDDED CONDITIONS THAT ARE CROSSING THEM..." << std::endl;
for( ModelPart::ElementIterator i_fluidElement = mrFluidModelPart.ElementsBegin();
i_fluidElement != mrFluidModelPart.ElementsEnd();
i_fluidElement++)
{
//i_fluidElement->GetValue(NEIGHBOUR_CONDITIONS).resize(0);
( i_fluidElement->GetValue(NEIGHBOUR_EMBEDDED_FACES)).reserve(6);
GlobalPointersVector<GeometricalObject >& rE = i_fluidElement->GetValue(NEIGHBOUR_EMBEDDED_FACES);
rE.erase(rE.begin(),rE.end() );
}
*/
//std::cout << "Generating the Octree..." << std::endl;
GenerateOctree();
//std::cout << "Generating the Octree finished" << std::endl;
DistanceFluidStructure();
//GenerateNodes();
CalculateDistance2(); // I have to change this. Pooyan.
// CalculateDistance();
// CalculateDistance2();
// double coord[3] = {0.4375, 0.57812, 0.5};
// double distance = DistancePositionInSpace(coord);
// KRATOS_WATCH(distance);
//mrSkinModelPart.GetCommunicator().AssembleCurrentData(DISTANCE);
// std::ofstream mesh_file1("octree1.post.msh");
// std::ofstream res_file("octree1.post.res");
// Timer::Start("Writing Gid conform Mesh");
// PrintGiDMesh(mesh_file1);
// PrintGiDResults(res_file);
//octree.PrintGiDMeshNew(mesh_file2);
// Timer::Stop("Writing Gid conform Mesh");
// KRATOS_WATCH(mrBodyModelPart);
//delete octree. TODO: Carlos
KRATOS_CATCH("");
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Computes elemental distances of the fluid mesh to the structure skin:
/// resets all nodal/elemental distances to +1, builds the tet-edge index
/// table, zeroes EMBEDDED_VELOCITY on every fluid element, then processes
/// each fluid element for skin intersections.
void DistanceFluidStructure()
{
// Initialize nodal distances each node in the domain to 1.0
InitializeDistances();
// Initialize index table to define line Edges of fluid element
BoundedMatrix<unsigned int,6,2> TetEdgeIndexTable;
SetIndexTable(TetEdgeIndexTable);
// Reset the accumulated embedded velocity before the intersection loop
for( ModelPart::ElementIterator i_fluidElement = mrFluidModelPart.ElementsBegin();
i_fluidElement != mrFluidModelPart.ElementsEnd();
i_fluidElement++)
{
i_fluidElement->GetValue(EMBEDDED_VELOCITY)=ZeroVector(3);
}
// loop over all fluid elements
for( ModelPart::ElementIterator i_fluidElement = mrFluidModelPart.ElementsBegin();
i_fluidElement != mrFluidModelPart.ElementsEnd();
i_fluidElement++)
{
CalcNodalDistancesOfTetNodes( i_fluidElement , TetEdgeIndexTable );
}
// NOTE(review): leftover debug output ("ENDOF LOOP") — consider removing.
KRATOS_WATCH("ENDOF LOOP")
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Resets every nodal DISTANCE and every ELEMENTAL_DISTANCES entry of the
/// fluid model part to 1.0 (the maximum distance of the normalized space)
/// and clears the SPLIT_ELEMENT flag on all fluid elements.
void InitializeDistances()
{
ModelPart::NodesContainerType::ContainerType& nodes = mrFluidModelPart.NodesArray();
// reset the node distance to 1.0 which is the maximum distance in our normalized space.
int nodesSize = nodes.size();
for(int i = 0 ; i < nodesSize ; i++)
nodes[i]->GetSolutionStepValue(DISTANCE) = 1.0;
ModelPart::ElementsContainerType::ContainerType& fluid_elements = mrFluidModelPart.ElementsArray();
// One distance per tetrahedron node, all starting at +1 ("outside").
array_1d<double,4> ElementalDistances;
const double initial_distance = 1.0;
ElementalDistances[0] = initial_distance;
ElementalDistances[1] = initial_distance;
ElementalDistances[2] = initial_distance;
ElementalDistances[3] = initial_distance;
// reset the elemental distance to 1.0 which is the maximum distance in our normalized space.
for(unsigned int i = 0 ; i < fluid_elements.size() ; i++)
{
fluid_elements[i]->SetValue(ELEMENTAL_DISTANCES,ElementalDistances);
fluid_elements[i]->GetValue(SPLIT_ELEMENT) = false;
}
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Fills the 6x2 lookup table that maps each of the six edges of a
/// tetrahedron to the local indices of its two end nodes.
void SetIndexTable( BoundedMatrix<unsigned int,6,2>& TetEdgeIndexTable )
{
// Row e lists the two local node indices of tetrahedron edge e.
const unsigned int edge_nodes[6][2] =
{ {0,1}, {0,2}, {0,3}, {1,2}, {1,3}, {2,3} };
for(unsigned int e = 0 ; e < 6 ; e++)
{
TetEdgeIndexTable(e,0) = edge_nodes[e][0];
TetEdgeIndexTable(e,1) = edge_nodes[e][1];
}
}
///******************************************************************************************************************
///******************************************************************************************************************
/// For one fluid tetrahedron: collects the octree leaves it intersects,
/// searches all six tet edges for intersections with the structure skin,
/// normalizes the accumulated EMBEDDED_VELOCITY, and finally computes the
/// signed nodal distances if any edge was cut.
void CalcNodalDistancesOfTetNodes( ModelPart::ElementsContainerType::iterator& i_fluidElement,
BoundedMatrix<unsigned int,6,2> TetEdgeIndexTable)
{
std::vector<OctreeType::cell_type*> leaves;
std::vector<TetEdgeStruct> IntersectedTetEdges;
unsigned int NumberIntersectionsOnTetCorner = 0;
// Get leaves of octree intersecting with fluid element
mOctree.GetIntersectedLeaves(*(i_fluidElement).base(),leaves);
int intersection_counter=0;
//i_fluidElement->GetValue(EMBEDDED_VELOCITY)=ZeroVector(3);
// Loop over all 6 line Edges of the tetrahedra
for(unsigned int i_tetEdge = 0; i_tetEdge < 6; i_tetEdge++)
{
IdentifyIntersectionNodes( i_fluidElement , i_tetEdge , leaves , IntersectedTetEdges ,
NumberIntersectionsOnTetCorner , TetEdgeIndexTable, intersection_counter );
}
// Average the accumulated condition velocities: each intersection added the
// sum of 3 condition-node velocities, hence the 3*counter divisor.
if (intersection_counter!=0)
i_fluidElement->GetValue(EMBEDDED_VELOCITY)/=3.0*intersection_counter;
//else
// i_fluidElement->GetValue(EMBEDDED_VELOCITY)=ZeroVector(3);
//KRATOS_WATCH("============================================================")
// KRATOS_WATCH(i_fluidElement->GetValue(EMBEDDED_VELOCITY))
//KRATOS_WATCH("???????????????????????????????????????????????????????????????")
//if (intersection_counter!=0)
// KRATOS_WATCH(intersection_counter)
// Only cut elements get signed distances assigned.
if(IntersectedTetEdges.size() > 0)
CalcNodalDistanceTo3DSkin( IntersectedTetEdges , i_fluidElement , NumberIntersectionsOnTetCorner );
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Finds all intersection points between one tetrahedron edge (i_tetEdge)
/// and the structural conditions stored in the given octree leaves.
/// New intersection nodes (with the condition's normal) are appended to
/// IntersectedTetEdges; intersections on tet corners are only counted,
/// while mid-edge intersections also accumulate the condition's nodal
/// velocities into EMBEDDED_VELOCITY and bump intersection_counter.
void IdentifyIntersectionNodes( ModelPart::ElementsContainerType::iterator& i_fluidElement,
unsigned int i_tetEdge,
std::vector<OctreeType::cell_type*>& leaves,
std::vector<TetEdgeStruct>& IntersectedTetEdges,
unsigned int& NumberIntersectionsOnTetCorner,
BoundedMatrix<unsigned int,6,2> TetEdgeIndexTable,
int& intersection_counter)
{
std::vector<unsigned int> IntersectingStructCondID;
TetEdgeStruct NewTetEdge;
// Get nodes of line Edge
unsigned int EdgeStartIndex = TetEdgeIndexTable(i_tetEdge,0);
unsigned int EdgeEndIndex = TetEdgeIndexTable(i_tetEdge,1);
PointType& P1 = i_fluidElement->GetGeometry()[EdgeStartIndex];
PointType& P2 = i_fluidElement->GetGeometry()[EdgeEndIndex];
double EdgeNode1[3] = {P1.X() , P1.Y() , P1.Z()};
double EdgeNode2[3] = {P2.X() , P2.Y() , P2.Z()};
//int count=0;
// loop over all octree cells which are intersected by the fluid element
for(unsigned int i_cell = 0 ; i_cell < leaves.size() ; i_cell++)
{
// Structural element contained in one cell of the octree
object_container_type* struct_cond = (leaves[i_cell]->pGetObjects());
// loop over all structural elements within each octree cell
for(object_container_type::iterator i_StructCondition = struct_cond->begin(); i_StructCondition != struct_cond->end(); i_StructCondition++)
{
//KRATOS_WATCH(struct_cond->size())
// Each condition is processed once per tet edge, even if it appears in several leaves.
if( StructuralElementNotYetConsidered( (*i_StructCondition)->Id() , IntersectingStructCondID ) )
{
// Calculate and associate intersection point to the current fluid element
double IntersectionPoint[3] = {0.0 , 0.0 , 0.0};
int TetEdgeHasIntersections = IntersectionTriangleSegment( (*i_StructCondition)->GetGeometry() , EdgeNode1 , EdgeNode2 , IntersectionPoint );
if( TetEdgeHasIntersections == 1 )
{
IntersectionNodeStruct NewIntersectionNode;
// Assign information to the intersection node
NewIntersectionNode.Coordinates[0] = IntersectionPoint[0];
NewIntersectionNode.Coordinates[1] = IntersectionPoint[1];
NewIntersectionNode.Coordinates[2] = IntersectionPoint[2];
// Skip duplicates (same point found via another condition/leaf).
if ( IsNewIntersectionNode( NewIntersectionNode , IntersectedTetEdges ) )
{
if( IsIntersectionNodeOnTetEdge( IntersectionPoint , EdgeNode1 , EdgeNode2 ) )
{
// Calculate normal of the structural element at the position of the intersection point
CalculateNormal3D((*i_StructCondition)->GetGeometry(),NewIntersectionNode.StructElemNormal);
// add the new intersection point to the list of intersection points of the fluid element
NewTetEdge.IntNodes.push_back(NewIntersectionNode);
//(i_fluidElement->GetValue(NEIGHBOUR_EMBEDDED_FACES)).push_back( GeometricalObject::WeakPointer( *(i_StructCondition.base()) ) );
/*
array_1d<double,3> emb_vel=(*i_StructCondition)->GetGeometry()[0].FastGetSolutionStepValue(VELOCITY);
emb_vel+=(*i_StructCondition)->GetGeometry()[1].FastGetSolutionStepValue(VELOCITY);
emb_vel+=(*i_StructCondition)->GetGeometry()[2].FastGetSolutionStepValue(VELOCITY);
//KRATOS_WATCH(emb_vel)
i_fluidElement->GetValue(EMBEDDED_VELOCITY)+=emb_vel;
intersection_counter++;
*/
// check, how many intersection nodes are located on corner points of the tetrahedra
if ( IsIntersectionOnCorner( NewIntersectionNode , EdgeNode1 , EdgeNode2) )
NumberIntersectionsOnTetCorner++;
//BY NOW I WANT TO CONSIDER ONLY THE EDGES THAT ARE CUT "NOT AT THE VERTEX"
else
{
// double dummy=0.0;
//(i_fluidElement->GetValue(NEIGHBOUR_EMBEDDED_FACES)).push_back( GeometricalObject::WeakPointer( *(i_StructCondition.base()) ) );
// Sum the velocities of the 3 condition nodes; the caller divides by
// 3*intersection_counter to recover the average.
array_1d<double,3> emb_vel=(*i_StructCondition)->GetGeometry()[0].FastGetSolutionStepValue(VELOCITY);
emb_vel+=(*i_StructCondition)->GetGeometry()[1].FastGetSolutionStepValue(VELOCITY);
emb_vel+=(*i_StructCondition)->GetGeometry()[2].FastGetSolutionStepValue(VELOCITY);
//KRATOS_WATCH(emb_vel)
i_fluidElement->GetValue(EMBEDDED_VELOCITY)+=emb_vel;
intersection_counter++;
}
//(pGeom[i].GetValue(NEIGHBOUR_ELEMENTS)).push_back( Element::WeakPointer( *(ie.base()) ) );
}
}
}
}
}
}
// check, if intersection nodes have been found on the tet edge --> if yes, then add these information to the TetEdgeVector
if( NewTetEdge.IntNodes.size() > 0 )
IntersectedTetEdges.push_back(NewTetEdge);
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Returns true (and registers the ID) if the structural condition with the
/// given ID has not yet been processed for the current tet edge; returns
/// false if it was already seen via another octree cell.
bool StructuralElementNotYetConsidered( unsigned int IDCurrentStructCond,
std::vector<unsigned int>& IntersectingStructCondID )
{
// Already registered? Then it was handled when found in another leaf.
for( const unsigned int known_id : IntersectingStructCondID )
{
if( known_id == IDCurrentStructCond )
return false;
}
// First encounter: remember the ID so subsequent cells skip this condition.
IntersectingStructCondID.push_back( IDCurrentStructCond );
return true;
}
///******************************************************************************************************************
///******************************************************************************************************************
bool IsIntersectionNodeOnTetEdge( double* IntersectionPoint , double* EdgeNode1 , double* EdgeNode2 )
{
// check, if intersection point is located on any edge of the fluid element
array_1d<double,3> ConnectVectTetNodeIntNode1;
array_1d<double,3> ConnectVectTetNodeIntNode2;
array_1d<double,3> EdgeVector;
ConnectVectTetNodeIntNode1[0] = IntersectionPoint[0] - EdgeNode1[0];
ConnectVectTetNodeIntNode1[1] = IntersectionPoint[1] - EdgeNode1[1];
ConnectVectTetNodeIntNode1[2] = IntersectionPoint[2] - EdgeNode1[2];
ConnectVectTetNodeIntNode2[0] = IntersectionPoint[0] - EdgeNode2[0];
ConnectVectTetNodeIntNode2[1] = IntersectionPoint[1] - EdgeNode2[1];
ConnectVectTetNodeIntNode2[2] = IntersectionPoint[2] - EdgeNode2[2];
double LengthConnectVect1 = norm_2( ConnectVectTetNodeIntNode1 );
double LengthConnectVect2 = norm_2( ConnectVectTetNodeIntNode2 );
EdgeVector[0] = EdgeNode2[0] - EdgeNode1[0];
EdgeVector[1] = EdgeNode2[1] - EdgeNode1[1];
EdgeVector[2] = EdgeNode2[2] - EdgeNode1[2];
double MaxEdgeLength = norm_2( EdgeVector );
// if both connection vectors (corner point --> intersection point)
// are smaller or equal to the edge length of tetrahedra,
// then intersection point is located on the edge
if( (LengthConnectVect1 <= (MaxEdgeLength)) && (LengthConnectVect2 <= (MaxEdgeLength)) )
return true;
else
return false;
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Returns true if NewIntersectionNode does not coincide (within epsilon)
/// with any intersection node already stored in IntersectedTetEdges;
/// false if a matching node already exists.
bool IsNewIntersectionNode(IntersectionNodeStruct& NewIntersectionNode,
std::vector<TetEdgeStruct> IntersectedTetEdges )
{
array_1d<double,3> DiffVector;
double NormDiffVector;
unsigned int NumberIntNodes;
// Compare against every intersection node collected so far.
for( unsigned int i_TetEdge = 0 ; i_TetEdge < IntersectedTetEdges.size() ; i_TetEdge++ )
{
NumberIntNodes = IntersectedTetEdges[i_TetEdge].IntNodes.size();
for( unsigned int i_IntNode = 0 ; i_IntNode < NumberIntNodes ; i_IntNode++ )
{
DiffVector[0] = NewIntersectionNode.Coordinates[0] - IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode].Coordinates[0];
DiffVector[1] = NewIntersectionNode.Coordinates[1] - IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode].Coordinates[1];
DiffVector[2] = NewIntersectionNode.Coordinates[2] - IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode].Coordinates[2];
NormDiffVector = norm_2(DiffVector);
// Coincides with an existing node --> not new.
if( NormDiffVector < epsilon )
return false;
}
}
// No stored node matches, so this intersection node is new.
return true;
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Returns true if the intersection node coincides (within epsilon) with
/// either endpoint of the tet edge, i.e. the cut happens at a corner node.
bool IsIntersectionOnCorner(IntersectionNodeStruct& NewIntersectionNode,
double* EdgeNode1,
double* EdgeNode2 )
{
const double* endpoints[2] = { EdgeNode1 , EdgeNode2 };
for(unsigned int i_end = 0 ; i_end < 2 ; i_end++)
{
array_1d<double,3> DiffVector;
DiffVector[0] = endpoints[i_end][0] - NewIntersectionNode.Coordinates[0];
DiffVector[1] = endpoints[i_end][1] - NewIntersectionNode.Coordinates[1];
DiffVector[2] = endpoints[i_end][2] - NewIntersectionNode.Coordinates[2];
// Within tolerance of this endpoint --> intersection on a corner.
if( norm_2(DiffVector) < epsilon )
return true;
}
return false;
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Computes the (area-scaled) normal of a triangular geometry as half the
/// cross product of its two edge vectors (node0->node1) x (node0->node2).
/// The result is NOT normalized; its magnitude equals the triangle area.
void CalculateNormal3D(Element::GeometryType& rGeometry,
array_1d<double,3>& rResultNormal)
{
array_1d<double,3> v1;
array_1d<double,3> v2 ;
v1[0] = rGeometry[1].X() - rGeometry[0].X();
v1[1] = rGeometry[1].Y() - rGeometry[0].Y();
v1[2] = rGeometry[1].Z() - rGeometry[0].Z();
v2[0] = rGeometry[2].X() - rGeometry[0].X();
v2[1] = rGeometry[2].Y() - rGeometry[0].Y();
v2[2] = rGeometry[2].Z() - rGeometry[0].Z();
MathUtils<double>::CrossProduct(rResultNormal,v1,v2);
// |v1 x v2| is twice the triangle area; scale to get the area-weighted normal.
rResultNormal *= 0.5;
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Dispatches the signed-distance computation for one cut tetrahedron,
/// depending on how many averaged intersection nodes approximate the
/// structure inside the element (1 corner, 2 corners/one edge, 3 or 4 edges).
void CalcNodalDistanceTo3DSkin(std::vector<TetEdgeStruct>& IntersectedTetEdges,
ModelPart::ElementsContainerType::iterator& i_fluid_element,
unsigned int NumberIntersectionsOnTetCorner)
{
std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure;
array_1d<double,4> ElementalDistances;
// Reduce all found intersection nodes located on each tetdrahedra edge to just one intersection node by averaging
ComputeApproximationNodes(IntersectedTetEdges,NodesOfApproximatedStructure);
// Intersection with one corner point
if( NodesOfApproximatedStructure.size() == 1 && NumberIntersectionsOnTetCorner == 1 )
{
CalcSignedDistancesToOneIntNode(i_fluid_element,NodesOfApproximatedStructure,ElementalDistances);
i_fluid_element->GetValue(SPLIT_ELEMENT) = true;
}
// Intersection with two corner points / one tetrahedra edge
if( NodesOfApproximatedStructure.size() == 2 && NumberIntersectionsOnTetCorner == 2 )
{
CalcSignedDistancesToTwoIntNodes(i_fluid_element,NodesOfApproximatedStructure,ElementalDistances);
i_fluid_element->GetValue(SPLIT_ELEMENT) = true;
}
// Intersection with three tetrahedra edges
if( NodesOfApproximatedStructure.size() == 3 )
{
CalcSignedDistancesToThreeIntNodes(i_fluid_element,NodesOfApproximatedStructure,IntersectedTetEdges,ElementalDistances);
i_fluid_element->GetValue(SPLIT_ELEMENT) = true;
}
// Intersection with four tetrahedra edges
if( NodesOfApproximatedStructure.size() == 4 )
{
CalcSignedDistancesToFourIntNodes(i_fluid_element,NodesOfApproximatedStructure,IntersectedTetEdges,ElementalDistances);
i_fluid_element->GetValue(SPLIT_ELEMENT) = true;
}
// Store the distances only if one of the cases above marked the element as split
if( i_fluid_element->GetValue(SPLIT_ELEMENT) == true )
AssignDistancesToElements(i_fluid_element,ElementalDistances);
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Collapses the possibly-multiple intersection nodes on each cut tet edge
/// into a single approximation node by averaging their coordinates.
/// For 1-2 cut edges the first intersection's structure normal is carried
/// over (needed for the sign of the distance in those cases).
void ComputeApproximationNodes(std::vector<TetEdgeStruct> IntersectedTetEdges,
std::vector<IntersectionNodeStruct>& NodesOfApproximatedStructure)
{
unsigned int NumberIntNodes;
double sum_X;
double sum_Y;
double sum_Z;
// calculate average of all intersection nodes of each tetrahedra edge
// (IntersectedTetEdges entries always contain at least one node, so
// NumberIntNodes is never zero here).
for(unsigned int i_TetEdge = 0 ; i_TetEdge < IntersectedTetEdges.size() ; i_TetEdge++)
{
NumberIntNodes = IntersectedTetEdges[i_TetEdge].IntNodes.size();
sum_X = 0;
sum_Y = 0;
sum_Z = 0;
for( unsigned int i_IntNode = 0 ; i_IntNode < NumberIntNodes ; i_IntNode++ )
{
sum_X += IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode].Coordinates[0];
sum_Y += IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode].Coordinates[1];
sum_Z += IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode].Coordinates[2];
}
IntersectionNodeStruct NewApproximationNode;
NewApproximationNode.Coordinates[0] = sum_X / NumberIntNodes;
NewApproximationNode.Coordinates[1] = sum_Y / NumberIntNodes;
NewApproximationNode.Coordinates[2] = sum_Z / NumberIntNodes;
// Normal only needed when distances are signed via the structure normal
// (the 1- and 2-intersection cases).
if(IntersectedTetEdges.size() <= 2)
NewApproximationNode.StructElemNormal = IntersectedTetEdges[i_TetEdge].IntNodes[0].StructElemNormal;
NodesOfApproximatedStructure.push_back(NewApproximationNode);
}
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Signed distances for the case of a single intersection node (cut at one
/// tet corner): each tet node gets the Euclidean distance to that node,
/// signed by the side of the structure it lies on (via the structure normal).
void CalcSignedDistancesToOneIntNode( ModelPart::ElementsContainerType::iterator& i_fluid_element,
std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure,
array_1d<double,4>& ElementalDistances)
{
const array_1d<double,3>& IntersectionNodeCoord = NodesOfApproximatedStructure[0].Coordinates;
array_1d<double,3> DistVecTetNode;
array_1d<double,3> TetNode;
array_1d<double,3> NormalAtIntersectionNode;
double NormDistTetNode;
double InnerProduct;
Geometry< Node<3> >& rFluidGeom = i_fluid_element->GetGeometry();
for(unsigned int i_TetNode = 0 ; i_TetNode < 4 ; i_TetNode++)
{
// Get coordinates of the fluid elmenent nodes
TetNode = rFluidGeom[i_TetNode].Coordinates();
// Compute unsigned distance
DistVecTetNode[0] = TetNode[0] - IntersectionNodeCoord[0];
DistVecTetNode[1] = TetNode[1] - IntersectionNodeCoord[1];
DistVecTetNode[2] = TetNode[2] - IntersectionNodeCoord[2];
NormDistTetNode = norm_2( DistVecTetNode );
// Get normal at intersection
NormalAtIntersectionNode = NodesOfApproximatedStructure[0].StructElemNormal;
// Sign of the projection onto the structure normal decides inside/outside.
InnerProduct = inner_prod(DistVecTetNode,NormalAtIntersectionNode);
// Assign distances as nodal solution values
if(InnerProduct>epsilon)
ElementalDistances[i_TetNode] = NormDistTetNode;
else if(InnerProduct>-epsilon)
ElementalDistances[i_TetNode] = 0;
else
ElementalDistances[i_TetNode] = -NormDistTetNode;
}
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Signed distances for the case of two intersection nodes (cut along one
/// tet edge / two corners): the unsigned distance of each tet node is its
/// distance to the line segment between the two intersection points; the
/// sign comes from the projection onto the averaged structure normal.
void CalcSignedDistancesToTwoIntNodes( ModelPart::ElementsContainerType::iterator& i_fluid_element,
std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure,
array_1d<double,4>& ElementalDistances)
{
const array_1d<double,3>& IntersectionNode1Coord = NodesOfApproximatedStructure[0].Coordinates;
const array_1d<double,3>& IntersectionNode2Coord = NodesOfApproximatedStructure[1].Coordinates;
array_1d<double,3> TetNode;
array_1d<double,3> DistVecTetNode;
array_1d<double,3> NormalAtIntersectionNode1;
array_1d<double,3> NormalAtIntersectionNode2;
array_1d<double,3> ResNormal;
double InnerProduct;
double NormDistTetNode;
const Point LinePoint1 = Point(IntersectionNode1Coord[0] , IntersectionNode1Coord[1] , IntersectionNode1Coord[2]);
const Point LinePoint2 = Point(IntersectionNode2Coord[0] , IntersectionNode2Coord[1] , IntersectionNode2Coord[2]);
Geometry< Node<3> >& rFluidGeom = i_fluid_element->GetGeometry();
for(unsigned int i_TetNode = 0 ; i_TetNode < 4 ; i_TetNode++)
{
// Get coordinates of the fluid element nodes
TetNode = rFluidGeom(i_TetNode)->Coordinates();
// Compute distance to point
NormDistTetNode = GeometryUtils::PointDistanceToLineSegment3D(LinePoint1, LinePoint2 , Point(TetNode[0],TetNode[1],TetNode[2]));
// Compute unsigned distance vector by assuming the mean position vector of the two intersection points
DistVecTetNode[0] = TetNode[0] - IntersectionNode1Coord[0];
DistVecTetNode[1] = TetNode[1] - IntersectionNode1Coord[1];
DistVecTetNode[2] = TetNode[2] - IntersectionNode1Coord[2];
// Get normal at intersections, average them and check direction of distances
NormalAtIntersectionNode1 = NodesOfApproximatedStructure[0].StructElemNormal;
NormalAtIntersectionNode2 = NodesOfApproximatedStructure[1].StructElemNormal;
// Compute unsigned distance
ResNormal[0] = 0.5*(NormalAtIntersectionNode1[0] + NormalAtIntersectionNode2[0]);
ResNormal[1] = 0.5*(NormalAtIntersectionNode1[1] + NormalAtIntersectionNode2[1]);
ResNormal[2] = 0.5*(NormalAtIntersectionNode1[2] + NormalAtIntersectionNode2[2]);
// Sign of the projection onto the averaged normal decides inside/outside.
InnerProduct = inner_prod(DistVecTetNode,ResNormal);
// Assign distances as nodal solution values
if(InnerProduct>epsilon)
ElementalDistances[i_TetNode] = NormDistTetNode;
else if(InnerProduct>-epsilon)
ElementalDistances[i_TetNode] = 0;
else
ElementalDistances[i_TetNode] = -NormDistTetNode;
}
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Signed distances for the case of three intersection nodes: those three
/// nodes directly form the triangle that approximates the structure inside
/// the tetrahedron.
void CalcSignedDistancesToThreeIntNodes( ModelPart::ElementsContainerType::iterator& i_fluid_element,
std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure,
std::vector<TetEdgeStruct> IntersectedTetEdges,
array_1d<double,4>& ElementalDistances)
{
// The triangle simply uses the three approximation nodes in order.
array_1d<unsigned int,3> IndexNodes;
for(unsigned int k = 0 ; k < 3 ; k++)
IndexNodes[k] = k;
// Compute distance for each tetrahedra node to the triangle to approximate the structure
CalcSignedDistancesToApproxTriangle( i_fluid_element , NodesOfApproximatedStructure , IntersectedTetEdges , ElementalDistances , IndexNodes );
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Signed distances for the case of four intersection nodes: the four nodes
/// span a quadrilateral that is split into two triangles; each tet node gets
/// the distance (to either triangle) of smaller magnitude.
void CalcSignedDistancesToFourIntNodes( ModelPart::ElementsContainerType::iterator& i_fluid_element,
std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure,
std::vector<TetEdgeStruct> IntersectedTetEdges,
array_1d<double,4>& ElementalDistances)
{
array_1d<unsigned int,3> IndexNodes_T1; // nodes of first triangle
array_1d<unsigned int,3> IndexNodes_T2; // nodes of second triangle
array_1d<double,4> ElementalDistances_T1;
array_1d<double,4> ElementalDistances_T2;
double dist_T1;
double dist_T2;
// Generate 2 triangles out of the 4 int nodes which form a parallelogram together
// Therefor first define arbitrarily a set of 3 nodes which form a triangle and search for the 2 nodes
// of the triangle which form the other triangle together with the fourth node
IndexNodes_T1[0] = 0;
IndexNodes_T1[1] = 1;
IndexNodes_T1[2] = 2;
FindIndexNodesOfTriangle2(NodesOfApproximatedStructure,IndexNodes_T2);
// Compute distance for first triangle
CalcSignedDistancesToApproxTriangle( i_fluid_element , NodesOfApproximatedStructure , IntersectedTetEdges,
ElementalDistances_T1 , IndexNodes_T1 );
// Compute distance for second triangle
CalcSignedDistancesToApproxTriangle( i_fluid_element , NodesOfApproximatedStructure , IntersectedTetEdges,
ElementalDistances_T2 , IndexNodes_T2 );
// Determine about the minimal distance by considering the distances to both triangles
for(unsigned int i_TetNode = 0 ; i_TetNode < 4 ; i_TetNode++)
{
dist_T1 = ElementalDistances_T1[i_TetNode];
dist_T2 = ElementalDistances_T2[i_TetNode];
// Keep the signed distance whose magnitude is smaller.
if(fabs(dist_T1) < fabs(dist_T2))
ElementalDistances[i_TetNode] = dist_T1;
else
ElementalDistances[i_TetNode] = dist_T2;
}
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Determines the node indices of the second triangle that, together with the
/// arbitrarily chosen first triangle {0,1,2}, splits the parallelogram formed
/// by the four intersection nodes.
///
/// Strategy: the second triangle consists of the 4th node plus the two nodes
/// of {0,1,2} closest to it — equivalently, the node of {0,1,2} FARTHEST from
/// node 3 is excluded.
///
/// @param NodesOfApproximatedStructure the four intersection nodes (index 3 is the remaining node)
/// @param IndexNodes_T2 [out] indices of the three nodes forming the second triangle
void FindIndexNodesOfTriangle2(std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure,
                               array_1d<unsigned int,3>& IndexNodes_T2)
{
    double maxDist = 0;
    unsigned int indexExcludedNode = 1000000; // sentinel: index of the node which is not part of the second triangle
    array_1d<double,3> TrianglePoint;
    array_1d<double,3> RemainingPoint;
    array_1d<double,3> DistVecNode;
    RemainingPoint = NodesOfApproximatedStructure[3].Coordinates;
    // Find the first-triangle node farthest from the remaining (4th) node.
    for(unsigned int i_TriangleNode = 0 ; i_TriangleNode < 3 ; i_TriangleNode++)
    {
        TrianglePoint = NodesOfApproximatedStructure[i_TriangleNode].Coordinates;
        DistVecNode[0] = RemainingPoint[0] - TrianglePoint[0];
        DistVecNode[1] = RemainingPoint[1] - TrianglePoint[1];
        DistVecNode[2] = RemainingPoint[2] - TrianglePoint[2];
        // Hoisted: norm_2 was previously evaluated twice per candidate node.
        const double dist = norm_2(DistVecNode);
        if(dist > maxDist)
        {
            maxDist = dist;
            indexExcludedNode = i_TriangleNode;
        }
    }
    // Assign the "not excluded" nodes to the index vector of the second triangle.
    unsigned int indexCursor = 0;
    for(unsigned int k = 0 ; k < 3 ; k++)
    {
        if(indexExcludedNode != k)
        {
            IndexNodes_T2[indexCursor] = k;
            indexCursor += 1;
        }
    }
    // The remaining (4th) node always belongs to the second triangle.
    IndexNodes_T2[2] = 3;
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Computes, for every node of the fluid tetrahedron, the SIGNED distance to
/// the approximated structure triangle selected by IndexNodes.
/// Magnitude: point-to-triangle distance. Sign: the node is tested against the
/// structure-element normals stored at ALL intersection nodes of the cut
/// tetrahedron edges — positive = outside, negative = inside, zero = on the
/// surface (within the class-wide epsilon tolerance).
void CalcSignedDistancesToApproxTriangle( ModelPart::ElementsContainerType::iterator& i_fluid_element,
std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure,
std::vector<TetEdgeStruct> IntersectedTetEdges,
array_1d<double,4>& ElementalDistances,
array_1d<unsigned int,3> IndexNodes)
{
Geometry< Node<3> >& rFluidGeom = i_fluid_element->GetGeometry();
for(unsigned int i_TetNode = 0 ; i_TetNode < 4 ; i_TetNode++)
{
array_1d<double,3> TetNode;
array_1d<double,3> IntersectionNode1Coord;
array_1d<double,3> IntersectionNode2Coord;
array_1d<double,3> IntersectionNode3Coord;
Point ApproxTrianglePoint1;
Point ApproxTrianglePoint2;
Point ApproxTrianglePoint3;
double UnsignedDistance;
double InnerProduct;
unsigned int IndexNode1;
unsigned int IndexNode2;
unsigned int IndexNode3;
// Get coordinates of the current fluid element node
TetNode = rFluidGeom(i_TetNode)->Coordinates();
// Select the three intersection nodes that define the approximating triangle
IndexNode1 = IndexNodes[0];
IndexNode2 = IndexNodes[1];
IndexNode3 = IndexNodes[2];
IntersectionNode1Coord = NodesOfApproximatedStructure[IndexNode1].Coordinates;
IntersectionNode2Coord = NodesOfApproximatedStructure[IndexNode2].Coordinates;
IntersectionNode3Coord = NodesOfApproximatedStructure[IndexNode3].Coordinates;
ApproxTrianglePoint1 = Point(IntersectionNode1Coord[0] , IntersectionNode1Coord[1] , IntersectionNode1Coord[2]);
ApproxTrianglePoint2 = Point(IntersectionNode2Coord[0] , IntersectionNode2Coord[1] , IntersectionNode2Coord[2]);
ApproxTrianglePoint3 = Point(IntersectionNode3Coord[0] , IntersectionNode3Coord[1] , IntersectionNode3Coord[2]);
// Compute the (unsigned) distance from the tet node to the current triangle
UnsignedDistance = GeometryUtils::PointDistanceToTriangle3D(ApproxTrianglePoint1, ApproxTrianglePoint2 , ApproxTrianglePoint3 , Point(TetNode[0],TetNode[1],TetNode[2]));
// Sign determination: project (TetNode - IntersectionNode) onto the structure
// normal at EVERY intersection node of the cut edges. A positive projection
// anywhere means the node lies outside the structure.
bool TetNodeIsInsideStructure = true;
bool TetNodeIsOnStructure = true;
array_1d <double,3> DistVec;
array_1d <double,3> NormalAtIntersectionNode;
for( unsigned int i_TetEdge = 0 ; i_TetEdge < IntersectedTetEdges.size() ; i_TetEdge++ )
{
for( unsigned int i_IntNode = 0 ; i_IntNode < IntersectedTetEdges[i_TetEdge].IntNodes.size() ; i_IntNode++ )
{
DistVec[0] = TetNode[0] - IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode].Coordinates[0];
DistVec[1] = TetNode[1] - IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode].Coordinates[1];
DistVec[2] = TetNode[2] - IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode].Coordinates[2];
NormalAtIntersectionNode = IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode].StructElemNormal;
InnerProduct = inner_prod(DistVec,NormalAtIntersectionNode);
if(InnerProduct > epsilon)
{
// node is on the positive side of at least one normal -> outside
TetNodeIsInsideStructure = false;
TetNodeIsOnStructure = false;
}
else if (InnerProduct < -epsilon)
TetNodeIsOnStructure = false;
}
}
// Assign distances as nodal solution values ( + = outside of structure, - = inside structure)
if( TetNodeIsInsideStructure == true )
ElementalDistances[i_TetNode] = -UnsignedDistance;
else if( TetNodeIsOnStructure == true )
ElementalDistances[i_TetNode] = 0;
else
ElementalDistances[i_TetNode] = +UnsignedDistance;
}
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Merges freshly computed elemental distances into the element and its nodes,
/// keeping — per tetrahedron node — the value of smaller magnitude.
/// The element's ELEMENTAL_DISTANCES variable and the nodal DISTANCE solution
/// step value (used for visualization) are both updated.
void AssignDistancesToElements(ModelPart::ElementsContainerType::iterator& i_fluid_element,
                               array_1d<double,4> ElementalDistances)
{
    Geometry< Node<3> >& rFluidGeom = i_fluid_element->GetGeometry();
    // Hoisted: GetValue() performs a variable-container lookup; fetch the
    // currently stored distances once instead of twice per node.
    const array_1d<double,4>& rStoredDistances = i_fluid_element->GetValue(ELEMENTAL_DISTANCES);
    array_1d<double,4> MinElementalDistances;
    for(unsigned int i_TetNode = 0 ; i_TetNode < 4 ; i_TetNode++)
    {
        // Element level: keep the smaller-magnitude distance.
        if( fabs(ElementalDistances[i_TetNode]) < fabs(rStoredDistances[i_TetNode]) )
            MinElementalDistances[i_TetNode] = ElementalDistances[i_TetNode];
        else
            MinElementalDistances[i_TetNode] = rStoredDistances[i_TetNode];
        // Node level (for visualization): same smaller-magnitude rule.
        if( fabs(ElementalDistances[i_TetNode]) < fabs(rFluidGeom[i_TetNode].GetSolutionStepValue(DISTANCE)) )
            rFluidGeom[i_TetNode].GetSolutionStepValue(DISTANCE) = ElementalDistances[i_TetNode];
    }
    // Write back after the loop; rStoredDistances is no longer read.
    i_fluid_element->SetValue(ELEMENTAL_DISTANCES,MinElementalDistances);
}
///******************************************************************************************************************
///******************************************************************************************************************
/*
void GenerateOctree()
{
Timer::Start("Generating Octree");
// Setting the boundingbox for non-normalized coordinates
const int dimension = 3;
double boundingBox_low[3],boundingBox_high[3];
for(int i = 0; i < dimension; i++)
{
boundingBox_low[i] = mrSkinModelPart.NodesBegin()->Coordinates()[i];
boundingBox_high[i] = mrSkinModelPart.NodesBegin()->Coordinates()[i];
}
for(ModelPart::NodeIterator i_node = mrSkinModelPart.NodesBegin();
i_node != mrSkinModelPart.NodesEnd();
i_node++)
{
for(int i = 0; i < dimension; i++)
{
if(i_node->Coordinates()[i] < boundingBox_low[i]) boundingBox_low[i] = i_node->Coordinates()[i];
if(i_node->Coordinates()[i] > boundingBox_high[i]) boundingBox_high[i] = i_node->Coordinates()[i];
}
}
mOctree.SetBoundingBox(boundingBox_low,boundingBox_high);
//mOctree.RefineWithUniformSize(0.0625);
for(ModelPart::NodeIterator i_node = mrSkinModelPart.NodesBegin() ; i_node != mrSkinModelPart.NodesEnd() ; i_node++)
{
double temp_point[3];
const array_1d<double,3>& r_coordinates = i_node->Coordinates();
temp_point[0] = r_coordinates[0];
temp_point[1] = r_coordinates[1];
temp_point[2] = r_coordinates[2];
mOctree.Insert(temp_point);
}
//mOctree.Constrain2To1(); // To be removed. Pooyan.
for(ModelPart::ElementIterator i_element = mrSkinModelPart.ElementsBegin() ; i_element != mrSkinModelPart.ElementsEnd() ; i_element++)
{
mOctree.Insert(*(i_element).base());
}
Timer::Stop("Generating Octree");
// octree.Insert(*(mrSkinModelPart.ElementsBegin().base()));
KRATOS_WATCH(mOctree);
// std::cout << "######## WRITING OCTREE MESH #########" << std::endl;
// std::ofstream myfile;
// myfile.open ("unserbaum.post.msh");
// mOctree.PrintGiDMesh(myfile);
// myfile.close();
}
*/
/// Builds the octree used by the ray-casting distance computation:
/// - the bounding box is taken from the FLUID model part nodes,
/// - the SKIN (structure) model part nodes are inserted as points,
/// - the skin CONDITIONS are inserted as the intersectable objects.
void GenerateOctree()
{
Timer::Start("Generating Octree");
//std::cout << "Generating the Octree..." << std::endl;
double low[3];
double high[3];
// seed the bounding box with the first fluid node
for (int i = 0 ; i < 3; i++)
{
low[i] = high[i] = mrFluidModelPart.NodesBegin()->Coordinates()[i];
}
// expand the bounding box over all FLUID nodes
for(ModelPart::NodeIterator i_node = mrFluidModelPart.NodesBegin();
i_node != mrFluidModelPart.NodesEnd();
i_node++)
{
const array_1d<double,3>& r_coordinates = i_node->Coordinates();
for (int i = 0 ; i < 3; i++)
{
low[i] = r_coordinates[i] < low[i] ? r_coordinates[i] : low[i];
high[i] = r_coordinates[i] > high[i] ? r_coordinates[i] : high[i];
}
}
// KRATOS_WATCH( low[0] )
// KRATOS_WATCH( low[1] )
// KRATOS_WATCH( low[2] )
// KRATOS_WATCH( "" )
// KRATOS_WATCH( high[0] )
// KRATOS_WATCH( high[1] )
// KRATOS_WATCH( high[2] )
mOctree.SetBoundingBox(low,high);
//mOctree.RefineWithUniformSize(0.0625);
// insert all STRUCTURE (skin) nodes as refinement points
for(ModelPart::NodeIterator i_node = mrSkinModelPart.NodesBegin();
i_node != mrSkinModelPart.NodesEnd();
i_node++)
{
double temp_point[3];
temp_point[0] = i_node->X();
temp_point[1] = i_node->Y();
temp_point[2] = i_node->Z();
mOctree.Insert(temp_point);
}
//mOctree.Constrain2To1(); // To be removed. Pooyan.
// insert all structure CONDITIONS (not elements) as intersectable objects
//for(ModelPart::ElementIterator i_element = mrSkinModelPart.ElementsBegin();
// i_element != mrSkinModelPart.ElementsEnd();
// i_element++)
for(ModelPart::ConditionIterator i_cond = mrSkinModelPart.ConditionsBegin();
i_cond != mrSkinModelPart.ConditionsEnd();
i_cond++)
{
mOctree.Insert(*(i_cond).base());
}
Timer::Stop("Generating Octree");
// KRATOS_WATCH(mOctree);
// std::cout << "######## WRITING OCTREE MESH #########" << std::endl;
// std::ofstream myfile;
// myfile.open ("octree.post.msh");
// mOctree.PrintGiDMesh(myfile);
// myfile.close();
//std::cout << "Generating the Octree finished" << std::endl;
}
///******************************************************************************************************************
///******************************************************************************************************************
///******************************************************************************************************************
///******************************************************************************************************************
/// Allocates the per-cell node-data containers of all octree leaves (in
/// parallel) and then creates the corner nodes of every leaf, numbering
/// them consecutively after the last body-model-part node.
void GenerateNodes()
{
Timer::Start("Generating Nodes");
std::vector<OctreeType::cell_type*> all_leaves;
mOctree.GetAllLeavesVector(all_leaves);
// allocate each leaf's data container (independent per leaf -> parallel)
IndexPartition<std::size_t>(all_leaves.size()).for_each([&](std::size_t Index){
*(all_leaves[Index]->pGetDataPointer()) = ConfigurationType::AllocateData();
});
// new node ids continue after the existing body model part nodes
std::size_t last_id = mrBodyModelPart.NumberOfNodes() + 1;
//KRATOS_WATCH(all_leaves.size());
// sequential loop: GenerateCellNode mutates shared neighbour cells and last_id
for (std::size_t i = 0; i < all_leaves.size(); i++)
{
//KRATOS_WATCH(i)
CellType* cell = all_leaves[i];
GenerateCellNode(cell, last_id);
}
Timer::Stop("Generating Nodes");
}
/// Creates the corner nodes (positions 0-7; position 8 would be the center)
/// of a cell that are not yet set, assigns them consecutive ids, registers
/// them in mOctreeNodes, and shares them with the neighbouring cells.
///
/// @param pCell  the octree cell to populate
/// @param LastId [in/out] next free node id; incremented for every node created
void GenerateCellNode(CellType* pCell, std::size_t& LastId)
{
    // Hoisted: dereference the cell's data container once instead of per use.
    ConfigurationType::data_type& rCellData = *(pCell->pGetData());
    for (int i_pos=0; i_pos < 8; i_pos++) // position 8 is for center
    {
        if(rCellData[i_pos] == 0) // corner not yet created by a neighbour
        {
            ConfigurationType::cell_node_data_type* p_node = new ConfigurationType::cell_node_data_type;
            p_node->Id() = LastId++;
            //KRATOS_WATCH(LastId)
            rCellData[i_pos] = p_node;
            mOctreeNodes.push_back(p_node);
            // share the new node with all cells touching this corner
            SetNodeInNeighbours(pCell,i_pos,p_node);
        }
    }
}
/// Shares the node created at a cell corner with all neighbouring cells that
/// touch the same geometric point, so each corner is represented by a single
/// CellNodeDataType instance.
///
/// @param pCell    cell that owns the freshly created node
/// @param Position local corner position (0-7) inside pCell
/// @param pNode    the node to propagate into the neighbours
void SetNodeInNeighbours(CellType* pCell, int Position, CellNodeDataType* pNode)
{
CellType::key_type point_key[3];
pCell->GetKey(Position, point_key);
// visit all 8 potential neighbour directions around the corner
for (std::size_t i_direction = 0; i_direction < 8; i_direction++) {
CellType::key_type neighbour_key[3];
if (pCell->GetNeighbourKey(Position, i_direction, neighbour_key)) {
CellType* neighbour_cell = mOctree.pGetCell(neighbour_key);
// skip missing neighbours and the owning cell itself
if (!neighbour_cell || (neighbour_cell == pCell))
continue;
// corner position of the same point inside the neighbour cell
std::size_t position = neighbour_cell->GetLocalPosition(point_key);
if((*neighbour_cell->pGetData())[position])
{
// the slot should be empty; a set slot indicates an inconsistent key/position
std::cout << "ERROR!! Bad Position calculated!!!!!!!!!!! position :" << position << std::endl;
continue;
}
(*neighbour_cell->pGetData())[position] = pNode;
}
}
}
/// Computes the signed distance of every fluid-model-part node by ray casting
/// (CalculateNodeDistance), parallelized over the nodes.
void CalculateDistance2()
{
Timer::Start("Calculate Distances2");
ModelPart::NodesContainerType::ContainerType& nodes = mrFluidModelPart.NodesArray();
int nodes_size = nodes.size();
// // first of all we reset the node distance to 1.00 which is the maximum distance in our normalized space.
//#pragma omp parallel for firstprivate(nodes_size)
// for(int i = 0 ; i < nodes_size ; i++)
// nodes[i]->GetSolutionStepValue(DISTANCE) = 1.00;
// NOTE(review): 'leaves' is gathered but not used in the active code below
std::vector<CellType*> leaves;
mOctree.GetAllLeavesVector(leaves);
//int leaves_size = leaves.size();
// for(int i = 0 ; i < leaves_size ; i++)
// CalculateNotEmptyLeavesDistance(leaves[i]);
// each node is processed independently -> parallel over nodes
IndexPartition<std::size_t>(nodes_size).for_each([&](std::size_t Index){
CalculateNodeDistance(*(nodes[Index]));
});
Timer::Stop("Calculate Distances2");
}
// void CalculateDistance3()
// {
// Timer::Start("Calculate Distances2");
// ModelPart::NodesContainerType::ContainerType& nodes = mrFluidModelPart.NodesArray();
// int nodes_size = nodes.size();
//// // first of all we reset the node distance to 1.00 which is the maximum distnace in our normalized space.
//#pragma omp parallel for firstprivate(nodes_size)
// for(int i = 0 ; i < nodes_size ; i++)
// nodes[i]->GetSolutionStepValue(DISTANCE) = 1.00;
// std::vector<CellType*> leaves;
// mOctree.GetAllLeavesVector(leaves);
// int leaves_size = leaves.size();
// for(int i = 0 ; i < leaves_size ; i++)
// CalculateNotEmptyLeavesDistance(leaves[i]);
//#pragma omp parallel for firstprivate(nodes_size)
// for(int i = 0 ; i < nodes_size ; i++)
// {
// CalculateNodeDistance(*(nodes[i]));
// }
// Timer::Stop("Calculate Distances2");
// }
// void CalculateDistance4()
// {
// Timer::Start("Calculate Distances3");
// ModelPart::NodesContainerType::ContainerType& nodes = mrFluidModelPart.NodesArray();
// int nodes_size = nodes.size();
// std::vector<CellType*> leaves;
// mOctree.GetAllLeavesVector(leaves);
// int leaves_size = leaves.size();
//#pragma omp parallel for firstprivate(nodes_size)
// for(int i = 0 ; i < nodes_size ; i++)
// {
// CalculateNodeDistanceFromCell(*(nodes[i]));
// }
// Timer::Stop("Calculate Distances3");
// }
/// Computes the signed distance of every OCTREE node (mOctreeNodes):
/// initializes all distances to the normalized-space maximum 1.0, seeds the
/// nodes of non-empty leaves with the exact distance to the contained
/// geometry, then propagates signs/distances by ray casting.
void CalculateDistance()
{
Timer::Start("Calculate Distances");
ConfigurationType::data_type& nodes = mOctreeNodes;
int nodes_size = nodes.size();
// first of all we reset the node distance to 1.00 which is the maximum distance in our normalized space.
IndexPartition<std::size_t>(nodes_size).for_each([&](std::size_t Index){
nodes[Index]->Distance() = 1.00;
});
// seed: exact distances for nodes of leaves that contain geometry
std::vector<CellType*> leaves;
mOctree.GetAllLeavesVector(leaves);
int leaves_size = leaves.size();
for(int i = 0 ; i < leaves_size ; i++)
CalculateNotEmptyLeavesDistance(leaves[i]);
// NOTE(review): only direction 0 (x) is ray-cast here; the loop bound is 1 on purpose?
for(int i_direction = 0 ; i_direction < 1 ; i_direction++)
{
//#pragma omp parallel for firstprivate(nodes_size)
for(int i = 0 ; i < nodes_size ; i++)
{
// skip nodes on the upper boundary of the normalized (0,1)^3 space
if(nodes[i]->X() < 1.00 && nodes[i]->Y() < 1.00 && nodes[i]->Z() < 1.00)
// if((*nodes[i])[i_direction] == 0.00)
CalculateDistance(*(nodes[i]), i_direction);
}
}
Timer::Stop("Calculate Distances");
}
/// Casts an axis-aligned ray through rNode's position along i_direction
/// (starting from the lower extreme of the normalized (0,1)^3 space) and
/// updates the distance of EVERY octree node the ray passes: the sign is
/// determined by intersection parity (inside after an odd number of crossings)
/// and the magnitude by the closest crossing.
void CalculateDistance(CellNodeDataType& rNode, int i_direction)
{
double coords[3] = {rNode.X(), rNode.Y(), rNode.Z()};
// KRATOS_WATCH_3(coords);
//This function must color the positions in space defined by 'coords'.
//coords is of dimension (3) normalized in (0,1)^3 space
typedef Element::GeometryType triangle_type;
typedef std::vector<std::pair<double, triangle_type*> > intersections_container_type;
intersections_container_type intersections;
ConfigurationType::data_type nodes_array;
const double epsilon = 1e-12;
double distance = 1.0;
// Creating the ray
double ray[3] = {coords[0], coords[1], coords[2]};
ray[i_direction] = 0; // starting from the lower extreme
// KRATOS_WATCH_3(ray)
// collect both the geometry crossings and the octree nodes on the ray
GetIntersectionsAndNodes(ray, i_direction, intersections, nodes_array);
// KRATOS_WATCH(nodes_array.size())
for (std::size_t i_node = 0; i_node < nodes_array.size() ; i_node++)
{
double coord = (*nodes_array[i_node])[i_direction];
// KRATOS_WATCH(intersections.size());
// parity walk: flip the sign at every crossing below the node's coordinate
int ray_color= 1;
std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin();
while (i_intersection != intersections.end()) {
double d = coord - i_intersection->first;
if (d > epsilon) {
// crossing strictly below the node: flip parity, remember distance
ray_color = -ray_color;
distance = d;
} else if (d > -epsilon) {//interface
distance = 0.00;
break;
} else {
// first crossing above the node: keep the closer of the two sides
if(distance > -d)
distance = -d;
break;
}
i_intersection++;
}
distance *= ray_color;
// merge with the node's stored distance: smaller magnitude wins; on sign
// disagreement only the sign is corrected
double& node_distance = nodes_array[i_node]->Distance();
if(fabs(distance) < fabs(node_distance))
node_distance = distance;
else if (distance*node_distance < 0.00) // assigning the correct sign
node_distance = -node_distance;
}
}
/// For a leaf cell that contains geometry objects, sets each of its 8 corner
/// nodes' distance to the minimum point-to-triangle distance over all
/// contained objects (only if smaller than the already stored value).
void CalculateNotEmptyLeavesDistance(CellType* pCell)
{
    //typedef Element::GeometryType triangle_type;
    typedef OctreeType::cell_type::object_container_type object_container_type;
    object_container_type* objects = (pCell->pGetObjects());
    // There are no intersections in empty cells
    if (objects->empty())
        return;
    for (int i_pos=0; i_pos < 8; i_pos++) // position 8 is for center
    {
        // Hoisted out of the object loop: the corner coordinates depend only
        // on the cell/position, not on the object being tested.
        CellType::key_type keys[3];
        pCell->GetKey(i_pos,keys);
        double cell_point[3];
        mOctree.CalculateCoordinates(keys,cell_point);
        const Point corner(cell_point[0], cell_point[1], cell_point[2]);
        double distance = 1.00; // maximum distance is 1.00
        for(object_container_type::iterator i_object = objects->begin(); i_object != objects->end(); i_object++)
        {
            double d = GeometryUtils::PointDistanceToTriangle3D((*i_object)->GetGeometry()[0], (*i_object)->GetGeometry()[1], (*i_object)->GetGeometry()[2], corner);
            if(d < distance)
                distance = d;
        }
        // keep the smaller of stored vs. newly computed distance
        double& node_distance = (*(pCell->pGetData()))[i_pos]->Distance();
        if(distance < node_distance)
            node_distance = distance;
    }
}
/// Computes a candidate signed distance for a single fluid node by ray
/// casting and merges it into the node's DISTANCE solution step value:
/// the smaller magnitude wins; on sign disagreement only the sign is flipped.
void CalculateNodeDistance(Node<3>& rNode)
{
    double position[3] = {rNode.X(), rNode.Y(), rNode.Z()};
    const double candidate = DistancePositionInSpace(position);
    double& rStoredDistance = rNode.GetSolutionStepValue(DISTANCE);
    //const double epsilon = 1.00e-12;
    if(fabs(rStoredDistance) > fabs(candidate))
        rStoredDistance = candidate;          // keep the smaller magnitude
    else if (candidate * rStoredDistance < 0.00)
        rStoredDistance = -rStoredDistance;   // correct the sign only
}
// void CalculateNodeDistanceFromCell(Node<3>& rNode)
// {
// OctreeType::key_type node_key[3] = {octree->CalcKeyNormalized(rNode.X()), octree->CalcKeyNormalized(rNode.Y()), octree->CalcKeyNormalized(rNode.Z())};
// OctreeType::cell_type* pcell = octree->pGetCell(node_key);
// object_container_type* objects = (pCell->pGetObjects());
// // We interpolate the cell distances for the node in empty cells
// if (objects->empty())
// {
// }
// double distance = DistancePositionInSpace(coord);
// double& node_distance = rNode.GetSolutionStepValue(DISTANCE);
// //const double epsilon = 1.00e-12;
// if(fabs(node_distance) > fabs(distance))
// node_distance = distance;
// else if (distance*node_distance < 0.00) // assigning the correct sign
// node_distance = -node_distance;
// }
/// Computes the signed distance of an arbitrary point by casting one
/// axis-aligned ray per coordinate direction and combining the three results:
/// per direction, parity of crossings gives the sign and the nearest crossing
/// the magnitude; the final result is the direction value of smallest magnitude.
double DistancePositionInSpace(double* coords)
{
//This function must color the positions in space defined by 'coords'.
//coords is of dimension (3) normalized in (0,1)^3 space
typedef Element::GeometryType triangle_type;
typedef std::vector<std::pair<double, triangle_type*> > intersections_container_type;
intersections_container_type intersections;
const int dimension = 3;
const double epsilon = 1e-12;
double distances[3] = {1.0, 1.0, 1.0};
for (int i_direction = 0; i_direction < dimension; i_direction++)
{
// Creating the ray through the point, starting at the lower extreme
double ray[3] = {coords[0], coords[1], coords[2]};
mOctree.NormalizeCoordinates(ray);
ray[i_direction] = 0; // starting from the lower extreme
GetIntersections(ray, i_direction, intersections);
// if(intersections.size() == 1)
// KRATOS_WATCH_3(ray)
// KRATOS_WATCH(intersections.size());
// parity walk over the sorted crossings below the point's coordinate
int ray_color= 1;
std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin();
while (i_intersection != intersections.end()) {
double d = coords[i_direction] - i_intersection->first;
if (d > epsilon) {
// crossing below the point: flip parity, remember distance
ray_color = -ray_color;
distances[i_direction] = d;
// if(distances[i_direction] > d) // I think this is redundant. Pooyan.
// {
// if(ray_color > 0.00)
// distances[i_direction] = d;
// else
// distances[i_direction] = -d;
// }
} else if (d > -epsilon) {//interface
distances[i_direction] = 0.00;
break;
} else {
// first crossing above the point: keep the closer side
if(distances[i_direction] > -d)
distances[i_direction] = -d;
break;
}
i_intersection++;
}
distances[i_direction] *= ray_color;
}
// if(distances[0]*distances[1] < 0.00 || distances[2]*distances[1] < 0.00)
// KRATOS_WATCH_3(distances);
// take the direction result of smallest magnitude (sign included)
double distance = (fabs(distances[0]) > fabs(distances[1])) ? distances[1] : distances[0];
distance = (fabs(distance) > fabs(distances[2])) ? distances[2] : distance;
return distance;
}
/// Walks an axis-aligned ray through the octree, collecting (a) all
/// geometry-crossing points (deduplicated within epsilon) and (b) all octree
/// nodes lying exactly on the ray, for later sign/distance assignment.
///
/// @param ray         ray origin, dimension 3, normalized in (0,1)^3 space
/// @param direction   0, 1 or 2 for x, y, z respectively
/// @param intersections [out] sorted, unique crossing coordinates with their triangles
/// @param rNodesArray [out] octree nodes located on the ray
void GetIntersectionsAndNodes(double* ray, int direction, std::vector<std::pair<double,Element::GeometryType*> >& intersections, ConfigurationType::data_type& rNodesArray)
{
//This function passes the ray through the model and gives the hit point to all objects in its way
//ray is of dimension (3) normalized in (0,1)^3 space
// direction can be 0,1,2 which are x,y and z respectively
const double epsilon = 1.00e-12;
// first clearing the intersections points vector
intersections.clear();
OctreeType* octree = &mOctree;
OctreeType::key_type ray_key[3] = {octree->CalcKeyNormalized(ray[0]), octree->CalcKeyNormalized(ray[1]), octree->CalcKeyNormalized(ray[2])};
OctreeType::key_type cell_key[3];
// getting the entrance cell from lower extreme
ray_key[direction] = 0;
OctreeType::cell_type* cell = octree->pGetCell(ray_key);
// march cell by cell along the ray direction until leaving the octree
while (cell) {
std::size_t position = cell->GetLocalPosition(ray_key); // Is this the local position!?!?!?!
OctreeType::key_type node_key[3];
cell->GetKey(position, node_key);
// the corner coincides with the ray key -> this octree node lies on the ray
if((node_key[0] == ray_key[0]) && (node_key[1] == ray_key[1]) && (node_key[2] == ray_key[2]))
{
if(cell->pGetData())
{
if(cell->pGetData()->size() > position)
{
CellNodeDataType* p_node = (*cell->pGetData())[position];
if(p_node)
{
//KRATOS_WATCH(p_node->Id())
rNodesArray.push_back(p_node);
}
}
else
KRATOS_WATCH(cell->pGetData()->size())
}
}
// std::cout << ".";
// collect the geometry crossings inside this cell
GetCellIntersections(cell, ray, ray_key, direction, intersections);
// Add the cell's middle node if existed
// cell->GetKey(8, cell_key); // 8 is the central position
// ray_key[direction]=cell_key[direction]; // positioning the ray in the middle of cell in its direction
// position = cell->GetLocalPosition(ray_key);
// if(position < 27) // principal nodes
// {
// if(cell->pGetData())
// {
// if(cell->pGetData()->size() > position)
// {
// Node<3>* p_node = (*cell->pGetData())[position];
// if(p_node)
// {
// //KRATOS_WATCH(p_node->Id())
// rNodesArray.push_back(p_node);
// }
// }
// else
// KRATOS_WATCH(cell->pGetData()->size())
// }
// }
// else
// {
// KRATOS_WATCH(position);
// KRATOS_WATCH(*cell);
// }
// go to the next cell
if (cell->GetNeighbourKey(1 + direction * 2, cell_key)) {
ray_key[direction] = cell_key[direction];
cell = octree->pGetCell(ray_key);
ray_key[direction] -= 1 ;//the key returned by GetNeighbourKey is inside the cell (minkey +1), to ensure that the corresponding
//cell get in pGetCell is the right one.
} else
cell = NULL;
}
// KRATOS_WATCH(rNodesArray.size());
// now eliminating the repeated objects
if (!intersections.empty()) {
//sort
std::sort(intersections.begin(), intersections.end());
// unique: merge crossings closer than epsilon (same hit reported by adjacent cells)
std::vector<std::pair<double, Element::GeometryType*> >::iterator i_begin = intersections.begin();
std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin();
while (++i_begin != intersections.end()) {
// considering the very near points as the same points
if (fabs(i_begin->first - i_intersection->first) > epsilon) // if the hit points are far enough they are not the same
*(++i_intersection) = *i_begin;
}
intersections.resize((++i_intersection) - intersections.begin());
}
}
/// Walks an axis-aligned ray through the octree collecting all geometry
/// crossings, sorted and deduplicated within epsilon.
/// NOTE(review): near-duplicate of GetIntersectionsAndNodes without the
/// octree-node collection — candidates for sharing a helper.
///
/// @param ray       ray origin, dimension 3, normalized in (0,1)^3 space
/// @param direction 0, 1 or 2 for x, y, z respectively
/// @param intersections [out] sorted, unique crossings with their triangles
void GetIntersections(double* ray, int direction, std::vector<std::pair<double,Element::GeometryType*> >& intersections)
{
//This function passes the ray through the model and gives the hit point to all objects in its way
//ray is of dimension (3) normalized in (0,1)^3 space
// direction can be 0,1,2 which are x,y and z respectively
const double epsilon = 1.00e-12;
// first clearing the intersections points vector
intersections.clear();
OctreeType* octree = &mOctree;
OctreeType::key_type ray_key[3] = {octree->CalcKeyNormalized(ray[0]), octree->CalcKeyNormalized(ray[1]), octree->CalcKeyNormalized(ray[2])};
OctreeType::key_type cell_key[3];
// getting the entrance cell from lower extreme
OctreeType::cell_type* cell = octree->pGetCell(ray_key);
// march cell by cell along the ray direction until leaving the octree
while (cell) {
// std::cout << ".";
GetCellIntersections(cell, ray, ray_key, direction, intersections);
// go to the next cell
if (cell->GetNeighbourKey(1 + direction * 2, cell_key)) {
ray_key[direction] = cell_key[direction];
cell = octree->pGetCell(ray_key);
ray_key[direction] -= 1 ;//the key returned by GetNeighbourKey is inside the cell (minkey +1), to ensure that the corresponding
//cell get in pGetCell is the right one.
} else
cell = NULL;
}
// now eliminating the repeated objects
if (!intersections.empty()) {
//sort
std::sort(intersections.begin(), intersections.end());
// unique: merge crossings closer than epsilon (same hit reported by adjacent cells)
std::vector<std::pair<double, Element::GeometryType*> >::iterator i_begin = intersections.begin();
std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin();
while (++i_begin != intersections.end()) {
// considering the very near points as the same points
if (fabs(i_begin->first - i_intersection->first) > epsilon) // if the hit points are far enough they are not the same
*(++i_intersection) = *i_begin;
}
intersections.resize((++i_intersection) - intersections.begin());
}
}
/// Intersects the ray segment spanning one octree cell with every geometry
/// object stored in that cell; each proper (non-coplanar) hit is appended to
/// 'intersections' as (coordinate along 'direction', triangle geometry).
/// @return always 0 (also when the cell is empty)
int GetCellIntersections(OctreeType::cell_type* cell, double* ray,
OctreeType::key_type* ray_key, int direction,
std::vector<std::pair<double, Element::GeometryType*> >& intersections) {
//This function passes the ray through the cell and gives the hit point to all objects in its way
//ray is of dimension (3) normalized in (0,1)^3 space
// direction can be 0,1,2 which are x,y and z respectively
//typedef Element::GeometryType triangle_type;
typedef OctreeType::cell_type::object_container_type object_container_type;
object_container_type* objects = (cell->pGetObjects());
// There are no intersection in empty cells
if (objects->empty())
return 0;
// std::cout << "X";
// calculating the two extreme of the ray segment inside the cell
double ray_point1[3] = {ray[0], ray[1], ray[2]};
double ray_point2[3] = {ray[0], ray[1], ray[2]};
double normalized_coordinate;
mOctree.CalculateCoordinateNormalized(ray_key[direction], normalized_coordinate);
ray_point1[direction] = normalized_coordinate;
ray_point2[direction] = ray_point1[direction] + mOctree.CalcSizeNormalized(cell);
// segment end points back in original (non-normalized) coordinates
mOctree.ScaleBackToOriginalCoordinate(ray_point1);
mOctree.ScaleBackToOriginalCoordinate(ray_point2);
for (object_container_type::iterator i_object = objects->begin(); i_object != objects->end(); i_object++) {
double intersection[3]={0.00,0.00,0.00};
int is_intersected = IntersectionTriangleSegment((*i_object)->GetGeometry(), ray_point1, ray_point2, intersection); // This intersection has to be optimized for axis aligned rays
if (is_intersected == 1) // There is an intersection but not coplanar
intersections.push_back(std::pair<double, Element::GeometryType*>(intersection[direction], &((*i_object)->GetGeometry())));
//else if(is_intersected == 2) // coplanar case
}
return 0;
}
/// Ray/segment vs. triangle intersection (Möller-style parametric test).
/// @return -1 degenerate triangle; 0 no intersection; 1 proper intersection
///         (IntersectionPoint filled); 2 ray lies in the triangle plane.
/// NOTE(review): the segment upper bound (r > 1.0) is intentionally NOT
/// tested (see comment below), so this acts as a ray test past RayPoint2.
int IntersectionTriangleSegment(Element::GeometryType& rGeometry, double* RayPoint1, double* RayPoint2, double* IntersectionPoint)
{
// This is the adaption of the implementation provided in:
// http://www.softsurfer.com/Archive/algorithm_0105/algorithm_0105.htm#intersect_RayTriangle()
const double epsilon = 1.00e-12;
array_1d<double,3> u, v, n; // triangle vectors
array_1d<double,3> dir, w0, w; // ray vectors
double r, a, b; // params to calc ray-plane intersect
// get triangle edge vectors and plane normal
u = rGeometry[1] - rGeometry[0];
v = rGeometry[2] - rGeometry[0];
MathUtils<double>::CrossProduct(n, u, v); // cross product
if (norm_2(n) == 0) // triangle is degenerate
return -1; // do not deal with this case
for(int i = 0 ; i < 3 ; i++)
{
dir[i] = RayPoint2[i] - RayPoint1[i]; // ray direction vector
w0[i] = RayPoint1[i] - rGeometry[0][i];
}
a = -inner_prod(n,w0);
b = inner_prod(n,dir);
if (fabs(b) < epsilon) { // ray is parallel to triangle plane
// NOTE(review): exact float compare; a tolerance (fabs(a) < epsilon) may be
// intended here — confirm before changing, callers ignore the coplanar case.
if (a == 0) // ray lies in triangle plane
return 2;
else return 0; // ray disjoint from plane
}
// get intersect point of ray with triangle plane
r = a / b;
if (r < 0.0) // ray goes away from triangle
return 0; // => no intersect
// for a segment, also test if (r > 1.0) => no intersect
for(int i = 0 ; i < 3 ; i++)
IntersectionPoint[i] = RayPoint1[i] + r * dir[i]; // intersect point of ray and plane
// is I inside T? (barycentric-coordinate test)
double uu, uv, vv, wu, wv, D;
uu = inner_prod(u,u);
uv = inner_prod(u,v);
vv = inner_prod(v,v);
for(int i = 0 ; i < 3 ; i++)
w[i] = IntersectionPoint[i] - rGeometry[0][i];
wu = inner_prod(w,u);
wv = inner_prod(w,v);
D = uv * uv - uu * vv;
// get and test parametric coords
double s, t;
s = (uv * wv - vv * wu) / D;
if (s < 0.0 - epsilon || s > 1.0 + epsilon) // I is outside T
return 0;
t = (uv * wu - uu * wv) / D;
if (t < 0.0 - epsilon || (s + t) > 1.0 + epsilon) // I is outside T
return 0;
return 1; // I is in T
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
// Returns the class name as the process identification string.
std::string Info() const override
{
return "CalculateSignedDistanceTo3DConditionSkinProcess";
}
/// Print information about this object.
// Prints the process identification string to the given stream.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << "CalculateSignedDistanceTo3DConditionSkinProcess";
}
/// Print object's data.
// Intentionally empty: this process exposes no printable data.
void PrintData(std::ostream& rOStream) const override
{
}
/// Writes the octree leaves as a GiD post-process mesh: the octree node
/// coordinates followed by one 8-node hexahedron per leaf.
void PrintGiDMesh(std::ostream & rOStream) const {
    std::vector<CellType*> leaves;
    mOctree.GetAllLeavesVector(leaves);
    std::cout << "writing " << leaves.size() << " leaves" << std::endl;
    rOStream << "MESH \"leaves\" dimension 3 ElemType Hexahedra Nnode 8" << std::endl;
    rOStream << "# color 96 96 96" << std::endl;
    rOStream << "Coordinates" << std::endl;
    rOStream << "# node number coordinate_x coordinate_y coordinate_z " << std::endl;
    for(ConfigurationType::data_type::const_iterator i_node = mOctreeNodes.begin() ; i_node != mOctreeNodes.end() ; i_node++)
    {
        rOStream << (*i_node)->Id() << " " << (*i_node)->X() << " " << (*i_node)->Y() << " " << (*i_node)->Z() << std::endl;
    }
    std::cout << "Nodes written..." << std::endl;
    rOStream << "end coordinates" << std::endl;
    rOStream << "Elements" << std::endl;
    // Fixed: the header comment listed only 3 nodes, but 8 node ids are
    // written per hexahedral element below.
    rOStream << "# element node_1 node_2 node_3 node_4 node_5 node_6 node_7 node_8 material_number" << std::endl;
    for (std::size_t i = 0; i < leaves.size(); i++) {
        // leaves without allocated node data are skipped
        if ((leaves[i]->pGetData()))
        {
            ConfigurationType::data_type& nodes = (*(leaves[i]->pGetData()));
            rOStream << i + 1;
            for(int j = 0 ; j < 8 ; j++)
                rOStream << "  " << nodes[j]->Id();
            rOStream << std::endl;
        }
    }
    rOStream << "end elements" << std::endl;
}
/// Writes the octree node distances as a GiD post-process scalar result
/// ("Distance", one value per octree node id).
void PrintGiDResults(std::ostream & rOStream) const {
std::vector<CellType*> leaves;
mOctree.GetAllLeavesVector(leaves);
rOStream << "GiD Post Results File 1.0" << std::endl << std::endl;
rOStream << "Result \"Distance\" \"Kratos\" 1 Scalar OnNodes" << std::endl;
rOStream << "Values" << std::endl;
for(ConfigurationType::data_type::const_iterator i_node = mOctreeNodes.begin() ; i_node != mOctreeNodes.end() ; i_node++)
{
rOStream << (*i_node)->Id() << "  " << (*i_node)->Distance() << std::endl;
}
rOStream << "End Values" << std::endl;
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
ModelPart& mrSkinModelPart;
ModelPart& mrBodyModelPart;
ModelPart& mrFluidModelPart;
ConfigurationType::data_type mOctreeNodes;
OctreeType mOctree;
static const double epsilon;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
CalculateSignedDistanceTo3DConditionSkinProcess& operator=(CalculateSignedDistanceTo3DConditionSkinProcess const& rOther);
/// Copy constructor.
//CalculateSignedDistanceTo3DConditionSkinProcess(CalculateSignedDistanceTo3DConditionSkinProcess const& rOther);
///@}
}; // Class CalculateSignedDistanceTo3DConditionSkinProcess
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
inline std::istream& operator >> (std::istream& rIStream,
CalculateSignedDistanceTo3DConditionSkinProcess& rThis);
/// output stream function
inline std::ostream& operator << (std::ostream& rOStream,
const CalculateSignedDistanceTo3DConditionSkinProcess& rThis)
{
// Standard Kratos printing convention: info line, blank separator, then data.
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
const double CalculateSignedDistanceTo3DConditionSkinProcess::epsilon = 1e-12;
} // namespace Kratos.
#endif // KRATOS_CALCULATE_DISTANCE_CONDITION_PROCESS_H_INCLUDED defined
|
stat_ops.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "stat_ops.h"
#include "utility.h"
#include "constant.h"
// calculate norm
/* Return the squared 2-norm of `state`: sum over `dim` entries of the
 * squared magnitude |state[i]|^2.  For a normalized quantum state the
 * result is 1.0. */
double state_norm_squared(const CTYPE *state, ITYPE dim) {
    ITYPE index;
    double norm = 0;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:norm)
#endif
    for (index = 0; index < dim; ++index){
        /* |z|^2 with a single multiply: cheaper than pow(cabs(z), 2) and
         * avoids routing a trivial square through the libm pow path. */
        const double a = cabs(state[index]);
        norm += a * a;
    }
    return norm;
}
// calculate inner product of two state vector
/* Return the inner product <bra|ket> = sum_i conj(bra[i]) * ket[i] over
 * `dim` entries. */
CTYPE state_inner_product(const CTYPE *state_bra, const CTYPE *state_ket, ITYPE dim) {
#ifndef _MSC_VER
    CTYPE value = 0;
    ITYPE index;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:value)
#endif
    for(index = 0; index < dim; ++index){
        value += conj(state_bra[index]) * state_ket[index];
    }
    return value;
#else
    /* MSVC cannot apply an OpenMP reduction to a complex type, so the real
     * and imaginary parts are accumulated separately. */
    double real_sum = 0.;
    double imag_sum = 0.;
    ITYPE index;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:real_sum,imag_sum)
#endif
    for (index = 0; index < dim; ++index) {
        /* BUG FIX: `value` was declared without an initializer and then
         * updated with `+=`, so each iteration read an indeterminate value
         * (undefined behavior).  Initialize it with the product directly. */
        CTYPE value = conj(state_bra[index]) * state_ket[index];
        real_sum += creal(value);
        imag_sum += cimag(value);
    }
    return real_sum + 1.i * imag_sum;
#endif
}
|
CT_IMPL.c | /*
* _CT_IMPL_C_
*
* Copyright (C) 2017-2019 Tactical Computing Laboratories, LLC
* All Rights Reserved
* contact@tactcomplabs.com
*
* See LICENSE in the top level directory for licensing details
*/
#include <omp.h>
#include <stdint.h>
/* OpenMP Benchmark Implementations
*
* Benchmark implementations are in the form:
*
* void BENCHTYPE_ATOMTYPE( uint64_t *ARRAY, uint64_t *IDX,
* unsigned long long iters,
* unsigned long long pes )
*
*/
/* RAND_ADD: random-access atomic-update benchmark.  Each thread derives a
 * private window [start, start+iters) into IDX and atomically adds 1 to
 * ARRAY[IDX[i]] for every index in that window.  `pes` is unused here (kept
 * for the common BENCHTYPE signature).
 * NOTE(review): `start` is thread-dependent, so the `#pragma omp for` loop
 * bounds differ between threads; the OpenMP worksharing-loop construct
 * requires identical bounds across the team -- confirm the intended
 * iteration mapping. */
void RAND_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
#pragma omp parallel private(start,i)
{
/* per-thread base offset into the index stream */
start = (uint64_t)(omp_get_thread_num()) * iters;
#pragma omp for
for( i=start; i<(start+iters); i++ ){
__atomic_fetch_add( &ARRAY[IDX[i]], (uint64_t)(0x1), __ATOMIC_RELAXED );
}
}
}
/* RAND_CAS: same access pattern as RAND_ADD, but each update is a
 * compare-and-swap whose expected value, target, and desired value all
 * alias ARRAY[IDX[i]] -- a self-CAS that measures CAS cost without
 * changing the data.  Same worksharing-bounds caveat as RAND_ADD. */
void RAND_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
#pragma omp parallel private(start,i)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
#pragma omp for
for( i=start; i<(start+iters); i++ ){
__atomic_compare_exchange_n( &ARRAY[IDX[i]], &ARRAY[IDX[i]], ARRAY[IDX[i]],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
/* STRIDE1_ADD: unit-stride atomic-update benchmark.  Each thread walks a
 * contiguous window [start, start+iters) of ARRAY and atomically adds 0xF
 * to every element.  IDX and pes are unused (common BENCHTYPE signature).
 * NOTE(review): `start` is thread-dependent, so the omp-for bounds differ
 * per thread -- verify against the OpenMP worksharing-loop rules. */
void STRIDE1_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
#pragma omp parallel private(start,i)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
#pragma omp for
for( i=start; i<(start+iters); i++ ){
__atomic_fetch_add( &ARRAY[i], (uint64_t)(0xF), __ATOMIC_RELAXED );
}
}
}
/* STRIDE1_CAS: unit-stride self-CAS; expected, target, and desired all
 * alias ARRAY[i], so the stored values never change -- only CAS latency
 * is measured.  Same caveats as STRIDE1_ADD. */
void STRIDE1_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
#pragma omp parallel private(start,i)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
#pragma omp for
for( i=start; i<(start+iters); i++ ){
__atomic_compare_exchange_n( &ARRAY[i], &ARRAY[i], ARRAY[i],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
/* STRIDEN_ADD: strided atomic-update benchmark.  Like STRIDE1_ADD but the
 * loop advances by `stride` elements per iteration, touching roughly
 * iters/stride locations in each thread's window.  IDX and pes are unused.
 * NOTE(review): thread-dependent omp-for bounds -- same worksharing caveat
 * as the other kernels in this file. */
void STRIDEN_ADD( uint64_t *ARRAY,
uint64_t *IDX,
uint64_t iters,
uint64_t pes,
uint64_t stride ){
uint64_t i = 0;
uint64_t start = 0;
#pragma omp parallel private(start,i)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
#pragma omp for
for( i=start; i<(start+iters); i+=stride ){
__atomic_fetch_add( &ARRAY[i], (uint64_t)(0xF), __ATOMIC_RELAXED );
}
}
}
/* STRIDEN_CAS: strided self-CAS variant of STRIDEN_ADD; values in ARRAY
 * are compared/stored against themselves and never change. */
void STRIDEN_CAS( uint64_t *ARRAY,
uint64_t *IDX,
uint64_t iters,
uint64_t pes,
uint64_t stride ){
uint64_t i = 0;
uint64_t start = 0;
#pragma omp parallel private(start,i)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
#pragma omp for
for( i=start; i<(start+iters); i+=stride ){
__atomic_compare_exchange_n( &ARRAY[i], &ARRAY[i], ARRAY[i],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
/* PTRCHASE_ADD: pointer-chase benchmark.  `start` holds the current node;
 * each step atomically loads IDX[start] (a fetch_add of 0 is an atomic
 * load) and jumps there.  The chase is inherently serial within a thread;
 * the omp-for (bounds 0..iters are identical for all threads) only splits
 * the total step count across the team.  ARRAY and pes are unused. */
void PTRCHASE_ADD( uint64_t *ARRAY,
uint64_t *IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
#pragma omp parallel private(start,i)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
#pragma omp for
for( i=0; i<iters; i++ ){
start = __atomic_fetch_add( &IDX[start],
(uint64_t)(0x00ull),
__ATOMIC_RELAXED );
}
}
}
/* PTRCHASE_CAS: CAS-based chase.  The CAS compares IDX[start] against
 * `start`; on mismatch it writes the observed IDX[start] back into `start`,
 * which advances the chase to the next node. */
void PTRCHASE_CAS( uint64_t *ARRAY,
uint64_t *IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
#pragma omp parallel private(start,i)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
#pragma omp for
for( i=0; i<iters; i++ ){
__atomic_compare_exchange_n( &IDX[start], &start, IDX[start],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
/* SG_ADD: scatter/gather benchmark.  Per element: atomically load a source
 * index from IDX[i] and a destination index from IDX[i+1] (fetch_add of 0
 * == atomic load), atomically increment ARRAY[src] capturing its old value,
 * then add that value into ARRAY[dest].
 * NOTE(review): reads IDX[i+1], so IDX needs at least start+iters+1
 * entries -- confirm the allocation at the call site.  Thread-dependent
 * omp-for bounds: same worksharing caveat as the other kernels. */
void SG_ADD( uint64_t *ARRAY,
uint64_t *IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
uint64_t src = 0;
uint64_t dest = 0;
uint64_t val = 0;
#pragma omp parallel private(start,i,src,dest,val)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
#pragma omp for
for( i=start; i<(start+iters); i++ ){
src = __atomic_fetch_add( &IDX[i], (uint64_t)(0x00ull), __ATOMIC_RELAXED );
dest = __atomic_fetch_add( &IDX[i+1], (uint64_t)(0x00ull), __ATOMIC_RELAXED );
val = __atomic_fetch_add( &ARRAY[src], (uint64_t)(0x01ull), __ATOMIC_RELAXED );
__atomic_fetch_add( &ARRAY[dest], val, __ATOMIC_RELAXED );
}
}
}
/* SG_CAS: CAS variant of SG_ADD.  The expected-value operands double as
 * the gathered indices: on CAS failure the observed IDX/ARRAY value is
 * written into src/dest/val, which then drive the following accesses. */
void SG_CAS( uint64_t *ARRAY,
uint64_t *IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
uint64_t src = 0;
uint64_t dest = 0;
uint64_t val = 0;
#pragma omp parallel private(start,i,src,dest,val)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
val = 0x00ull;
src = 0x00ull;
dest = 0x00ull;
#pragma omp for
for( i=start; i<(start+iters); i++ ){
__atomic_compare_exchange_n( &IDX[i], &src, IDX[i],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &IDX[i+1], &dest, IDX[i+1],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[src], &val, ARRAY[src],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[dest], &ARRAY[dest], val,
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
/* CENTRAL_ADD: contended hot-spot benchmark -- every thread hammers the
 * single location ARRAY[0] with relaxed atomic increments; the iteration
 * space 0..iters is shared across the team.  IDX and pes are unused. */
void CENTRAL_ADD( uint64_t *ARRAY,
                  uint64_t *IDX,
                  uint64_t iters,
                  uint64_t pes ){
  uint64_t n = 0;
#pragma omp parallel private(n)
  {
#pragma omp for
    for( n = 0; n < iters; n++ ){
      __atomic_fetch_add( &ARRAY[0], (uint64_t)(0x1), __ATOMIC_RELAXED );
    }
  }
}
/* CENTRAL_CAS: contended hot-spot CAS benchmark on ARRAY[0].  Expected,
 * target, and desired all alias ARRAY[0], so the stored value never
 * changes -- only the CAS traffic itself is measured. */
void CENTRAL_CAS( uint64_t *ARRAY,
                  uint64_t *IDX,
                  uint64_t iters,
                  uint64_t pes ){
  uint64_t n = 0;
#pragma omp parallel private(n)
  {
#pragma omp for
    for( n = 0; n < iters; n++ ){
      __atomic_compare_exchange_n( &ARRAY[0], &ARRAY[0], ARRAY[0],
                                   0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
    }
  }
}
/* SCATTER_ADD: scatter benchmark.  Per element: atomically load a
 * destination index from IDX[i+1], atomically increment ARRAY[i] capturing
 * its old value, then add that value into ARRAY[dest].
 * NOTE(review): reads IDX[i+1] -- IDX needs start+iters+1 entries.
 * Thread-dependent omp-for bounds: same worksharing caveat as the other
 * kernels in this file. */
void SCATTER_ADD( uint64_t *ARRAY,
uint64_t *IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
uint64_t dest = 0;
uint64_t val = 0;
#pragma omp parallel private(start,i,dest,val)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
#pragma omp for
for( i=start; i<(start+iters); i++ ){
dest = __atomic_fetch_add( &IDX[i+1], (uint64_t)(0x00ull), __ATOMIC_RELAXED );
val = __atomic_fetch_add( &ARRAY[i], (uint64_t)(0x01ull), __ATOMIC_RELAXED );
__atomic_fetch_add( &ARRAY[dest], val, __ATOMIC_RELAXED );
}
}
}
/* SCATTER_CAS: CAS variant; on CAS failure the observed value is written
 * into dest/val, which feed the following accesses. */
void SCATTER_CAS( uint64_t *ARRAY,
uint64_t *IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
uint64_t dest = 0;
uint64_t val = 0;
#pragma omp parallel private(start,i,dest,val)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
dest = 0x00ull;
val = 0x00ull;
#pragma omp for
for( i=start; i<(start+iters); i++ ){
__atomic_compare_exchange_n( &IDX[i+1], &dest, IDX[i+1],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[i], &val, ARRAY[i],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[dest], &ARRAY[dest], val,
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
/* GATHER_ADD: gather benchmark.  Per element: atomically load a source
 * index from IDX[i+1], atomically increment ARRAY[dest] capturing its old
 * value, then add that value into ARRAY[i] (the mirror of SCATTER_ADD).
 * NOTE(review): reads IDX[i+1] -- IDX needs start+iters+1 entries.
 * Thread-dependent omp-for bounds: same worksharing caveat as the other
 * kernels in this file. */
void GATHER_ADD( uint64_t *ARRAY,
uint64_t *IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
uint64_t dest = 0;
uint64_t val = 0;
#pragma omp parallel private(start,i,dest,val)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
#pragma omp for
for( i=start; i<(start+iters); i++ ){
dest = __atomic_fetch_add( &IDX[i+1], (uint64_t)(0x00ull), __ATOMIC_RELAXED );
val = __atomic_fetch_add( &ARRAY[dest], (uint64_t)(0x01ull), __ATOMIC_RELAXED );
__atomic_fetch_add( &ARRAY[i], val, __ATOMIC_RELAXED );
}
}
}
/* GATHER_CAS: CAS variant; on CAS failure the observed value is written
 * into dest/val, which feed the following accesses. */
void GATHER_CAS( uint64_t *ARRAY,
uint64_t *IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
uint64_t dest = 0;
uint64_t val = 0;
#pragma omp parallel private(start,i,dest,val)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
dest = 0x00ull;
val = 0x00ull;
#pragma omp for
for( i=start; i<(start+iters); i++ ){
__atomic_compare_exchange_n( &IDX[i+1], &dest, IDX[i+1],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[dest], &val, ARRAY[dest],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[i], &ARRAY[i], val,
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
/* EOF */
|
symv_c_csr_u_hi_conj.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>
#include<stdlib.h>
/* y := alpha * op(A) * x + beta * y for a square matrix stored as the
 * strictly upper triangle of a CSR matrix with an implicit unit diagonal;
 * the stored entries are conjugated (cmp_conj) before use.  Entries with
 * col <= i are skipped.  Each off-diagonal entry contributes symmetrically
 * to rows `i` and `col`, so every thread accumulates into a private copy
 * y_local[tid] and the copies are reduced into y at the end.
 * NOTE(review): the alpha_memalign() results are used unchecked -- confirm
 * the allocator aborts on failure, otherwise a NULL dereference follows. */
static alphasparse_status_t
symv_s_csr_u_hi_omp(const ALPHA_Number alpha,
const ALPHA_SPMAT_CSR *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
const ALPHA_INT m = A->rows;
const ALPHA_INT n = A->cols;
if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE;
ALPHA_INT num_threads = alpha_get_thread_num();
/* y = beta*y + alpha*x: scaling plus the implicit unit diagonal. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for(ALPHA_INT i = 0; i < m; ++i)
{
alpha_mule(y[i], beta);
alpha_madde(y[i], alpha, x[i]);
}
/* One zero-initialized private accumulator per thread. */
ALPHA_Number **y_local = alpha_memalign(num_threads * sizeof(ALPHA_Number *), DEFAULT_ALIGNMENT);
for(ALPHA_INT i = 0; i < num_threads; i++)
{
y_local[i] = alpha_memalign(m * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT);
memset(y_local[i], '\0', sizeof(ALPHA_Number) * m);
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for(ALPHA_INT i = 0; i < m; ++i)
{
ALPHA_INT tid = alpha_get_thread_id();
ALPHA_Number tmp;
for(ALPHA_INT ai = A->rows_start[i]; ai < A->rows_end[i]; ++ai)
{
const ALPHA_INT col = A->col_indx[ai];
if(col <= i)
{
continue;
}
else
{
/* tmp = alpha * conj(A[i][col]); scatter to both symmetric rows. */
alpha_setzero(tmp);
cmp_conj(tmp, A->values[ai]);
alpha_mul(tmp, alpha, tmp);
alpha_madde(y_local[tid][col], tmp, x[i]);
alpha_madde(y_local[tid][i], tmp, x[col]);
}
}
}
/* Reduce the per-thread accumulators into y (parallel over rows). */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for(ALPHA_INT row = 0; row < m; row++)
for(ALPHA_INT i = 0; i < num_threads; i++)
alpha_adde(y[row], y_local[i][row]);
for(ALPHA_INT i = 0; i < num_threads; i++)
{
alpha_free(y_local[i]);
}
alpha_free(y_local);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
/* Public kernel entry point: forward directly to the OpenMP implementation
 * above; all validation happens there. */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
const ALPHA_SPMAT_CSR *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
return symv_s_csr_u_hi_omp(alpha, A, x, beta, y);
}
|
symm_x_dia_n_lo_col.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/* Y := alpha * A * X + beta * Y, column by column, for a symmetric matrix in
 * DIA storage holding the main diagonal and sub-diagonals (distance d <= 0).
 * Each sub-diagonal entry contributes to both (r,c) and its mirror (c,r);
 * diagonals with d > 0 are skipped.  Columns of X/Y are independent, so the
 * parallel loop over columns has no write conflicts. */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
ALPHA_INT num_threads = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for (ALPHA_INT cc = 0; cc < columns; ++cc)
{
/* Y, X point at the current dense column. */
ALPHA_Number* Y = &y[index2(cc,0,ldy)];
for (ALPHA_INT i = 0; i < mat->rows; i++)
alpha_mul(Y[i],Y[i],beta);
const ALPHA_Number* X = &x[index2(cc,0,ldx)];
for(ALPHA_INT di = 0; di < mat->ndiag;++di){
ALPHA_INT d = mat->distance[di];
if(d < 0){
/* Sub-diagonal: scatter each entry to (ar,ac) and mirrored (ac,ar). */
ALPHA_INT ars = alpha_max(0,-d);
ALPHA_INT acs = alpha_max(0,d);
ALPHA_INT an = alpha_min(mat->rows - ars,mat->cols - acs);
for(ALPHA_INT i = 0; i < an; ++i){
ALPHA_INT ar = ars + i;
ALPHA_INT ac = acs + i;
ALPHA_Number val;
alpha_mul(val,mat->values[index2(di,ar,mat->lval)],alpha);
alpha_madde(Y[ar],val,X[ac]);
alpha_madde(Y[ac],val,X[ar]);
}
}
if(d == 0){
/* Main diagonal contributes once per row. */
for(ALPHA_INT r = 0; r < mat->rows; ++r){
ALPHA_Number val;
alpha_mul(val,mat->values[index2(di,r,mat->lval)],alpha);
alpha_madde(Y[r],val,X[r]);
}
}
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
dynmat.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <math.h>
#include <stdlib.h>
#include <dynmat.h>
#define PI 3.14159265358979323846
static void get_dynmat_ij(double *dynamical_matrix,
const int num_patom,
const int num_satom,
const double *fc,
const double q[3],
PHPYCONST double (*svecs)[27][3],
const int *multi,
const double *mass,
const int *s2p_map,
const int *p2s_map,
PHPYCONST double (*charge_sum)[3][3],
const int i,
const int j);
static void get_dm(double dm_real[3][3],
double dm_imag[3][3],
const int num_patom,
const int num_satom,
const double *fc,
const double q[3],
PHPYCONST double (*svecs)[27][3],
const int *multi,
const int *p2s_map,
PHPYCONST double (*charge_sum)[3][3],
const int i,
const int j,
const int k);
static double get_dielectric_part(const double q_cart[3],
PHPYCONST double dielectric[3][3]);
static void get_KK(double *dd_part, /* [natom, 3, natom, 3, (real,imag)] */
PHPYCONST double (*G_list)[3], /* [num_G, 3] */
const int num_G,
const int num_patom,
const double q_cart[3],
const double *q_direction_cart,
PHPYCONST double dielectric[3][3],
PHPYCONST double (*pos)[3], /* [num_patom, 3] */
const double lambda,
const double tolerance);
static void make_Hermitian(double *mat, const int num_band);
static void multiply_borns(double *dd,
const double *dd_in,
const int num_patom,
PHPYCONST double (*born)[3][3]);
/* Build the dynamical matrix D(q) into `dynamical_matrix` (interleaved
 * real/imag, row-major over [num_patom*3, num_patom*3]).  The (i,j)
 * atom-pair blocks are independent, so the OpenMP path flattens the pair
 * loop into one index ij.  The result is numerically symmetrized into an
 * exactly Hermitian matrix at the end.  Always returns 0. */
int dym_get_dynamical_matrix_at_q(double *dynamical_matrix,
const int num_patom,
const int num_satom,
const double *fc,
const double q[3],
PHPYCONST double (*svecs)[27][3],
const int *multi,
const double *mass,
const int *s2p_map,
const int *p2s_map,
PHPYCONST double (*charge_sum)[3][3],
const int with_openmp)
{
int i, j, ij;
if (with_openmp) {
#pragma omp parallel for
for (ij = 0; ij < num_patom * num_patom ; ij++) {
get_dynmat_ij(dynamical_matrix,
num_patom,
num_satom,
fc,
q,
svecs,
multi,
mass,
s2p_map,
p2s_map,
charge_sum,
ij / num_patom, /* i */
ij % num_patom); /* j */
}
} else {
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
get_dynmat_ij(dynamical_matrix,
num_patom,
num_satom,
fc,
q,
svecs,
multi,
mass,
s2p_map,
p2s_map,
charge_sum,
i,
j);
}
}
}
/* Average D with its conjugate transpose to remove numerical asymmetry. */
make_Hermitian(dynamical_matrix, num_patom * 3);
return 0;
}
/* Add the reciprocal-space dipole-dipole contribution to the dynamical
 * matrix.  The bare lattice sum KK over G (+ q) is computed by get_KK,
 * sandwiched with the Born effective charges, corrected by subtracting the
 * q=0 self-term dd_q0 from the on-site (i == i) blocks, and finally scaled
 * by `factor`.  dd layout: [natom, 3, natom, 3, (real,imag)]. */
void dym_get_dipole_dipole(double *dd, /* [natom, 3, natom, 3, (real,imag)] */
                           const double *dd_q0, /* [natom, 3, 3, (real,imag)] */
                           PHPYCONST double (*G_list)[3], /* [num_G, 3] */
                           const int num_G,
                           const int num_patom,
                           const double q_cart[3],
                           const double *q_direction_cart, /* must be pointer */
                           PHPYCONST double (*born)[3][3],
                           PHPYCONST double dielectric[3][3],
                           PHPYCONST double (*pos)[3], /* [num_patom, 3] */
                           const double factor, /* 4pi/V*unit-conv */
                           const double lambda,
                           const double tolerance)
{
  int i, k, l, adrs, adrs_sum;
  double *dd_tmp;

  dd_tmp = (double*) malloc(sizeof(double) * num_patom * num_patom * 18);
  /* BUG FIX: the allocation result was previously used unchecked (CERT
   * MEM32-C); bail out and leave dd untouched if allocation fails. */
  if (dd_tmp == NULL) {
    return;
  }
  for (i = 0; i < num_patom * num_patom * 18; i++) {
    dd[i] = 0;
    dd_tmp[i] = 0;
  }
  /* Bare reciprocal-space kernel over G + q. */
  get_KK(dd_tmp,
         G_list,
         num_G,
         num_patom,
         q_cart,
         q_direction_cart,
         dielectric,
         pos,
         lambda,
         tolerance);
  /* Sandwich with Born effective charge tensors. */
  multiply_borns(dd, dd_tmp, num_patom, born);
  /* Subtract the q->0 self-term from each atom's diagonal block. */
  for (i = 0; i < num_patom; i++) {
    for (k = 0; k < 3; k++) { /* alpha */
      for (l = 0; l < 3; l++) { /* beta */
        adrs = i * num_patom * 9 + k * num_patom * 3 + i * 3 + l;
        adrs_sum = i * 9 + k * 3 + l;
        dd[adrs * 2] -= dd_q0[adrs_sum * 2];
        dd[adrs * 2 + 1] -= dd_q0[adrs_sum * 2 + 1];
      }
    }
  }
  for (i = 0; i < num_patom * num_patom * 18; i++) {
    dd[i] *= factor;
  }
  /* This may not be necessary. */
  /* make_Hermitian(dd, num_patom * 3); */
  free(dd_tmp);
  dd_tmp = NULL;
}
/* Compute the q=0 self-term dd_q0 [natom, 3, 3, (real,imag)] used by
 * dym_get_dipole_dipole: the Born-charge-dressed lattice sum at q = 0,
 * contracted over the second atom index and then symmetrized so the real
 * part is symmetric and the imaginary part antisymmetric in (alpha,beta). */
void dym_get_dipole_dipole_q0(double *dd_q0, /* [natom, 3, 3, (real,imag)] */
                              PHPYCONST double (*G_list)[3], /* [num_G, 3] */
                              const int num_G,
                              const int num_patom,
                              PHPYCONST double (*born)[3][3],
                              PHPYCONST double dielectric[3][3],
                              PHPYCONST double (*pos)[3], /* [num_patom, 3] */
                              const double lambda,
                              const double tolerance)
{
  int i, j, k, l, adrs_tmp, adrs, adrsT;
  double zero_vec[3];
  double *dd_tmp1, *dd_tmp2;

  dd_tmp1 = (double*) malloc(sizeof(double) * num_patom * num_patom * 18);
  dd_tmp2 = (double*) malloc(sizeof(double) * num_patom * num_patom * 18);
  /* BUG FIX: both allocations were previously used unchecked (CERT
   * MEM32-C); free(NULL) is a no-op, so this cleanup is always safe. */
  if (dd_tmp1 == NULL || dd_tmp2 == NULL) {
    free(dd_tmp1);
    free(dd_tmp2);
    return;
  }
  for (i = 0; i < num_patom * num_patom * 18; i++) {
    dd_tmp1[i] = 0;
    dd_tmp2[i] = 0;
  }
  zero_vec[0] = 0;
  zero_vec[1] = 0;
  zero_vec[2] = 0;
  /* Lattice sum over G only (q = 0, no q-direction limit). */
  get_KK(dd_tmp1,
         G_list,
         num_G,
         num_patom,
         zero_vec,
         NULL,
         dielectric,
         pos,
         lambda,
         tolerance);
  multiply_borns(dd_tmp2, dd_tmp1, num_patom, born);
  for (i = 0; i < num_patom * 18; i++) {
    dd_q0[i] = 0;
  }
  /* Contract the second atom index j into each atom i's 3x3 block. */
  for (i = 0; i < num_patom; i++) {
    for (k = 0; k < 3; k++) { /* alpha */
      for (l = 0; l < 3; l++) { /* beta */
        adrs = i * 9 + k * 3 + l;
        for (j = 0; j < num_patom; j++) {
          adrs_tmp = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l ;
          dd_q0[adrs * 2] += dd_tmp2[adrs_tmp * 2];
          dd_q0[adrs * 2 + 1] += dd_tmp2[adrs_tmp * 2 + 1];
        }
      }
    }
  }
  /* Symmetrize in (alpha,beta): real part averaged, imaginary part made
   * antisymmetric. */
  for (i = 0; i < num_patom; i++) {
    for (k = 0; k < 3; k++) { /* alpha */
      for (l = 0; l < 3; l++) { /* beta */
        adrs = i * 9 + k * 3 + l;
        adrsT = i * 9 + l * 3 + k;
        dd_q0[adrs * 2] += dd_q0[adrsT * 2];
        dd_q0[adrs * 2] /= 2;
        dd_q0[adrsT * 2] = dd_q0[adrs * 2];
        dd_q0[adrs * 2 + 1] -= dd_q0[adrsT * 2 + 1];
        dd_q0[adrs * 2 + 1] /= 2;
        dd_q0[adrsT * 2 + 1] = -dd_q0[adrs * 2 + 1];
      }
    }
  }
  free(dd_tmp1);
  dd_tmp1 = NULL;
  free(dd_tmp2);
  dd_tmp2 = NULL;
}
/* charge_sum[i*num_patom+j][a][b] = factor * (q.Z_i)_a * (q.Z_j)_b, where
 * (q.Z_i)_a = sum_k q_cart[k] * born[i][k][a] is the Born charge of atom i
 * projected onto q.  Used as the non-analytical correction term. */
void dym_get_charge_sum(double (*charge_sum)[3][3],
                        const int num_patom,
                        const double factor, /* 4pi/V*unit-conv and denominator */
                        const double q_cart[3],
                        PHPYCONST double (*born)[3][3])
{
  int i, j, k, a, b;
  double (*q_born)[3];

  q_born = (double (*)[3]) malloc(sizeof(double[3]) * num_patom);
  /* BUG FIX: the allocation was previously used unchecked (CERT MEM32-C);
   * bail out and leave charge_sum untouched on failure. */
  if (q_born == NULL) {
    return;
  }
  for (i = 0; i < num_patom; i++) {
    for (j = 0; j < 3; j++) {
      q_born[i][j] = 0;
    }
  }
  /* q_born[i] = q . Z_i */
  for (i = 0; i < num_patom; i++) {
    for (j = 0; j < 3; j++) {
      for (k = 0; k < 3; k++) {
        q_born[i][j] += q_cart[k] * born[i][k][j];
      }
    }
  }
  /* Outer product over atom pairs, scaled by factor. */
  for (i = 0; i < num_patom; i++) {
    for (j = 0; j < num_patom; j++) {
      for (a = 0; a < 3; a++) {
        for (b = 0; b < 3; b++) {
          charge_sum[i * num_patom + j][a][b] =
            q_born[i][a] * q_born[j][b] * factor;
        }
      }
    }
  }
  free(q_born);
  q_born = NULL;
}
/* fc[num_patom, num_satom, 3, 3] */
/* dm[num_comm_points, num_patom * 3, num_patom *3] */
/* comm_points[num_satom, num_patom, 27, 3] */
/* shortest_vectors[num_satom, num_patom, 27, 3] */
/* multiplicities[num_satom, num_patom] */
/* Inverse-Fourier-transform dynamical matrices given at the commensurate
 * points back into real-space force constants fc[num_patom, num_satom, 3, 3].
 * For each atom pair (i, j) the phase factor is averaged over the `multi`
 * equivalent shortest vectors, and the mass weighting of the dynamical
 * matrix is undone via coef = sqrt(m_i * m_j) / N. */
void dym_transform_dynmat_to_fc(double *fc,
const double *dm,
PHPYCONST double (*comm_points)[3],
PHPYCONST double (*shortest_vectors)[27][3],
const int *multiplicities,
const double *masses,
const int *s2pp_map,
const int *fc_index_map,
const int num_patom,
const int num_satom)
{
int i, j, k, l, m, N, adrs, multi;
double coef, phase, cos_phase, sin_phase;

/* N = number of unit cells in the supercell. */
N = num_satom / num_patom;
for (i = 0; i < num_patom * num_satom * 9; i++) {
fc[i] = 0;
}
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_satom; j++) {
coef = sqrt(masses[i] * masses[s2pp_map[j]]) / N;
for (k = 0; k < N; k++) {
/* Average exp(-i q.r) over the `multi` equivalent images. */
cos_phase = 0;
sin_phase = 0;
multi = multiplicities[j * num_patom + i];
for (l = 0; l < multi; l++) {
phase = 0;
for (m = 0; m < 3; m++) {
phase -= comm_points[k][m] *
shortest_vectors[j * num_patom + i][l][m];
}
cos_phase += cos(phase * 2 * PI);
sin_phase += sin(phase * 2 * PI);
}
cos_phase /= multi;
sin_phase /= multi;
/* Accumulate Re(dm * exp(-i q.r)) into the fc block. */
for (l = 0; l < 3; l++) {
for (m = 0; m < 3; m++) {
adrs = k * num_patom * num_patom * 18 + i * num_patom * 18 +
l * num_patom * 6 + s2pp_map[j] * 6 + m * 2;
fc[fc_index_map[i] * num_satom * 9 + j * 9 + l * 3 + m] +=
(dm[adrs] * cos_phase - dm[adrs + 1] * sin_phase) * coef;
}
}
}
}
}
}
/* Fill the 3x3 (i, j) atom-pair block of the dynamical matrix: sum the
 * phase-weighted force constants over all supercell images of atom j
 * (selected via s2p_map/p2s_map) and divide by sqrt(m_i * m_j). */
static void get_dynmat_ij(double *dynamical_matrix,
const int num_patom,
const int num_satom,
const double *fc,
const double q[3],
PHPYCONST double (*svecs)[27][3],
const int *multi,
const double *mass,
const int *s2p_map,
const int *p2s_map,
PHPYCONST double (*charge_sum)[3][3],
const int i,
const int j)
{
int k, l, adrs;
double mass_sqrt;
double dm_real[3][3], dm_imag[3][3];

mass_sqrt = sqrt(mass[i] * mass[j]);
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
dm_real[k][l] = 0;
dm_imag[k][l] = 0;
}
}
for (k = 0; k < num_satom; k++) { /* Lattice points of right index of fc */
/* Only supercell atoms that map onto primitive atom j contribute. */
if (s2p_map[k] != p2s_map[j]) {
continue;
}
get_dm(dm_real,
dm_imag,
num_patom,
num_satom,
fc,
q,
svecs,
multi,
p2s_map,
charge_sum,
i,
j,
k);
}
/* Store the mass-weighted block into the interleaved complex matrix. */
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
adrs = (i * 3 + k) * num_patom * 3 + j * 3 + l;
dynamical_matrix[adrs * 2] = dm_real[k][l] / mass_sqrt;
dynamical_matrix[adrs * 2 + 1] = dm_imag[k][l] / mass_sqrt;
}
}
}
/* Accumulate one supercell atom k's contribution into the 3x3 block
 * (dm_real, dm_imag): the force constants fc(i, k) -- optionally augmented
 * by the non-analytical charge_sum term -- weighted by exp(i q.r) averaged
 * over the `multi` equivalent shortest vectors. */
static void get_dm(double dm_real[3][3],
double dm_imag[3][3],
const int num_patom,
const int num_satom,
const double *fc,
const double q[3],
PHPYCONST double (*svecs)[27][3],
const int *multi,
const int *p2s_map,
PHPYCONST double (*charge_sum)[3][3],
const int i,
const int j,
const int k)
{
int l, m;
double phase, cos_phase, sin_phase, fc_elem;

/* Average the phase factor over the equivalent images. */
cos_phase = 0;
sin_phase = 0;
for (l = 0; l < multi[k * num_patom + i]; l++) {
phase = 0;
for (m = 0; m < 3; m++) {
phase += q[m] * svecs[k * num_patom + i][l][m];
}
cos_phase += cos(phase * 2 * PI) / multi[k * num_patom + i];
sin_phase += sin(phase * 2 * PI) / multi[k * num_patom + i];
}
for (l = 0; l < 3; l++) {
for (m = 0; m < 3; m++) {
if (charge_sum) {
/* Non-analytical correction added to each fc element. */
fc_elem = (fc[p2s_map[i] * num_satom * 9 + k * 9 + l * 3 + m] +
charge_sum[i * num_patom + j][l][m]);
} else {
fc_elem = fc[p2s_map[i] * num_satom * 9 + k * 9 + l * 3 + m];
}
dm_real[l][m] += fc_elem * cos_phase;
dm_imag[l][m] += fc_elem * sin_phase;
}
}
}
/* Return the quadratic form q^T * epsilon * q for the dielectric tensor
 * `dielectric` and Cartesian wave vector `q_cart`. */
static double get_dielectric_part(const double q_cart[3],
                                  PHPYCONST double dielectric[3][3])
{
  int row, col;
  double mv, acc;

  acc = 0;
  for (row = 0; row < 3; row++) {
    /* mv = (epsilon * q)[row] */
    mv = 0;
    for (col = 0; col < 3; col++) {
      mv += dielectric[row][col] * q_cart[col];
    }
    acc += q_cart[row] * mv;
  }
  return acc;
}
/* Accumulate the Ewald reciprocal-space kernel into dd_part: for every
 * reciprocal vector K = G + q, add the dyadic K_a K_b / (K.eps.K) damped by
 * exp(-K.eps.K / 4 lambda^2), multiplied by the inter-atomic phase factor.
 * At |K| < tolerance the term is skipped unless q_direction_cart is given,
 * in which case the non-analytical q->0 directional limit is used. */
static void get_KK(double *dd_part, /* [natom, 3, natom, 3, (real,imag)] */
PHPYCONST double (*G_list)[3], /* [num_G, 3] */
const int num_G,
const int num_patom,
const double q_cart[3],
const double *q_direction_cart,
PHPYCONST double dielectric[3][3],
PHPYCONST double (*pos)[3], /* [num_patom, 3] */
const double lambda,
const double tolerance)
{
int i, j, k, l, g, adrs;
double q_K[3];
double norm, cos_phase, sin_phase, phase, dielectric_part, exp_damp, L2;
double KK[3][3];

L2 = 4 * lambda * lambda;
/* sum over K = G + q and over G (i.e. q=0) */
/* q_direction has values for summation over K at Gamma point. */
/* q_direction is NULL for summation over G */
for (g = 0; g < num_G; g++) {
norm = 0;
for (i = 0; i < 3; i++) {
q_K[i] = G_list[g][i] + q_cart[i];
norm += q_K[i] * q_K[i];
}

if (sqrt(norm) < tolerance) {
if (!q_direction_cart) {
continue;
} else {
/* Gamma point: directional non-analytical limit, no damping. */
dielectric_part = get_dielectric_part(q_direction_cart, dielectric);
for (i = 0; i < 3; i++) {
for (j = 0; j < 3; j++) {
KK[i][j] =
q_direction_cart[i] * q_direction_cart[j] / dielectric_part;
}
}
}
} else {
/* Regular K: screened dyadic with Gaussian damping. */
dielectric_part = get_dielectric_part(q_K, dielectric);
exp_damp = exp(-dielectric_part / L2);
for (i = 0; i < 3; i++) {
for (j = 0; j < 3; j++) {
KK[i][j] = q_K[i] * q_K[j] / dielectric_part * exp_damp;
}
}
}

for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
phase = 0;
for (k = 0; k < 3; k++) {
/* For D-type dynamical matrix */
/* phase += (pos[i][k] - pos[j][k]) * q_K[k]; */
/* For C-type dynamical matrix */
phase += (pos[i][k] - pos[j][k]) * G_list[g][k];
}
phase *= 2 * PI;
cos_phase = cos(phase);
sin_phase = sin(phase);
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
adrs = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l;
dd_part[adrs * 2] += KK[k][l] * cos_phase;
dd_part[adrs * 2 + 1] += KK[k][l] * sin_phase;
}
}
}
}
}
}
/* Force the num_band x num_band interleaved-complex matrix `mat` to be
 * exactly Hermitian: each (i,j)/(j,i) pair is replaced by the average of
 * the element and the conjugate of its transpose partner, so the real
 * parts are averaged and the imaginary parts become antisymmetric
 * (diagonal imaginary parts go to zero). */
static void make_Hermitian(double *mat, const int num_band)
{
  int row, col, ij, ji;
  double re, im;

  for (row = 0; row < num_band; row++) {
    for (col = row; col < num_band; col++) {
      ij = 2 * (row * num_band + col);
      ji = 2 * (col * num_band + row);
      re = (mat[ij] + mat[ji]) / 2;
      im = (mat[ij + 1] - mat[ji + 1]) / 2;
      mat[ij] = re;
      mat[ij + 1] = im;
      mat[ji] = re;
      mat[ji + 1] = -im;
    }
  }
}
/* Sandwich the bare kernel dd_in with the Born effective charge tensors:
 * dd[i,alpha,j,beta] += sum_{alpha',beta'} Z_i[alpha'][alpha] *
 * Z_j[beta'][beta] * dd_in[i,alpha',j,beta'], applied to both the real and
 * imaginary interleaved components. */
static void multiply_borns(double *dd,
const double *dd_in,
const int num_patom,
PHPYCONST double (*born)[3][3])
{
int i, j, k, l, m, n, adrs, adrs_in;
double zz;

for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
for (k = 0; k < 3; k++) { /* alpha */
for (l = 0; l < 3; l++) { /* beta */
adrs = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l;
for (m = 0; m < 3; m++) { /* alpha' */
for (n = 0; n < 3; n++) { /* beta' */
adrs_in = i * num_patom * 9 + m * num_patom * 3 + j * 3 + n ;
zz = born[i][m][k] * born[j][n][l];
dd[adrs * 2] += dd_in[adrs_in * 2] * zz;
dd[adrs * 2 + 1] += dd_in[adrs_in * 2 + 1] * zz;
}
}
}
}
}
}
}
|
par_for_sparse_tr_solve_backup.c |
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/time.h>
//#include "data.h"
//#include "/esat/puck1/users/nshah/cpu_openmp/tretail_1024threads_512batch.c"
/* #include "/esat/puck1/users/nshah/cpu_openmp/HB_494_bus_TWO_WAY_PARTITION_TWO_WAY_LIMIT_LAYERS_CPU_2.c" */
/* #include "/esat/puck1/users/nshah/cpu_openmp/HB_orani678_TWO_WAY_PARTITION_TWO_WAY_LIMIT_LAYERS_CPU_2.c" */
#include "/esat/puck1/users/nshah/cpu_openmp/HB_bcsstk15_TWO_WAY_PARTITION_TWO_WAY_LIMIT_LAYERS_CPU_2.c"
/* Evaluate the layered sparse arithmetic-circuit dataflow.
 * For each of `n_iter` repetitions and each of the `N_layers` dependency
 * layers, worker t processes its slice of the layer: every node reads two
 * operands from `res` via ptr_0/ptr_1, applies multiply (op true) or add
 * (op false), and writes its result back into `res`.  Layers are separated
 * by the implicit barrier of the parallel-for; the barrier wait per worker
 * is accumulated in total_barrier_time (currently only printed when the
 * commented diagnostics are enabled).  Returns 0. */
int par_for(const int n_iter, const int N_for_threads, const int N_layers, const int *layer_len,
            const int * cum_layer_len, const int* ptr_offset, const int* output_offset,
            float *res, const bool *op, const int *ptr_0, const int *ptr_1) {

  /* Per-worker bookkeeping of time spent waiting at the layer barrier. */
  double barrier_start_time[N_for_threads];
  double barrier_end_time[N_for_threads];
  double total_barrier_time[N_for_threads];
  for(int i=0; i< N_for_threads; i++) {
    barrier_start_time[i] = 0;
    barrier_end_time[i] = 0;
    total_barrier_time[i] = 0;
  }

  for(int i=0; i< n_iter; i++) {
    for (int l=0; l< N_layers; l++) {
      #pragma omp parallel for
      for (int t = 0; t< N_for_threads; t++) {
        barrier_end_time[t] = omp_get_wtime();
        /* First layer of the first iteration has no preceding barrier. */
        if(barrier_start_time[t] != 0) {
          total_barrier_time[t] += barrier_end_time[t] - barrier_start_time[t];
        }

        /* Worker t's slice of layer l. */
        int cum_layer_offset= cum_layer_len[t*(N_layers + 1) + l];
        int ptr_o= ptr_offset[t];
        int out_o= output_offset[t];
        for (int layer_l = 0; layer_l< layer_len[t*N_layers + l]; layer_l++) {
          int cum_layer_l = cum_layer_offset + layer_l;
          int ptr_idx= cum_layer_l + ptr_o;
          float in_0= res[ptr_0[ptr_idx]];
          float in_1= res[ptr_1[ptr_idx]];
          int out_idx= cum_layer_l + out_o;
          /* op selects a product node vs. a sum node. */
          res[out_idx]= op[ptr_idx]? in_0 * in_1 : in_0 + in_1;
        }
        barrier_start_time[t]= omp_get_wtime();
      }
    }
  }
  /* BUG FIX: the function is declared int but previously fell off the end
   * without a return statement; using the (indeterminate) return value was
   * undefined behavior. */
  return 0;
}
/* Scatter the circuit's leaf values into the working buffer: for each of
 * the n_leaves inputs, write l_val_ls[l] into res at l_mem_idx_ls[l].
 * Iterations are independent, so the loop is freely parallelizable.
 * Returns 0. */
int scatter_leaves(int n_leaves, int* l_mem_idx_ls, float* l_val_ls, float* res){
  #pragma omp parallel
  {
    #pragma omp for
    for (int l = 0; l< n_leaves; l++) {
      res[l_mem_idx_ls[l]] = l_val_ls[l];
    }
  }
  /* BUG FIX: declared int but previously had no return statement (UB if
   * the caller used the value). */
  return 0;
}
/* Benchmark driver: scatter the leaf values (data arrays come from the
 * #included network file), run one warm-up pass, then time n_iter passes
 * of par_for and report per-inference latency and throughput.
 * Usage: prog [n_iter]  (defaults to 1e5 iterations). */
int main(int argc, char *argv[]) {
  printf("Scattering leaves\n");
  scatter_leaves(n_leaves, l_mem_idx_ls, l_val_ls, res);
  printf("Done scattering\n");

  omp_set_num_threads(N_for_threads);

  /* Warm-up pass (also touches all pages before timing). */
  par_for(1, N_for_threads, N_layers, layer_len,
          cum_layer_len, ptr_offset, output_offset,
          res,op,ptr_0,ptr_1);

  int n_iter= 1e5;
  /* BUG FIX: argv[1] was previously read unconditionally, so running the
   * binary without an argument dereferenced an invalid pointer.  Keep the
   * default iteration count when no argument is supplied. */
  if (argc > 1) {
    n_iter = atoi(argv[1]);
  }

  struct timeval start, end;
  gettimeofday(&start, NULL);
  par_for(n_iter, N_for_threads, N_layers, layer_len,
          cum_layer_len, ptr_offset, output_offset,
          res,op,ptr_0,ptr_1);
  gettimeofday(&end, NULL);
  float delta = ((end.tv_sec - start.tv_sec) * 1000000u +
                 end.tv_usec - start.tv_usec) / 1.e6;

  for(int b=0; b< 1; b++) {
    printf("results,%f,actual,%f\n", res[head_node_idx], golden_val);
  }
  printf("per_inference,%fs,\n", delta/n_iter);
  printf("throughput,%f MOPS\n", (1.0 * n_compute * n_iter)/(delta * 1.e6));
  return 0;
}
|
GB_unop__identity_int8_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int8_fp32)
// op(A') function: GB (_unop_tran__identity_int8_fp32)
// C type: int8_t
// A type: float
// cast: int8_t cij = GB_cast_to_int8_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = GB_cast_to_int8_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = GB_cast_to_int8_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary op with typecast: Cx [p] = (int8_t) Ax [p],
// casting float -> double -> int8_t via GB_cast_to_int8_t.
GrB_Info GB (_unop_apply__identity_int8_fp32)
(
int8_t *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// all anz entries are present: cast every one
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
int8_t z = GB_cast_to_int8_t ((double) (aij)) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ; // skip entries not present in the bitmap
float aij = Ax [p] ;
int8_t z = GB_cast_to_int8_t ((double) (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose/typecast loop lives in the shared
// GB_unop_transpose.c template, specialized by the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_int8_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__minv_uint8_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint8_bool
// op(A') function: GB_tran__minv_uint8_bool
// C type: uint8_t
// A type: bool
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 8)
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 8) ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT8 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IMINV_UNSIGNED ((uint8_t) Ax [p], 8) for all p in 0..anz-1,
// expanded from the GB_CAST_OP macro defined above.
GrB_Info GB_unop__minv_uint8_bool
(
uint8_t *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ; // Cx [p] = minv (cast (Ax [p]))
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply; the loop lives in the
// GB_unaryop_transpose.c template (numerical phase 2 only).
GrB_Info GB_tran__minv_uint8_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ompcompress.c | #ifdef _OPENMP
/* compress 1d contiguous array in parallel: the array is split into chunks of
 * 4-scalar blocks; each chunk is compressed into its own thread-local bit
 * stream, and the streams are concatenated afterwards */
static void
_t2(compress_omp, Scalar, 1)(zfp_stream* stream, const zfp_field* field)
{
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
uint nx = field->nx;
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
uint blocks = (nx + 3) / 4; /* zfp blocks hold 4 scalars in 1d */
uint chunks = chunk_count_omp(stream, blocks, threads);
int chunk;
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return; /* allocation failed; nothing compressed */
/* compress chunks of blocks in parallel */
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
uint bmin = chunk_offset(blocks, chunks, chunk + 0);
uint bmax = chunk_offset(blocks, chunks, chunk + 1);
uint block;
/* set up thread-local bit stream (private copy of the codec state) */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin x within array */
const Scalar* p = data;
uint x = 4 * block;
p += x;
/* compress partial or full block */
if (nx - x < 4)
_t2(zfp_encode_partial_block_strided, Scalar, 1)(&s, p, MIN(nx - x, 4u), 1);
else
_t2(zfp_encode_block, Scalar, 1)(&s, p);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
/* compress 1d strided array in parallel: same chunking scheme as the
 * contiguous variant, but element addresses advance by stride sx */
static void
_t2(compress_strided_omp, Scalar, 1)(zfp_stream* stream, const zfp_field* field)
{
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
uint nx = field->nx;
int sx = field->sx ? field->sx : 1; /* stride 0 means contiguous */
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
uint blocks = (nx + 3) / 4;
uint chunks = chunk_count_omp(stream, blocks, threads);
int chunk;
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
uint bmin = chunk_offset(blocks, chunks, chunk + 0);
uint bmax = chunk_offset(blocks, chunks, chunk + 1);
uint block;
/* set up thread-local bit stream */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin x within array */
const Scalar* p = data;
uint x = 4 * block;
p += sx * (ptrdiff_t)x;
/* compress partial or full block */
if (nx - x < 4)
_t2(zfp_encode_partial_block_strided, Scalar, 1)(&s, p, MIN(nx - x, 4u), sx);
else
_t2(zfp_encode_block_strided, Scalar, 1)(&s, p, sx);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
/* compress 2d strided array in parallel: the 4x4 blocks are enumerated in a
 * single linear index and decoded back to (x, y) origins per block */
static void
_t2(compress_strided_omp, Scalar, 2)(zfp_stream* stream, const zfp_field* field)
{
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
uint nx = field->nx;
uint ny = field->ny;
int sx = field->sx ? field->sx : 1;
int sy = field->sy ? field->sy : (int)nx; /* default: rows are contiguous */
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
uint bx = (nx + 3) / 4;
uint by = (ny + 3) / 4;
uint blocks = bx * by;
uint chunks = chunk_count_omp(stream, blocks, threads);
int chunk;
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
uint bmin = chunk_offset(blocks, chunks, chunk + 0);
uint bmax = chunk_offset(blocks, chunks, chunk + 1);
uint block;
/* set up thread-local bit stream */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin (x, y) within array (mixed-radix decode) */
const Scalar* p = data;
uint b = block;
uint x, y;
x = 4 * (b % bx); b /= bx;
y = 4 * b;
p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y;
/* compress partial or full block */
if (nx - x < 4 || ny - y < 4)
_t2(zfp_encode_partial_block_strided, Scalar, 2)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), sx, sy);
else
_t2(zfp_encode_block_strided, Scalar, 2)(&s, p, sx, sy);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
/* compress 3d strided array in parallel: 4x4x4 blocks, linear block index
 * decoded to (x, y, z) origins per block */
static void
_t2(compress_strided_omp, Scalar, 3)(zfp_stream* stream, const zfp_field* field)
{
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
uint nx = field->nx;
uint ny = field->ny;
uint nz = field->nz;
int sx = field->sx ? field->sx : 1;
int sy = field->sy ? field->sy : (int)nx;
int sz = field->sz ? field->sz : (int)(nx * ny);
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
uint bx = (nx + 3) / 4;
uint by = (ny + 3) / 4;
uint bz = (nz + 3) / 4;
uint blocks = bx * by * bz;
uint chunks = chunk_count_omp(stream, blocks, threads);
int chunk;
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
uint bmin = chunk_offset(blocks, chunks, chunk + 0);
uint bmax = chunk_offset(blocks, chunks, chunk + 1);
uint block;
/* set up thread-local bit stream */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin (x, y, z) within array (mixed-radix decode) */
const Scalar* p = data;
uint b = block;
uint x, y, z;
x = 4 * (b % bx); b /= bx;
y = 4 * (b % by); b /= by;
z = 4 * b;
p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y + sz * (ptrdiff_t)z;
/* compress partial or full block */
if (nx - x < 4 || ny - y < 4 || nz - z < 4)
_t2(zfp_encode_partial_block_strided, Scalar, 3)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), MIN(nz - z, 4u), sx, sy, sz);
else
_t2(zfp_encode_block_strided, Scalar, 3)(&s, p, sx, sy, sz);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
/* compress 4d strided array in parallel: 4x4x4x4 blocks, linear block index
 * decoded to (x, y, z, w) origins per block */
static void
_t2(compress_strided_omp, Scalar, 4)(zfp_stream* stream, const zfp_field* field)
{
/* array metadata */
/* FIX: cast added for consistency with the 1d-3d variants above */
const Scalar* data = (const Scalar*)field->data;
uint nx = field->nx;
uint ny = field->ny;
uint nz = field->nz;
uint nw = field->nw;
int sx = field->sx ? field->sx : 1;
int sy = field->sy ? field->sy : (int)nx;
int sz = field->sz ? field->sz : (int)(nx * ny);
int sw = field->sw ? field->sw : (int)(nx * ny * nz);
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
uint bx = (nx + 3) / 4;
uint by = (ny + 3) / 4;
uint bz = (nz + 3) / 4;
uint bw = (nw + 3) / 4;
uint blocks = bx * by * bz * bw;
uint chunks = chunk_count_omp(stream, blocks, threads);
int chunk;
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
uint bmin = chunk_offset(blocks, chunks, chunk + 0);
uint bmax = chunk_offset(blocks, chunks, chunk + 1);
uint block;
/* set up thread-local bit stream */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin (x, y, z, w) within array (mixed-radix decode) */
const Scalar* p = data;
uint b = block;
uint x, y, z, w;
x = 4 * (b % bx); b /= bx;
y = 4 * (b % by); b /= by;
z = 4 * (b % bz); b /= bz;
w = 4 * b;
p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y + sz * (ptrdiff_t)z + sw * (ptrdiff_t)w;
/* compress partial or full block */
if (nx - x < 4 || ny - y < 4 || nz - z < 4 || nw - w < 4)
_t2(zfp_encode_partial_block_strided, Scalar, 4)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), MIN(nz - z, 4u), MIN(nw - w, 4u), sx, sy, sz, sw);
else
_t2(zfp_encode_block_strided, Scalar, 4)(&s, p, sx, sy, sz, sw);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
#endif
|
DRB028-privatemissing-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
tmp should be annotated as private to avoid race condition.
Data race pairs: tmp@65:5 vs. tmp@66:12
tmp@65:5 vs. tmp@65:5
*/
#include <stdlib.h>
#include <stdio.h>
int main(int argc, char* argv[])
{
int i;
int tmp;
int len=100;
int a[100];
/* initialize a[i] = i in parallel; the loop index i is explicitly private */
#pragma omp parallel for private(i)
for (i=0;i<len;i++)
a[i]=i;
/* NOTE(review): the file-header comment documents a data race on tmp, but
tmp IS listed in the private clause here, so this version has no race --
confirm whether the racy (no private(tmp)) or fixed variant is intended,
since the benchmark's filename says "orig-yes" (race present). */
#pragma omp parallel for private(i, tmp)
for (i=0;i<len;i++)
{
tmp =a[i]+i;
a[i] = tmp;
}
printf("a[50]=%d\n", a[50]);
return 0;
}
|
GB_binop__rminus_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_uint16)
// A.*B function (eWiseMult): GB (_AemultB_01__rminus_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__rminus_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_uint16)
// A*D function (colscale): GB (_AxD__rminus_uint16)
// D*A function (rowscale): GB (_DxB__rminus_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_uint16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_uint16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_uint16)
// C=scalar+B GB (_bind1st__rminus_uint16)
// C=scalar+B' GB (_bind1st_tran__rminus_uint16)
// C=A+scalar GB (_bind2nd__rminus_uint16)
// C=A'+scalar GB (_bind2nd_tran__rminus_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (y - x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_UINT16 || GxB_NO_RMINUS_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; loop body comes from the
// shared template, specialized by the GB_* macros defined above.
void GB (_Cdense_ewise3_accum__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no accumulation into C.
GrB_Info GB (_Cdense_ewise3_noaccum__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C,
// using the tasking info in B_ek_slicing.
GrB_Info GB (_Cdense_accumB__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed as an untyped pointer) into the
// dense matrix C.
GrB_Info GB (_Cdense_accumb__rminus_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ; // unreachable; kept byte-for-byte (auto-generated)
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale, with D a diagonal matrix; result values land in Cx.
GrB_Info GB (_AxD__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ; // template writes into Cx
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale, with D a diagonal matrix; result values land in Cx.
GrB_Info GB (_DxB__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ; // template writes into Cx
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, with the rminus op (cij = bij - aij)
// applied where both entries are present; loop lives in GB_add_template.c.
GrB_Info GB (_AaddB__rminus_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces used (and freed below) by the template
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B; loop lives in GB_emult_01_meta.c.
GrB_Info GB (_AemultB_01__rminus_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for rminus (see macro above), so only the
// non-flipped template instantiation below is compiled in.
GrB_Info GB (_AemultB_02__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, with M sparse/hyper and A, B bitmap/full.
GrB_Info GB (_AemultB_03__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__rminus_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Bind the scalar x as the first operand: for rminus (z = y - x) this
// yields Cx [p] = Bx [p] - x for every entry present in B.
GrB_Info GB (_bind1st__rminus_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ; // skip entries absent from the bitmap
uint16_t bij = Bx [p] ;
Cx [p] = (bij - x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Bind the scalar y as the second operand: for rminus (z = y - x) this
// yields Cx [p] = y - Ax [p] for every entry present in A.
GrB_Info GB (_bind2nd__rminus_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ; // skip entries absent from the bitmap
uint16_t aij = Ax [p] ;
Cx [p] = (y - aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (aij - x) ; \
}
// C = op (x, A'): transpose A and apply Cx [pC] = aij - x (see the
// GB_CAST_OP redefinition immediately above this function).
GrB_Info GB (_bind1st_tran__rminus_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (y - aij) ; \
}
// C = op (A', y): transpose A and apply Cx [pC] = y - aij (see the
// GB_CAST_OP redefinition immediately above this function).
GrB_Info GB (_bind2nd_tran__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
problem.p4.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
void evaluateBeta(double x, double y, double z, double *B, double *Bx, double *By, double *Bz){
double Bmin = 1.0;
double Bmax = 10.0;
double c2 = (Bmax-Bmin)/2; // coefficients to affect this transition
double c1 = (Bmax+Bmin)/2;
double c3 = 10.0; // how sharply (B)eta transitions
double xcenter = 0.50;
double ycenter = 0.50;
double zcenter = 0.50;
// calculate distance from center of the domain (0.5,0.5,0.5)
double r2 = pow((x-xcenter),2) + pow((y-ycenter),2) + pow((z-zcenter),2);
double r2x = 2.0*(x-xcenter);
double r2y = 2.0*(y-ycenter);
double r2z = 2.0*(z-zcenter);
double r2xx = 2.0;
double r2yy = 2.0;
double r2zz = 2.0;
double r = pow(r2,0.5);
double rx = 0.5*r2x*pow(r2,-0.5);
double ry = 0.5*r2y*pow(r2,-0.5);
double rz = 0.5*r2z*pow(r2,-0.5);
double rxx = 0.5*r2xx*pow(r2,-0.5) - 0.25*r2x*r2x*pow(r2,-1.5);
double ryy = 0.5*r2yy*pow(r2,-0.5) - 0.25*r2y*r2y*pow(r2,-1.5);
double rzz = 0.5*r2zz*pow(r2,-0.5) - 0.25*r2z*r2z*pow(r2,-1.5);
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
*B = c1+c2*tanh( c3*(r-0.25) );
*Bx = c2*c3*rx*(1-pow(tanh( c3*(r-0.25) ),2));
*By = c2*c3*ry*(1-pow(tanh( c3*(r-0.25) ),2));
*Bz = c2*c3*rz*(1-pow(tanh( c3*(r-0.25) ),2));
}
//------------------------------------------------------------------------------------------------------------------------------
void evaluateU(double x, double y, double z, double *U, double *Ux, double *Uy, double *Uz, double *Uxx, double *Uyy, double *Uzz, int isPeriodic){
// should be continuous in u, u', and u''
// v(w) = w^4 - 2w^3 + w^2 + c
// u(x,y,z) = v(x)v(y)v(z)
// If Periodic, then the integral of the RHS should sum to zero.
// Setting shift=1/30 should ensure that the integrals of X, Y, or Z should sum to zero...
// That should(?) make the integrals of u,ux,uy,uz,uxx,uyy,uzz sum to zero and thus make the integral of f sum to zero
// If dirichlet, then w(0)=w(1) = 0.0
// Setting shift to 0 should ensure that U(x,y,z) = 0 on boundary
double shift = 0.0;if(isPeriodic)shift= -1.0/30.0;
double X = 1.0*pow(x,4) - 2.0*pow(x,3) + 1.0*pow(x,2) + shift;
double Y = 1.0*pow(y,4) - 2.0*pow(y,3) + 1.0*pow(y,2) + shift;
double Z = 1.0*pow(z,4) - 2.0*pow(z,3) + 1.0*pow(z,2) + shift;
double Xx = 4.0*pow(x,3) - 6.0*pow(x,2) + 2.0*x;
double Yy = 4.0*pow(y,3) - 6.0*pow(y,2) + 2.0*y;
double Zz = 4.0*pow(z,3) - 6.0*pow(z,2) + 2.0*z;
double Xxx = 12.0*pow(x,2) - 12.0*x + 2.0;
double Yyy = 12.0*pow(y,2) - 12.0*y + 2.0;
double Zzz = 12.0*pow(z,2) - 12.0*z + 2.0;
*U = X*Y*Z;
*Ux = Xx*Y*Z;
*Uy = X*Yy*Z;
*Uz = X*Y*Zz;
*Uxx = Xxx*Y*Z;
*Uyy = X*Yyy*Z;
*Uzz = X*Y*Zzz;
}
//------------------------------------------------------------------------------------------------------------------------------
// Initialize the problem on one level of the multigrid hierarchy at grid
// spacing hLevel: fill the coefficient vectors (alpha, face-centered beta_i/j/k),
// the exact solution u_true, and the right-hand side f for the operator
//   f = a*alpha*u - b*( grad(beta).grad(u) + beta*laplacian(u) )
// on every box owned by this rank.  For periodic problems, u and f are shifted
// so they sum to zero (required for solvability of the Poisson case).
void initialize_problem(level_type * level, double hLevel, double a, double b){
  level->h = hLevel;
  int box;
  for(box=0;box<level->num_my_boxes;box++){
    // zero every vector (including ghost zones) before filling the interior
    memset(level->my_boxes[box].vectors[VECTOR_ALPHA ],0,level->my_boxes[box].volume*sizeof(double));
    memset(level->my_boxes[box].vectors[VECTOR_BETA_I],0,level->my_boxes[box].volume*sizeof(double));
    memset(level->my_boxes[box].vectors[VECTOR_BETA_J],0,level->my_boxes[box].volume*sizeof(double));
    memset(level->my_boxes[box].vectors[VECTOR_BETA_K],0,level->my_boxes[box].volume*sizeof(double));
    memset(level->my_boxes[box].vectors[VECTOR_UTRUE ],0,level->my_boxes[box].volume*sizeof(double));
    memset(level->my_boxes[box].vectors[VECTOR_F     ],0,level->my_boxes[box].volume*sizeof(double));
    int i,j,k;
    int jStride = level->my_boxes[box].jStride;
    int kStride = level->my_boxes[box].kStride;
    int  ghosts = level->my_boxes[box].ghosts;
    int     dim = level->my_boxes[box].dim;
    #pragma omp parallel for private(k,j,i) collapse(2)
    for(k=0;k<dim;k++){
    for(j=0;j<dim;j++){
    for(i=0;i<dim;i++){
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      // FIX... move to quadrature version to initialize the problem.
      // i.e. the value of an array element is the average value of the function over the cell (finite volume)
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      int ijk = (i+ghosts) + (j+ghosts)*jStride + (k+ghosts)*kStride;
      // global physical coordinates of the cell center (low.* is this box's
      // global offset; +0.5 moves from the cell corner to its center)
      double x = hLevel*( (double)(i+level->my_boxes[box].low.i) + 0.5 ); // +0.5 to get to the center of cell
      double y = hLevel*( (double)(j+level->my_boxes[box].low.j) + 0.5 );
      double z = hLevel*( (double)(k+level->my_boxes[box].low.k) + 0.5 );
      double A,B,Bx,By,Bz,Bi,Bj,Bk;
      double U,Ux,Uy,Uz,Uxx,Uyy,Uzz;
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      // constant-coefficient defaults (used unless the variable-coefficient
      // stencil is compiled in below)
      A  = 1.0;
      B  = 1.0;
      Bx = 0.0;
      By = 0.0;
      Bz = 0.0;
      Bi = 1.0;
      Bj = 1.0;
      Bk = 1.0;
      #ifdef STENCIL_VARIABLE_COEFFICIENT // variable coefficient problem...
      // NOTE: the first three calls only keep the face-centered Bi/Bj/Bk;
      // their Bx/By/Bz outputs are overwritten by the final cell-centered call
      evaluateBeta(x-hLevel*0.5,y           ,z           ,&Bi,&Bx,&By,&Bz); // face-centered value of Beta for beta_i
      evaluateBeta(x           ,y-hLevel*0.5,z           ,&Bj,&Bx,&By,&Bz); // face-centered value of Beta for beta_j
      evaluateBeta(x           ,y           ,z-hLevel*0.5,&Bk,&Bx,&By,&Bz); // face-centered value of Beta for beta_k
      evaluateBeta(x           ,y           ,z           ,&B ,&Bx,&By,&Bz); // cell-centered value of Beta
      #endif
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      evaluateU(x,y,z,&U,&Ux,&Uy,&Uz,&Uxx,&Uyy,&Uzz, (level->domain_boundary_condition == BC_PERIODIC) );
      // manufactured RHS: f = a*A*u - b*div(B grad u), expanded by product rule
      double F = a*A*U - b*( (Bx*Ux + By*Uy + Bz*Uz)  +  B*(Uxx + Uyy + Uzz) );
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      level->my_boxes[box].vectors[VECTOR_ALPHA ][ijk] = A;
      level->my_boxes[box].vectors[VECTOR_BETA_I][ijk] = Bi;
      level->my_boxes[box].vectors[VECTOR_BETA_J][ijk] = Bj;
      level->my_boxes[box].vectors[VECTOR_BETA_K][ijk] = Bk;
      level->my_boxes[box].vectors[VECTOR_UTRUE][ijk] = U;
      level->my_boxes[box].vectors[VECTOR_F     ][ijk] = F;
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    }}}
  }
  // lazily classify the level as "alpha is zero" (pure Poisson) on first use;
  // -1 presumably means "not yet determined" — confirm against level_type
  if(level->alpha_is_zero==-1)level->alpha_is_zero = (dot(level,VECTOR_ALPHA,VECTOR_ALPHA) == 0.0);
  // FIX...  Periodic Boundary Conditions...
  if(level->domain_boundary_condition == BC_PERIODIC){
    double average_value_of_f = mean(level,VECTOR_F);
    if(average_value_of_f!=0.0)if(level->my_rank==0){printf("\n  WARNING... Periodic boundary conditions, but f does not sum to zero... mean(f)=%e\n",average_value_of_f);}
    if((a==0.0) || (level->alpha_is_zero==1) ){ // poisson... by convention, we assume u sums to zero...
      // shift u and f to have zero mean so the singular Poisson system is consistent
      double average_value_of_u = mean(level,VECTOR_UTRUE);
      if(level->my_rank==0){printf("\n  average value of u = %20.12e... shifting u to ensure it sums to zero...\n",average_value_of_u);fflush(stdout);}
      shift_vector(level,VECTOR_UTRUE,VECTOR_UTRUE,-average_value_of_u);
      shift_vector(level,VECTOR_F,VECTOR_F,-average_value_of_f);
    }
    //}else{ // helmholtz...
    // FIX... for helmoltz, does the fine grid RHS have to sum to zero ???
    //double average_value_of_f = mean(level,VECTOR_F);
    //if(level->my_rank==0){printf("\n");}
    //if(level->my_rank==0){printf("  average value of f = %20.12e... shifting to ensure f sums to zero...\n",average_value_of_f);fflush(stdout);}
    //if(a!=0){
    //  shift_vector(level,VECTOR_F    ,VECTOR_F    ,-average_value_of_f);
    //  shift_vector(level,VECTOR_UTRUE,VECTOR_UTRUE,-average_value_of_f/a);
    //}
    //average_value_of_f = mean(level,VECTOR_F);
    //if(level->my_rank==0){printf("  average value of f = %20.12e after shifting\n",average_value_of_f);fflush(stdout);}
    //}
  }
}
//------------------------------------------------------------------------------------------------------------------------------
|
GB_unop__signum_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__signum_fp32_fp32
// op(A') function: GB_unop_tran__signum_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = GB_signumf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_signumf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = GB_signumf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SIGNUM || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the signum operator entrywise: Cx [p] = signum (Ax [p]), p = 0..anz-1.
// The float -> float cast is the identity, so each entry is a single call to
// GB_signumf.  Cx and Ax may be aliased (each entry is read before written).
GrB_Info GB_unop_apply__signum_fp32_fp32
(
    float *Cx,               // Cx and Ax may be aliased
    const float *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // aij = Ax [p], cast to float (no-op), then apply the operator
        Cx [p] = GB_signumf (Ax [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = signum (A'): transpose A, typecast (identity here), and apply signum.
// The numeric work lives in the shared template GB_unop_transpose.c, which is
// instantiated with the GB_* macros defined at the top of this file.
GrB_Info GB_unop_tran__signum_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_PHASE_2_OF_2 selects the numeric (second) phase of the template
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
y_solve.c | //-------------------------------------------------------------------------//
// //
// This benchmark is a serial C version of the NPB BT code. This C //
// version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the serial Fortran versions in //
// "NPB3.3-SER" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this C version to cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header.h"
#include "work_lhs.h"
//#include "timers.h"
//---------------------------------------------------------------------
// Performs line solves in Y direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknown
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
//---------------------------------------------------------------------
void y_solve()
{
int i, j, k, m, n, jsize, z;
double pivot, coeff;
int gp22, gp02;
double fjacY[5][5][PROBLEM_SIZE+1][IMAXP-1][KMAX-1];
double njacY[5][5][PROBLEM_SIZE+1][IMAXP-1][KMAX-1];
double lhsY[5][5][3][PROBLEM_SIZE][IMAXP-1][KMAX-1];
double temp1, temp2, temp3;
gp22 = grid_points[2]-2;
gp02 = grid_points[0]-2;
//---------------------------------------------------------------------
// This function computes the left hand side for the three y-factors
//---------------------------------------------------------------------
jsize = grid_points[1]-1;
//---------------------------------------------------------------------
// Compute the indices for storing the tri-diagonal matrix;
// determine a (labeled f) and n jacobians for cell c
//---------------------------------------------------------------------
#pragma omp target data map(alloc:lhsY[:][:][:][:][:][:],fjacY[:][:][:][:][:],njacY[:][:][:][:][:]) //present(rho_i,u,qs,rhs,square)
{
#pragma omp target teams distribute parallel for collapse(2) private(i,j,k,temp1,temp2,temp3)
for (j = 0; j <= jsize; j++) {
for (i = 1; i <= gp02; i++) {
for (k = 1; k <= gp22; k++) {
temp1 = rho_i[k][j][i];
temp2 = temp1 * temp1;
temp3 = temp1 * temp2;
fjacY[0][0][j][i][k] = 0.0;
fjacY[0][1][j][i][k] = 0.0;
fjacY[0][2][j][i][k] = 1.0;
fjacY[0][3][j][i][k] = 0.0;
fjacY[0][4][j][i][k] = 0.0;
fjacY[1][0][j][i][k] = - ( u[k][j][i][1]*u[k][j][i][2] ) * temp2;
fjacY[1][1][j][i][k] = u[k][j][i][2] * temp1;
fjacY[1][2][j][i][k] = u[k][j][i][1] * temp1;
fjacY[1][3][j][i][k] = 0.0;
fjacY[1][4][j][i][k] = 0.0;
fjacY[2][0][j][i][k] = - ( u[k][j][i][2]*u[k][j][i][2]*temp2)
+ c2 * qs[k][j][i];
fjacY[2][1][j][i][k] = - c2 * u[k][j][i][1] * temp1;
fjacY[2][2][j][i][k] = ( 2.0 - c2 ) * u[k][j][i][2] * temp1;
fjacY[2][3][j][i][k] = - c2 * u[k][j][i][3] * temp1;
fjacY[2][4][j][i][k] = c2;
fjacY[3][0][j][i][k] = - ( u[k][j][i][2]*u[k][j][i][3] ) * temp2;
fjacY[3][1][j][i][k] = 0.0;
fjacY[3][2][j][i][k] = u[k][j][i][3] * temp1;
fjacY[3][3][j][i][k] = u[k][j][i][2] * temp1;
fjacY[3][4][j][i][k] = 0.0;
fjacY[4][0][j][i][k] = ( c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4] )
* u[k][j][i][2] * temp2;
fjacY[4][1][j][i][k] = - c2 * u[k][j][i][1]*u[k][j][i][2] * temp2;
fjacY[4][2][j][i][k] = c1 * u[k][j][i][4] * temp1
- c2 * ( qs[k][j][i] + u[k][j][i][2]*u[k][j][i][2] * temp2 );
fjacY[4][3][j][i][k] = - c2 * ( u[k][j][i][2]*u[k][j][i][3] ) * temp2;
fjacY[4][4][j][i][k] = c1 * u[k][j][i][2] * temp1;
njacY[0][0][j][i][k] = 0.0;
njacY[0][1][j][i][k] = 0.0;
njacY[0][2][j][i][k] = 0.0;
njacY[0][3][j][i][k] = 0.0;
njacY[0][4][j][i][k] = 0.0;
njacY[1][0][j][i][k] = - c3c4 * temp2 * u[k][j][i][1];
njacY[1][1][j][i][k] = c3c4 * temp1;
njacY[1][2][j][i][k] = 0.0;
njacY[1][3][j][i][k] = 0.0;
njacY[1][4][j][i][k] = 0.0;
njacY[2][0][j][i][k] = - con43 * c3c4 * temp2 * u[k][j][i][2];
njacY[2][1][j][i][k] = 0.0;
njacY[2][2][j][i][k] = con43 * c3c4 * temp1;
njacY[2][3][j][i][k] = 0.0;
njacY[2][4][j][i][k] = 0.0;
njacY[3][0][j][i][k] = - c3c4 * temp2 * u[k][j][i][3];
njacY[3][1][j][i][k] = 0.0;
njacY[3][2][j][i][k] = 0.0;
njacY[3][3][j][i][k] = c3c4 * temp1;
njacY[3][4][j][i][k] = 0.0;
njacY[4][0][j][i][k] = - ( c3c4
- c1345 ) * temp3 * (u[k][j][i][1]*u[k][j][i][1])
- ( con43 * c3c4
- c1345 ) * temp3 * (u[k][j][i][2]*u[k][j][i][2])
- ( c3c4 - c1345 ) * temp3 * (u[k][j][i][3]*u[k][j][i][3])
- c1345 * temp2 * u[k][j][i][4];
njacY[4][1][j][i][k] = ( c3c4 - c1345 ) * temp2 * u[k][j][i][1];
njacY[4][2][j][i][k] = ( con43 * c3c4 - c1345 ) * temp2 * u[k][j][i][2];
njacY[4][3][j][i][k] = ( c3c4 - c1345 ) * temp2 * u[k][j][i][3];
njacY[4][4][j][i][k] = ( c1345 ) * temp1;
}
}
}
//---------------------------------------------------------------------
// now joacobians set, so form left hand side in y direction
//---------------------------------------------------------------------
//lhsY[k][i]init(lhsY[k][i], jsize);
// zero the whole left hand side for starters
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for private(i,k) collapse(3)
#else
#pragma omp target teams distribute parallel for simd collapse(4)
#endif
for (m = 0; m < 5; m++) {
for (n = 0; n < 5; n++) {
for (i = 1; i <= gp02; i++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd
#endif
for (k = 1; k <= gp22; k++) {
lhsY[m][n][0][0][i][k] = 0.0;
lhsY[m][n][1][0][i][k] = 0.0;
lhsY[m][n][2][0][i][k] = 0.0;
lhsY[m][n][0][jsize][i][k] = 0.0;
lhsY[m][n][1][jsize][i][k] = 0.0;
lhsY[m][n][2][jsize][i][k] = 0.0;
}
}
}
}
// next, set all diagonal values to 1. This is overkill, but convenient
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for private(i,k) collapse(2)
#else
#pragma omp target teams distribute parallel for simd collapse(3)
#endif
for (m = 0; m < 5; m++){
for (i = 1; i <= gp02; i++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd
#endif
for (k = 1; k <= gp22; k++) {
lhsY[m][m][1][0][i][k] = 1.0;
lhsY[m][m][1][jsize][i][k] = 1.0;
}
}
}
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for collapse(2) private(i,j,k)
#else
#pragma omp target teams distribute parallel for simd collapse(3)
#endif
for (j = 1; j <= jsize-1; j++) {
for (i = 1; i <= gp02; i++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd
#endif
for (k = 1; k <= gp22; k++) {
lhsY[0][0][AA][j][i][k] = - dtty2 * fjacY[0][0][j-1][i][k]
- dtty1 * njacY[0][0][j-1][i][k]
- dtty1 * dy1;
lhsY[0][1][AA][j][i][k] = - dtty2 * fjacY[0][1][j-1][i][k]
- dtty1 * njacY[0][1][j-1][i][k];
lhsY[0][2][AA][j][i][k] = - dtty2 * fjacY[0][2][j-1][i][k]
- dtty1 * njacY[0][2][j-1][i][k];
lhsY[0][3][AA][j][i][k] = - dtty2 * fjacY[0][3][j-1][i][k]
- dtty1 * njacY[0][3][j-1][i][k];
lhsY[0][4][AA][j][i][k] = - dtty2 * fjacY[0][4][j-1][i][k]
- dtty1 * njacY[0][4][j-1][i][k];
lhsY[1][0][AA][j][i][k] = - dtty2 * fjacY[1][0][j-1][i][k]
- dtty1 * njacY[1][0][j-1][i][k];
lhsY[1][1][AA][j][i][k] = - dtty2 * fjacY[1][1][j-1][i][k]
- dtty1 * njacY[1][1][j-1][i][k]
- dtty1 * dy2;
lhsY[1][2][AA][j][i][k] = - dtty2 * fjacY[1][2][j-1][i][k]
- dtty1 * njacY[1][2][j-1][i][k];
lhsY[1][3][AA][j][i][k] = - dtty2 * fjacY[1][3][j-1][i][k]
- dtty1 * njacY[1][3][j-1][i][k];
lhsY[1][4][AA][j][i][k] = - dtty2 * fjacY[1][4][j-1][i][k]
- dtty1 * njacY[1][4][j-1][i][k];
lhsY[2][0][AA][j][i][k] = - dtty2 * fjacY[2][0][j-1][i][k]
- dtty1 * njacY[2][0][j-1][i][k];
lhsY[2][1][AA][j][i][k] = - dtty2 * fjacY[2][1][j-1][i][k]
- dtty1 * njacY[2][1][j-1][i][k];
lhsY[2][2][AA][j][i][k] = - dtty2 * fjacY[2][2][j-1][i][k]
- dtty1 * njacY[2][2][j-1][i][k]
- dtty1 * dy3;
lhsY[2][3][AA][j][i][k] = - dtty2 * fjacY[2][3][j-1][i][k]
- dtty1 * njacY[2][3][j-1][i][k];
lhsY[2][4][AA][j][i][k] = - dtty2 * fjacY[2][4][j-1][i][k]
- dtty1 * njacY[2][4][j-1][i][k];
lhsY[3][0][AA][j][i][k] = - dtty2 * fjacY[3][0][j-1][i][k]
- dtty1 * njacY[3][0][j-1][i][k];
lhsY[3][1][AA][j][i][k] = - dtty2 * fjacY[3][1][j-1][i][k]
- dtty1 * njacY[3][1][j-1][i][k];
lhsY[3][2][AA][j][i][k] = - dtty2 * fjacY[3][2][j-1][i][k]
- dtty1 * njacY[3][2][j-1][i][k];
lhsY[3][3][AA][j][i][k] = - dtty2 * fjacY[3][3][j-1][i][k]
- dtty1 * njacY[3][3][j-1][i][k]
- dtty1 * dy4;
lhsY[3][4][AA][j][i][k] = - dtty2 * fjacY[3][4][j-1][i][k]
- dtty1 * njacY[3][4][j-1][i][k];
lhsY[4][0][AA][j][i][k] = - dtty2 * fjacY[4][0][j-1][i][k]
- dtty1 * njacY[4][0][j-1][i][k];
lhsY[4][1][AA][j][i][k] = - dtty2 * fjacY[4][1][j-1][i][k]
- dtty1 * njacY[4][1][j-1][i][k];
lhsY[4][2][AA][j][i][k] = - dtty2 * fjacY[4][2][j-1][i][k]
- dtty1 * njacY[4][2][j-1][i][k];
lhsY[4][3][AA][j][i][k] = - dtty2 * fjacY[4][3][j-1][i][k]
- dtty1 * njacY[4][3][j-1][i][k];
lhsY[4][4][AA][j][i][k] = - dtty2 * fjacY[4][4][j-1][i][k]
- dtty1 * njacY[4][4][j-1][i][k]
- dtty1 * dy5;
lhsY[0][0][BB][j][i][k] = 1.0
+ dtty1 * 2.0 * njacY[0][0][j][i][k]
+ dtty1 * 2.0 * dy1;
lhsY[0][1][BB][j][i][k] = dtty1 * 2.0 * njacY[0][1][j][i][k];
lhsY[0][2][BB][j][i][k] = dtty1 * 2.0 * njacY[0][2][j][i][k];
lhsY[0][3][BB][j][i][k] = dtty1 * 2.0 * njacY[0][3][j][i][k];
lhsY[0][4][BB][j][i][k] = dtty1 * 2.0 * njacY[0][4][j][i][k];
lhsY[1][0][BB][j][i][k] = dtty1 * 2.0 * njacY[1][0][j][i][k];
lhsY[1][1][BB][j][i][k] = 1.0
+ dtty1 * 2.0 * njacY[1][1][j][i][k]
+ dtty1 * 2.0 * dy2;
lhsY[1][2][BB][j][i][k] = dtty1 * 2.0 * njacY[1][2][j][i][k];
lhsY[1][3][BB][j][i][k] = dtty1 * 2.0 * njacY[1][3][j][i][k];
lhsY[1][4][BB][j][i][k] = dtty1 * 2.0 * njacY[1][4][j][i][k];
lhsY[2][0][BB][j][i][k] = dtty1 * 2.0 * njacY[2][0][j][i][k];
lhsY[2][1][BB][j][i][k] = dtty1 * 2.0 * njacY[2][1][j][i][k];
lhsY[2][2][BB][j][i][k] = 1.0
+ dtty1 * 2.0 * njacY[2][2][j][i][k]
+ dtty1 * 2.0 * dy3;
lhsY[2][3][BB][j][i][k] = dtty1 * 2.0 * njacY[2][3][j][i][k];
lhsY[2][4][BB][j][i][k] = dtty1 * 2.0 * njacY[2][4][j][i][k];
lhsY[3][0][BB][j][i][k] = dtty1 * 2.0 * njacY[3][0][j][i][k];
lhsY[3][1][BB][j][i][k] = dtty1 * 2.0 * njacY[3][1][j][i][k];
lhsY[3][2][BB][j][i][k] = dtty1 * 2.0 * njacY[3][2][j][i][k];
lhsY[3][3][BB][j][i][k] = 1.0
+ dtty1 * 2.0 * njacY[3][3][j][i][k]
+ dtty1 * 2.0 * dy4;
lhsY[3][4][BB][j][i][k] = dtty1 * 2.0 * njacY[3][4][j][i][k];
lhsY[4][0][BB][j][i][k] = dtty1 * 2.0 * njacY[4][0][j][i][k];
lhsY[4][1][BB][j][i][k] = dtty1 * 2.0 * njacY[4][1][j][i][k];
lhsY[4][2][BB][j][i][k] = dtty1 * 2.0 * njacY[4][2][j][i][k];
lhsY[4][3][BB][j][i][k] = dtty1 * 2.0 * njacY[4][3][j][i][k];
lhsY[4][4][BB][j][i][k] = 1.0
+ dtty1 * 2.0 * njacY[4][4][j][i][k]
+ dtty1 * 2.0 * dy5;
lhsY[0][0][CC][j][i][k] = dtty2 * fjacY[0][0][j+1][i][k]
- dtty1 * njacY[0][0][j+1][i][k]
- dtty1 * dy1;
lhsY[0][1][CC][j][i][k] = dtty2 * fjacY[0][1][j+1][i][k]
- dtty1 * njacY[0][1][j+1][i][k];
lhsY[0][2][CC][j][i][k] = dtty2 * fjacY[0][2][j+1][i][k]
- dtty1 * njacY[0][2][j+1][i][k];
lhsY[0][3][CC][j][i][k] = dtty2 * fjacY[0][3][j+1][i][k]
- dtty1 * njacY[0][3][j+1][i][k];
lhsY[0][4][CC][j][i][k] = dtty2 * fjacY[0][4][j+1][i][k]
- dtty1 * njacY[0][4][j+1][i][k];
lhsY[1][0][CC][j][i][k] = dtty2 * fjacY[1][0][j+1][i][k]
- dtty1 * njacY[1][0][j+1][i][k];
lhsY[1][1][CC][j][i][k] = dtty2 * fjacY[1][1][j+1][i][k]
- dtty1 * njacY[1][1][j+1][i][k]
- dtty1 * dy2;
lhsY[1][2][CC][j][i][k] = dtty2 * fjacY[1][2][j+1][i][k]
- dtty1 * njacY[1][2][j+1][i][k];
lhsY[1][3][CC][j][i][k] = dtty2 * fjacY[1][3][j+1][i][k]
- dtty1 * njacY[1][3][j+1][i][k];
lhsY[1][4][CC][j][i][k] = dtty2 * fjacY[1][4][j+1][i][k]
- dtty1 * njacY[1][4][j+1][i][k];
lhsY[2][0][CC][j][i][k] = dtty2 * fjacY[2][0][j+1][i][k]
- dtty1 * njacY[2][0][j+1][i][k];
lhsY[2][1][CC][j][i][k] = dtty2 * fjacY[2][1][j+1][i][k]
- dtty1 * njacY[2][1][j+1][i][k];
lhsY[2][2][CC][j][i][k] = dtty2 * fjacY[2][2][j+1][i][k]
- dtty1 * njacY[2][2][j+1][i][k]
- dtty1 * dy3;
lhsY[2][3][CC][j][i][k] = dtty2 * fjacY[2][3][j+1][i][k]
- dtty1 * njacY[2][3][j+1][i][k];
lhsY[2][4][CC][j][i][k] = dtty2 * fjacY[2][4][j+1][i][k]
- dtty1 * njacY[2][4][j+1][i][k];
lhsY[3][0][CC][j][i][k] = dtty2 * fjacY[3][0][j+1][i][k]
- dtty1 * njacY[3][0][j+1][i][k];
lhsY[3][1][CC][j][i][k] = dtty2 * fjacY[3][1][j+1][i][k]
- dtty1 * njacY[3][1][j+1][i][k];
lhsY[3][2][CC][j][i][k] = dtty2 * fjacY[3][2][j+1][i][k]
- dtty1 * njacY[3][2][j+1][i][k];
lhsY[3][3][CC][j][i][k] = dtty2 * fjacY[3][3][j+1][i][k]
- dtty1 * njacY[3][3][j+1][i][k]
- dtty1 * dy4;
lhsY[3][4][CC][j][i][k] = dtty2 * fjacY[3][4][j+1][i][k]
- dtty1 * njacY[3][4][j+1][i][k];
lhsY[4][0][CC][j][i][k] = dtty2 * fjacY[4][0][j+1][i][k]
- dtty1 * njacY[4][0][j+1][i][k];
lhsY[4][1][CC][j][i][k] = dtty2 * fjacY[4][1][j+1][i][k]
- dtty1 * njacY[4][1][j+1][i][k];
lhsY[4][2][CC][j][i][k] = dtty2 * fjacY[4][2][j+1][i][k]
- dtty1 * njacY[4][2][j+1][i][k];
lhsY[4][3][CC][j][i][k] = dtty2 * fjacY[4][3][j+1][i][k]
- dtty1 * njacY[4][3][j+1][i][k];
lhsY[4][4][CC][j][i][k] = dtty2 * fjacY[4][4][j+1][i][k]
- dtty1 * njacY[4][4][j+1][i][k]
- dtty1 * dy5;
}
}
}
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// performs guaussian elimination on this cell.
//
// assumes that unpacking routines for non-first cells
// preload C' and rhs' from previous cell.
//
// assumed send happens outside this routine, but that
// c'(JMAX) and rhs'(JMAX) will be sent to next cell
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// multiply c[k][0][i] by b_inverse and copy back to c
// multiply rhs(0) by b_inverse(0) and copy to rhs
//---------------------------------------------------------------------
//binvcrhs( lhsY[0][i][BB], lhsY[k][0][i][k][CC], rhs[k][0][i] );
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for private(i,k,pivot, coeff)
#else
#pragma omp target teams distribute parallel for simd private(pivot, coeff) collapse(2)
#endif
for (i = 1; i <= gp02; i++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd private(pivot, coeff)
#endif
for (k = 1; k <= gp22; k++) {
/*
for(m = 0; m < 5; m++){
pivot = 1.00/lhsY[m][m][BB][0][i][k];
for(n = m+1; n < 5; n++){
lhsY[m][n][BB][0][i][k] = lhsY[m][n][BB][0][i][k]*pivot;
}
lhsY[m][0][CC][0][i][k] = lhsY[m][0][CC][0][i][k]*pivot;
lhsY[m][1][CC][0][i][k] = lhsY[m][1][CC][0][i][k]*pivot;
lhsY[m][2][CC][0][i][k] = lhsY[m][2][CC][0][i][k]*pivot;
lhsY[m][3][CC][0][i][k] = lhsY[m][3][CC][0][i][k]*pivot;
lhsY[m][4][CC][0][i][k] = lhsY[m][4][CC][0][i][k]*pivot;
rhs[k][0][i][m] = rhs[k][0][i][m]*pivot;
for(n = 0; n < 5; n++){
if(n != m){
coeff = lhsY[n][m][BB][0][i][k];
for(z = m+1; z < 5; z++){
lhsY[n][z][BB][0][i][k] = lhsY[n][z][BB][0][i][k] - coeff*lhsY[m][z][BB][0][i][k];
}
lhsY[n][0][CC][0][i][k] = lhsY[n][0][CC][0][i][k] - coeff*lhsY[m][0][CC][0][i][k];
lhsY[n][1][CC][0][i][k] = lhsY[n][1][CC][0][i][k] - coeff*lhsY[m][1][CC][0][i][k];
lhsY[n][2][CC][0][i][k] = lhsY[n][2][CC][0][i][k] - coeff*lhsY[m][2][CC][0][i][k];
lhsY[n][3][CC][0][i][k] = lhsY[n][3][CC][0][i][k] - coeff*lhsY[m][3][CC][0][i][k];
lhsY[n][4][CC][0][i][k] = lhsY[n][4][CC][0][i][k] - coeff*lhsY[m][4][CC][0][i][k];
rhs[k][0][i][n] = rhs[k][0][i][n] - coeff*rhs[k][0][i][m];
}
}
}
*/
pivot = 1.00/lhsY[0][0][BB][0][i][k];
lhsY[0][1][BB][0][i][k] = lhsY[0][1][BB][0][i][k]*pivot;
lhsY[0][2][BB][0][i][k] = lhsY[0][2][BB][0][i][k]*pivot;
lhsY[0][3][BB][0][i][k] = lhsY[0][3][BB][0][i][k]*pivot;
lhsY[0][4][BB][0][i][k] = lhsY[0][4][BB][0][i][k]*pivot;
lhsY[0][0][CC][0][i][k] = lhsY[0][0][CC][0][i][k]*pivot;
lhsY[0][1][CC][0][i][k] = lhsY[0][1][CC][0][i][k]*pivot;
lhsY[0][2][CC][0][i][k] = lhsY[0][2][CC][0][i][k]*pivot;
lhsY[0][3][CC][0][i][k] = lhsY[0][3][CC][0][i][k]*pivot;
lhsY[0][4][CC][0][i][k] = lhsY[0][4][CC][0][i][k]*pivot;
rhs[k][0][i][0] = rhs[k][0][i][0] *pivot;
coeff = lhsY[1][0][BB][0][i][k];
lhsY[1][1][BB][0][i][k]= lhsY[1][1][BB][0][i][k] - coeff*lhsY[0][1][BB][0][i][k];
lhsY[1][2][BB][0][i][k]= lhsY[1][2][BB][0][i][k] - coeff*lhsY[0][2][BB][0][i][k];
lhsY[1][3][BB][0][i][k]= lhsY[1][3][BB][0][i][k] - coeff*lhsY[0][3][BB][0][i][k];
lhsY[1][4][BB][0][i][k]= lhsY[1][4][BB][0][i][k] - coeff*lhsY[0][4][BB][0][i][k];
lhsY[1][0][CC][0][i][k] = lhsY[1][0][CC][0][i][k] - coeff*lhsY[0][0][CC][0][i][k];
lhsY[1][1][CC][0][i][k] = lhsY[1][1][CC][0][i][k] - coeff*lhsY[0][1][CC][0][i][k];
lhsY[1][2][CC][0][i][k] = lhsY[1][2][CC][0][i][k] - coeff*lhsY[0][2][CC][0][i][k];
lhsY[1][3][CC][0][i][k] = lhsY[1][3][CC][0][i][k] - coeff*lhsY[0][3][CC][0][i][k];
lhsY[1][4][CC][0][i][k] = lhsY[1][4][CC][0][i][k] - coeff*lhsY[0][4][CC][0][i][k];
rhs[k][0][i][1] = rhs[k][0][i][1] - coeff*rhs[k][0][i][0];
coeff = lhsY[2][0][BB][0][i][k];
lhsY[2][1][BB][0][i][k]= lhsY[2][1][BB][0][i][k] - coeff*lhsY[0][1][BB][0][i][k];
lhsY[2][2][BB][0][i][k]= lhsY[2][2][BB][0][i][k] - coeff*lhsY[0][2][BB][0][i][k];
lhsY[2][3][BB][0][i][k]= lhsY[2][3][BB][0][i][k] - coeff*lhsY[0][3][BB][0][i][k];
lhsY[2][4][BB][0][i][k]= lhsY[2][4][BB][0][i][k] - coeff*lhsY[0][4][BB][0][i][k];
lhsY[2][0][CC][0][i][k] = lhsY[2][0][CC][0][i][k] - coeff*lhsY[0][0][CC][0][i][k];
lhsY[2][1][CC][0][i][k] = lhsY[2][1][CC][0][i][k] - coeff*lhsY[0][1][CC][0][i][k];
lhsY[2][2][CC][0][i][k] = lhsY[2][2][CC][0][i][k] - coeff*lhsY[0][2][CC][0][i][k];
lhsY[2][3][CC][0][i][k] = lhsY[2][3][CC][0][i][k] - coeff*lhsY[0][3][CC][0][i][k];
lhsY[2][4][CC][0][i][k] = lhsY[2][4][CC][0][i][k] - coeff*lhsY[0][4][CC][0][i][k];
rhs[k][0][i][2] = rhs[k][0][i][2] - coeff*rhs[k][0][i][0];
coeff = lhsY[3][0][BB][0][i][k];
lhsY[3][1][BB][0][i][k]= lhsY[3][1][BB][0][i][k] - coeff*lhsY[0][1][BB][0][i][k];
lhsY[3][2][BB][0][i][k]= lhsY[3][2][BB][0][i][k] - coeff*lhsY[0][2][BB][0][i][k];
lhsY[3][3][BB][0][i][k]= lhsY[3][3][BB][0][i][k] - coeff*lhsY[0][3][BB][0][i][k];
lhsY[3][4][BB][0][i][k]= lhsY[3][4][BB][0][i][k] - coeff*lhsY[0][4][BB][0][i][k];
lhsY[3][0][CC][0][i][k] = lhsY[3][0][CC][0][i][k] - coeff*lhsY[0][0][CC][0][i][k];
lhsY[3][1][CC][0][i][k] = lhsY[3][1][CC][0][i][k] - coeff*lhsY[0][1][CC][0][i][k];
lhsY[3][2][CC][0][i][k] = lhsY[3][2][CC][0][i][k] - coeff*lhsY[0][2][CC][0][i][k];
lhsY[3][3][CC][0][i][k] = lhsY[3][3][CC][0][i][k] - coeff*lhsY[0][3][CC][0][i][k];
lhsY[3][4][CC][0][i][k] = lhsY[3][4][CC][0][i][k] - coeff*lhsY[0][4][CC][0][i][k];
rhs[k][0][i][3] = rhs[k][0][i][3] - coeff*rhs[k][0][i][0];
coeff = lhsY[4][0][BB][0][i][k];
lhsY[4][1][BB][0][i][k]= lhsY[4][1][BB][0][i][k] - coeff*lhsY[0][1][BB][0][i][k];
lhsY[4][2][BB][0][i][k]= lhsY[4][2][BB][0][i][k] - coeff*lhsY[0][2][BB][0][i][k];
lhsY[4][3][BB][0][i][k]= lhsY[4][3][BB][0][i][k] - coeff*lhsY[0][3][BB][0][i][k];
lhsY[4][4][BB][0][i][k]= lhsY[4][4][BB][0][i][k] - coeff*lhsY[0][4][BB][0][i][k];
lhsY[4][0][CC][0][i][k] = lhsY[4][0][CC][0][i][k] - coeff*lhsY[0][0][CC][0][i][k];
lhsY[4][1][CC][0][i][k] = lhsY[4][1][CC][0][i][k] - coeff*lhsY[0][1][CC][0][i][k];
lhsY[4][2][CC][0][i][k] = lhsY[4][2][CC][0][i][k] - coeff*lhsY[0][2][CC][0][i][k];
lhsY[4][3][CC][0][i][k] = lhsY[4][3][CC][0][i][k] - coeff*lhsY[0][3][CC][0][i][k];
lhsY[4][4][CC][0][i][k] = lhsY[4][4][CC][0][i][k] - coeff*lhsY[0][4][CC][0][i][k];
rhs[k][0][i][4] = rhs[k][0][i][4] - coeff*rhs[k][0][i][0];
pivot = 1.00/lhsY[1][1][BB][0][i][k];
lhsY[1][2][BB][0][i][k] = lhsY[1][2][BB][0][i][k]*pivot;
lhsY[1][3][BB][0][i][k] = lhsY[1][3][BB][0][i][k]*pivot;
lhsY[1][4][BB][0][i][k] = lhsY[1][4][BB][0][i][k]*pivot;
lhsY[1][0][CC][0][i][k] = lhsY[1][0][CC][0][i][k]*pivot;
lhsY[1][1][CC][0][i][k] = lhsY[1][1][CC][0][i][k]*pivot;
lhsY[1][2][CC][0][i][k] = lhsY[1][2][CC][0][i][k]*pivot;
lhsY[1][3][CC][0][i][k] = lhsY[1][3][CC][0][i][k]*pivot;
lhsY[1][4][CC][0][i][k] = lhsY[1][4][CC][0][i][k]*pivot;
rhs[k][0][i][1] = rhs[k][0][i][1] *pivot;
coeff = lhsY[0][1][BB][0][i][k];
lhsY[0][2][BB][0][i][k]= lhsY[0][2][BB][0][i][k] - coeff*lhsY[1][2][BB][0][i][k];
lhsY[0][3][BB][0][i][k]= lhsY[0][3][BB][0][i][k] - coeff*lhsY[1][3][BB][0][i][k];
lhsY[0][4][BB][0][i][k]= lhsY[0][4][BB][0][i][k] - coeff*lhsY[1][4][BB][0][i][k];
lhsY[0][0][CC][0][i][k] = lhsY[0][0][CC][0][i][k] - coeff*lhsY[1][0][CC][0][i][k];
lhsY[0][1][CC][0][i][k] = lhsY[0][1][CC][0][i][k] - coeff*lhsY[1][1][CC][0][i][k];
lhsY[0][2][CC][0][i][k] = lhsY[0][2][CC][0][i][k] - coeff*lhsY[1][2][CC][0][i][k];
lhsY[0][3][CC][0][i][k] = lhsY[0][3][CC][0][i][k] - coeff*lhsY[1][3][CC][0][i][k];
lhsY[0][4][CC][0][i][k] = lhsY[0][4][CC][0][i][k] - coeff*lhsY[1][4][CC][0][i][k];
rhs[k][0][i][0] = rhs[k][0][i][0] - coeff*rhs[k][0][i][1];
coeff = lhsY[2][1][BB][0][i][k];
lhsY[2][2][BB][0][i][k]= lhsY[2][2][BB][0][i][k] - coeff*lhsY[1][2][BB][0][i][k];
lhsY[2][3][BB][0][i][k]= lhsY[2][3][BB][0][i][k] - coeff*lhsY[1][3][BB][0][i][k];
lhsY[2][4][BB][0][i][k]= lhsY[2][4][BB][0][i][k] - coeff*lhsY[1][4][BB][0][i][k];
lhsY[2][0][CC][0][i][k] = lhsY[2][0][CC][0][i][k] - coeff*lhsY[1][0][CC][0][i][k];
lhsY[2][1][CC][0][i][k] = lhsY[2][1][CC][0][i][k] - coeff*lhsY[1][1][CC][0][i][k];
lhsY[2][2][CC][0][i][k] = lhsY[2][2][CC][0][i][k] - coeff*lhsY[1][2][CC][0][i][k];
lhsY[2][3][CC][0][i][k] = lhsY[2][3][CC][0][i][k] - coeff*lhsY[1][3][CC][0][i][k];
lhsY[2][4][CC][0][i][k] = lhsY[2][4][CC][0][i][k] - coeff*lhsY[1][4][CC][0][i][k];
rhs[k][0][i][2] = rhs[k][0][i][2] - coeff*rhs[k][0][i][1];
coeff = lhsY[3][1][BB][0][i][k];
lhsY[3][2][BB][0][i][k]= lhsY[3][2][BB][0][i][k] - coeff*lhsY[1][2][BB][0][i][k];
lhsY[3][3][BB][0][i][k]= lhsY[3][3][BB][0][i][k] - coeff*lhsY[1][3][BB][0][i][k];
lhsY[3][4][BB][0][i][k]= lhsY[3][4][BB][0][i][k] - coeff*lhsY[1][4][BB][0][i][k];
lhsY[3][0][CC][0][i][k] = lhsY[3][0][CC][0][i][k] - coeff*lhsY[1][0][CC][0][i][k];
lhsY[3][1][CC][0][i][k] = lhsY[3][1][CC][0][i][k] - coeff*lhsY[1][1][CC][0][i][k];
lhsY[3][2][CC][0][i][k] = lhsY[3][2][CC][0][i][k] - coeff*lhsY[1][2][CC][0][i][k];
lhsY[3][3][CC][0][i][k] = lhsY[3][3][CC][0][i][k] - coeff*lhsY[1][3][CC][0][i][k];
lhsY[3][4][CC][0][i][k] = lhsY[3][4][CC][0][i][k] - coeff*lhsY[1][4][CC][0][i][k];
rhs[k][0][i][3] = rhs[k][0][i][3] - coeff*rhs[k][0][i][1];
coeff = lhsY[4][1][BB][0][i][k];
lhsY[4][2][BB][0][i][k]= lhsY[4][2][BB][0][i][k] - coeff*lhsY[1][2][BB][0][i][k];
lhsY[4][3][BB][0][i][k]= lhsY[4][3][BB][0][i][k] - coeff*lhsY[1][3][BB][0][i][k];
lhsY[4][4][BB][0][i][k]= lhsY[4][4][BB][0][i][k] - coeff*lhsY[1][4][BB][0][i][k];
lhsY[4][0][CC][0][i][k] = lhsY[4][0][CC][0][i][k] - coeff*lhsY[1][0][CC][0][i][k];
lhsY[4][1][CC][0][i][k] = lhsY[4][1][CC][0][i][k] - coeff*lhsY[1][1][CC][0][i][k];
lhsY[4][2][CC][0][i][k] = lhsY[4][2][CC][0][i][k] - coeff*lhsY[1][2][CC][0][i][k];
lhsY[4][3][CC][0][i][k] = lhsY[4][3][CC][0][i][k] - coeff*lhsY[1][3][CC][0][i][k];
lhsY[4][4][CC][0][i][k] = lhsY[4][4][CC][0][i][k] - coeff*lhsY[1][4][CC][0][i][k];
rhs[k][0][i][4] = rhs[k][0][i][4] - coeff*rhs[k][0][i][1];
pivot = 1.00/lhsY[2][2][BB][0][i][k];
lhsY[2][3][BB][0][i][k] = lhsY[2][3][BB][0][i][k]*pivot;
lhsY[2][4][BB][0][i][k] = lhsY[2][4][BB][0][i][k]*pivot;
lhsY[2][0][CC][0][i][k] = lhsY[2][0][CC][0][i][k]*pivot;
lhsY[2][1][CC][0][i][k] = lhsY[2][1][CC][0][i][k]*pivot;
lhsY[2][2][CC][0][i][k] = lhsY[2][2][CC][0][i][k]*pivot;
lhsY[2][3][CC][0][i][k] = lhsY[2][3][CC][0][i][k]*pivot;
lhsY[2][4][CC][0][i][k] = lhsY[2][4][CC][0][i][k]*pivot;
rhs[k][0][i][2] = rhs[k][0][i][2] *pivot;
coeff = lhsY[0][2][BB][0][i][k];
lhsY[0][3][BB][0][i][k]= lhsY[0][3][BB][0][i][k] - coeff*lhsY[2][3][BB][0][i][k];
lhsY[0][4][BB][0][i][k]= lhsY[0][4][BB][0][i][k] - coeff*lhsY[2][4][BB][0][i][k];
lhsY[0][0][CC][0][i][k] = lhsY[0][0][CC][0][i][k] - coeff*lhsY[2][0][CC][0][i][k];
lhsY[0][1][CC][0][i][k] = lhsY[0][1][CC][0][i][k] - coeff*lhsY[2][1][CC][0][i][k];
lhsY[0][2][CC][0][i][k] = lhsY[0][2][CC][0][i][k] - coeff*lhsY[2][2][CC][0][i][k];
lhsY[0][3][CC][0][i][k] = lhsY[0][3][CC][0][i][k] - coeff*lhsY[2][3][CC][0][i][k];
lhsY[0][4][CC][0][i][k] = lhsY[0][4][CC][0][i][k] - coeff*lhsY[2][4][CC][0][i][k];
rhs[k][0][i][0] = rhs[k][0][i][0] - coeff*rhs[k][0][i][2];
coeff = lhsY[1][2][BB][0][i][k];
lhsY[1][3][BB][0][i][k]= lhsY[1][3][BB][0][i][k] - coeff*lhsY[2][3][BB][0][i][k];
lhsY[1][4][BB][0][i][k]= lhsY[1][4][BB][0][i][k] - coeff*lhsY[2][4][BB][0][i][k];
lhsY[1][0][CC][0][i][k] = lhsY[1][0][CC][0][i][k] - coeff*lhsY[2][0][CC][0][i][k];
lhsY[1][1][CC][0][i][k] = lhsY[1][1][CC][0][i][k] - coeff*lhsY[2][1][CC][0][i][k];
lhsY[1][2][CC][0][i][k] = lhsY[1][2][CC][0][i][k] - coeff*lhsY[2][2][CC][0][i][k];
lhsY[1][3][CC][0][i][k] = lhsY[1][3][CC][0][i][k] - coeff*lhsY[2][3][CC][0][i][k];
lhsY[1][4][CC][0][i][k] = lhsY[1][4][CC][0][i][k] - coeff*lhsY[2][4][CC][0][i][k];
rhs[k][0][i][1] = rhs[k][0][i][1] - coeff*rhs[k][0][i][2];
coeff = lhsY[3][2][BB][0][i][k];
lhsY[3][3][BB][0][i][k]= lhsY[3][3][BB][0][i][k] - coeff*lhsY[2][3][BB][0][i][k];
lhsY[3][4][BB][0][i][k]= lhsY[3][4][BB][0][i][k] - coeff*lhsY[2][4][BB][0][i][k];
lhsY[3][0][CC][0][i][k] = lhsY[3][0][CC][0][i][k] - coeff*lhsY[2][0][CC][0][i][k];
lhsY[3][1][CC][0][i][k] = lhsY[3][1][CC][0][i][k] - coeff*lhsY[2][1][CC][0][i][k];
lhsY[3][2][CC][0][i][k] = lhsY[3][2][CC][0][i][k] - coeff*lhsY[2][2][CC][0][i][k];
lhsY[3][3][CC][0][i][k] = lhsY[3][3][CC][0][i][k] - coeff*lhsY[2][3][CC][0][i][k];
lhsY[3][4][CC][0][i][k] = lhsY[3][4][CC][0][i][k] - coeff*lhsY[2][4][CC][0][i][k];
rhs[k][0][i][3] = rhs[k][0][i][3] - coeff*rhs[k][0][i][2];
coeff = lhsY[4][2][BB][0][i][k];
lhsY[4][3][BB][0][i][k]= lhsY[4][3][BB][0][i][k] - coeff*lhsY[2][3][BB][0][i][k];
lhsY[4][4][BB][0][i][k]= lhsY[4][4][BB][0][i][k] - coeff*lhsY[2][4][BB][0][i][k];
lhsY[4][0][CC][0][i][k] = lhsY[4][0][CC][0][i][k] - coeff*lhsY[2][0][CC][0][i][k];
lhsY[4][1][CC][0][i][k] = lhsY[4][1][CC][0][i][k] - coeff*lhsY[2][1][CC][0][i][k];
lhsY[4][2][CC][0][i][k] = lhsY[4][2][CC][0][i][k] - coeff*lhsY[2][2][CC][0][i][k];
lhsY[4][3][CC][0][i][k] = lhsY[4][3][CC][0][i][k] - coeff*lhsY[2][3][CC][0][i][k];
lhsY[4][4][CC][0][i][k] = lhsY[4][4][CC][0][i][k] - coeff*lhsY[2][4][CC][0][i][k];
rhs[k][0][i][4] = rhs[k][0][i][4] - coeff*rhs[k][0][i][2];
pivot = 1.00/lhsY[3][3][BB][0][i][k];
lhsY[3][4][BB][0][i][k] = lhsY[3][4][BB][0][i][k]*pivot;
lhsY[3][0][CC][0][i][k] = lhsY[3][0][CC][0][i][k]*pivot;
lhsY[3][1][CC][0][i][k] = lhsY[3][1][CC][0][i][k]*pivot;
lhsY[3][2][CC][0][i][k] = lhsY[3][2][CC][0][i][k]*pivot;
lhsY[3][3][CC][0][i][k] = lhsY[3][3][CC][0][i][k]*pivot;
lhsY[3][4][CC][0][i][k] = lhsY[3][4][CC][0][i][k]*pivot;
rhs[k][0][i][3] = rhs[k][0][i][3] *pivot;
coeff = lhsY[0][3][BB][0][i][k];
lhsY[0][4][BB][0][i][k]= lhsY[0][4][BB][0][i][k] - coeff*lhsY[3][4][BB][0][i][k];
lhsY[0][0][CC][0][i][k] = lhsY[0][0][CC][0][i][k] - coeff*lhsY[3][0][CC][0][i][k];
lhsY[0][1][CC][0][i][k] = lhsY[0][1][CC][0][i][k] - coeff*lhsY[3][1][CC][0][i][k];
lhsY[0][2][CC][0][i][k] = lhsY[0][2][CC][0][i][k] - coeff*lhsY[3][2][CC][0][i][k];
lhsY[0][3][CC][0][i][k] = lhsY[0][3][CC][0][i][k] - coeff*lhsY[3][3][CC][0][i][k];
lhsY[0][4][CC][0][i][k] = lhsY[0][4][CC][0][i][k] - coeff*lhsY[3][4][CC][0][i][k];
rhs[k][0][i][0] = rhs[k][0][i][0] - coeff*rhs[k][0][i][3];
coeff = lhsY[1][3][BB][0][i][k];
lhsY[1][4][BB][0][i][k]= lhsY[1][4][BB][0][i][k] - coeff*lhsY[3][4][BB][0][i][k];
lhsY[1][0][CC][0][i][k] = lhsY[1][0][CC][0][i][k] - coeff*lhsY[3][0][CC][0][i][k];
lhsY[1][1][CC][0][i][k] = lhsY[1][1][CC][0][i][k] - coeff*lhsY[3][1][CC][0][i][k];
lhsY[1][2][CC][0][i][k] = lhsY[1][2][CC][0][i][k] - coeff*lhsY[3][2][CC][0][i][k];
lhsY[1][3][CC][0][i][k] = lhsY[1][3][CC][0][i][k] - coeff*lhsY[3][3][CC][0][i][k];
lhsY[1][4][CC][0][i][k] = lhsY[1][4][CC][0][i][k] - coeff*lhsY[3][4][CC][0][i][k];
rhs[k][0][i][1] = rhs[k][0][i][1] - coeff*rhs[k][0][i][3];
coeff = lhsY[2][3][BB][0][i][k];
lhsY[2][4][BB][0][i][k]= lhsY[2][4][BB][0][i][k] - coeff*lhsY[3][4][BB][0][i][k];
lhsY[2][0][CC][0][i][k] = lhsY[2][0][CC][0][i][k] - coeff*lhsY[3][0][CC][0][i][k];
lhsY[2][1][CC][0][i][k] = lhsY[2][1][CC][0][i][k] - coeff*lhsY[3][1][CC][0][i][k];
lhsY[2][2][CC][0][i][k] = lhsY[2][2][CC][0][i][k] - coeff*lhsY[3][2][CC][0][i][k];
lhsY[2][3][CC][0][i][k] = lhsY[2][3][CC][0][i][k] - coeff*lhsY[3][3][CC][0][i][k];
lhsY[2][4][CC][0][i][k] = lhsY[2][4][CC][0][i][k] - coeff*lhsY[3][4][CC][0][i][k];
rhs[k][0][i][2] = rhs[k][0][i][2] - coeff*rhs[k][0][i][3];
coeff = lhsY[4][3][BB][0][i][k];
lhsY[4][4][BB][0][i][k]= lhsY[4][4][BB][0][i][k] - coeff*lhsY[3][4][BB][0][i][k];
lhsY[4][0][CC][0][i][k] = lhsY[4][0][CC][0][i][k] - coeff*lhsY[3][0][CC][0][i][k];
lhsY[4][1][CC][0][i][k] = lhsY[4][1][CC][0][i][k] - coeff*lhsY[3][1][CC][0][i][k];
lhsY[4][2][CC][0][i][k] = lhsY[4][2][CC][0][i][k] - coeff*lhsY[3][2][CC][0][i][k];
lhsY[4][3][CC][0][i][k] = lhsY[4][3][CC][0][i][k] - coeff*lhsY[3][3][CC][0][i][k];
lhsY[4][4][CC][0][i][k] = lhsY[4][4][CC][0][i][k] - coeff*lhsY[3][4][CC][0][i][k];
rhs[k][0][i][4] = rhs[k][0][i][4] - coeff*rhs[k][0][i][3];
pivot = 1.00/lhsY[4][4][BB][0][i][k];
lhsY[4][0][CC][0][i][k] = lhsY[4][0][CC][0][i][k]*pivot;
lhsY[4][1][CC][0][i][k] = lhsY[4][1][CC][0][i][k]*pivot;
lhsY[4][2][CC][0][i][k] = lhsY[4][2][CC][0][i][k]*pivot;
lhsY[4][3][CC][0][i][k] = lhsY[4][3][CC][0][i][k]*pivot;
lhsY[4][4][CC][0][i][k] = lhsY[4][4][CC][0][i][k]*pivot;
rhs[k][0][i][4] = rhs[k][0][i][4] *pivot;
coeff = lhsY[0][4][BB][0][i][k];
lhsY[0][0][CC][0][i][k] = lhsY[0][0][CC][0][i][k] - coeff*lhsY[4][0][CC][0][i][k];
lhsY[0][1][CC][0][i][k] = lhsY[0][1][CC][0][i][k] - coeff*lhsY[4][1][CC][0][i][k];
lhsY[0][2][CC][0][i][k] = lhsY[0][2][CC][0][i][k] - coeff*lhsY[4][2][CC][0][i][k];
lhsY[0][3][CC][0][i][k] = lhsY[0][3][CC][0][i][k] - coeff*lhsY[4][3][CC][0][i][k];
lhsY[0][4][CC][0][i][k] = lhsY[0][4][CC][0][i][k] - coeff*lhsY[4][4][CC][0][i][k];
rhs[k][0][i][0] = rhs[k][0][i][0] - coeff*rhs[k][0][i][4];
coeff = lhsY[1][4][BB][0][i][k];
lhsY[1][0][CC][0][i][k] = lhsY[1][0][CC][0][i][k] - coeff*lhsY[4][0][CC][0][i][k];
lhsY[1][1][CC][0][i][k] = lhsY[1][1][CC][0][i][k] - coeff*lhsY[4][1][CC][0][i][k];
lhsY[1][2][CC][0][i][k] = lhsY[1][2][CC][0][i][k] - coeff*lhsY[4][2][CC][0][i][k];
lhsY[1][3][CC][0][i][k] = lhsY[1][3][CC][0][i][k] - coeff*lhsY[4][3][CC][0][i][k];
lhsY[1][4][CC][0][i][k] = lhsY[1][4][CC][0][i][k] - coeff*lhsY[4][4][CC][0][i][k];
rhs[k][0][i][1] = rhs[k][0][i][1] - coeff*rhs[k][0][i][4];
coeff = lhsY[2][4][BB][0][i][k];
lhsY[2][0][CC][0][i][k] = lhsY[2][0][CC][0][i][k] - coeff*lhsY[4][0][CC][0][i][k];
lhsY[2][1][CC][0][i][k] = lhsY[2][1][CC][0][i][k] - coeff*lhsY[4][1][CC][0][i][k];
lhsY[2][2][CC][0][i][k] = lhsY[2][2][CC][0][i][k] - coeff*lhsY[4][2][CC][0][i][k];
lhsY[2][3][CC][0][i][k] = lhsY[2][3][CC][0][i][k] - coeff*lhsY[4][3][CC][0][i][k];
lhsY[2][4][CC][0][i][k] = lhsY[2][4][CC][0][i][k] - coeff*lhsY[4][4][CC][0][i][k];
rhs[k][0][i][2] = rhs[k][0][i][2] - coeff*rhs[k][0][i][4];
coeff = lhsY[3][4][BB][0][i][k];
lhsY[3][0][CC][0][i][k] = lhsY[3][0][CC][0][i][k] - coeff*lhsY[4][0][CC][0][i][k];
lhsY[3][1][CC][0][i][k] = lhsY[3][1][CC][0][i][k] - coeff*lhsY[4][1][CC][0][i][k];
lhsY[3][2][CC][0][i][k] = lhsY[3][2][CC][0][i][k] - coeff*lhsY[4][2][CC][0][i][k];
lhsY[3][3][CC][0][i][k] = lhsY[3][3][CC][0][i][k] - coeff*lhsY[4][3][CC][0][i][k];
lhsY[3][4][CC][0][i][k] = lhsY[3][4][CC][0][i][k] - coeff*lhsY[4][4][CC][0][i][k];
rhs[k][0][i][3] = rhs[k][0][i][3] - coeff*rhs[k][0][i][4];
}/*end i*/
}/*end k*/
//---------------------------------------------------------------------
// Forward-elimination sweep along j: for each (i,k) column, process
// every interior j-plane of the cell (the final j-plane is handled
// separately after this loop).
//---------------------------------------------------------------------
#pragma omp target teams distribute parallel for private(j,k)
for (i = 1; i <= gp02; i++) {
for (j = 1; j <= jsize-1; j++) {
#pragma omp simd private(pivot,coeff)
for (k = 1; k <= gp22; k++) {
//-------------------------------------------------------------------
// subtract A(j)*rhs_vector(j-1) from rhs_vector(j) for this (i,k):
//
// rhs(j) = rhs(j) - A(j)*rhs(j-1)
//-------------------------------------------------------------------
// manually unrolled equivalent of:
// matvec_sub(AA block of lhsY at [j][i][k], rhs[k][j-1][i], rhs[k][j][i]);
/*
for(m = 0; m < 5; m++){
rhs[k][j][i][m] = rhs[k][j][i][m] - lhsY[m][0][AA][j][i][k]*rhs[k][j-1][i][0]
- lhsY[m][1][AA][j][i][k]*rhs[k][j-1][i][1]
- lhsY[m][2][AA][j][i][k]*rhs[k][j-1][i][2]
- lhsY[m][3][AA][j][i][k]*rhs[k][j-1][i][3]
- lhsY[m][4][AA][j][i][k]*rhs[k][j-1][i][4];
}
*/
rhs[k][j][i][0] = rhs[k][j][i][0] - lhsY[0][0][AA][j][i][k]*rhs[k][j-1][i][0]
- lhsY[0][1][AA][j][i][k]*rhs[k][j-1][i][1]
- lhsY[0][2][AA][j][i][k]*rhs[k][j-1][i][2]
- lhsY[0][3][AA][j][i][k]*rhs[k][j-1][i][3]
- lhsY[0][4][AA][j][i][k]*rhs[k][j-1][i][4];
rhs[k][j][i][1] = rhs[k][j][i][1] - lhsY[1][0][AA][j][i][k]*rhs[k][j-1][i][0]
- lhsY[1][1][AA][j][i][k]*rhs[k][j-1][i][1]
- lhsY[1][2][AA][j][i][k]*rhs[k][j-1][i][2]
- lhsY[1][3][AA][j][i][k]*rhs[k][j-1][i][3]
- lhsY[1][4][AA][j][i][k]*rhs[k][j-1][i][4];
rhs[k][j][i][2] = rhs[k][j][i][2] - lhsY[2][0][AA][j][i][k]*rhs[k][j-1][i][0]
- lhsY[2][1][AA][j][i][k]*rhs[k][j-1][i][1]
- lhsY[2][2][AA][j][i][k]*rhs[k][j-1][i][2]
- lhsY[2][3][AA][j][i][k]*rhs[k][j-1][i][3]
- lhsY[2][4][AA][j][i][k]*rhs[k][j-1][i][4];
rhs[k][j][i][3] = rhs[k][j][i][3] - lhsY[3][0][AA][j][i][k]*rhs[k][j-1][i][0]
- lhsY[3][1][AA][j][i][k]*rhs[k][j-1][i][1]
- lhsY[3][2][AA][j][i][k]*rhs[k][j-1][i][2]
- lhsY[3][3][AA][j][i][k]*rhs[k][j-1][i][3]
- lhsY[3][4][AA][j][i][k]*rhs[k][j-1][i][4];
rhs[k][j][i][4] = rhs[k][j][i][4] - lhsY[4][0][AA][j][i][k]*rhs[k][j-1][i][0]
- lhsY[4][1][AA][j][i][k]*rhs[k][j-1][i][1]
- lhsY[4][2][AA][j][i][k]*rhs[k][j-1][i][2]
- lhsY[4][3][AA][j][i][k]*rhs[k][j-1][i][3]
- lhsY[4][4][AA][j][i][k]*rhs[k][j-1][i][4];
//-------------------------------------------------------------------
// B(j) = B(j) - A(j)*C(j-1)
//-------------------------------------------------------------------
// manually unrolled equivalent of:
// matmul_sub(AA block at [j][i][k], CC block at [j-1][i][k], BB block at [j][i][k]);
/*
for(m = 0; m < 5; m++){
for(n = 0; n < 5; n++){
lhsY[n][m][BB][j][i][k] = lhsY[n][m][BB][j][i][k] - lhsY[n][0][AA][j][i][k]*lhsY[0][m][CC][j-1][i][k]
- lhsY[n][1][AA][j][i][k]*lhsY[1][m][CC][j-1][i][k]
- lhsY[n][2][AA][j][i][k]*lhsY[2][m][CC][j-1][i][k]
- lhsY[n][3][AA][j][i][k]*lhsY[3][m][CC][j-1][i][k]
- lhsY[n][4][AA][j][i][k]*lhsY[4][m][CC][j-1][i][k];
}
}
*/
lhsY[0][0][BB][j][i][k] = lhsY[0][0][BB][j][i][k] - lhsY[0][0][AA][j][i][k]*lhsY[0][0][CC][j-1][i][k]
- lhsY[0][1][AA][j][i][k]*lhsY[1][0][CC][j-1][i][k]
- lhsY[0][2][AA][j][i][k]*lhsY[2][0][CC][j-1][i][k]
- lhsY[0][3][AA][j][i][k]*lhsY[3][0][CC][j-1][i][k]
- lhsY[0][4][AA][j][i][k]*lhsY[4][0][CC][j-1][i][k];
lhsY[1][0][BB][j][i][k] = lhsY[1][0][BB][j][i][k] - lhsY[1][0][AA][j][i][k]*lhsY[0][0][CC][j-1][i][k]
- lhsY[1][1][AA][j][i][k]*lhsY[1][0][CC][j-1][i][k]
- lhsY[1][2][AA][j][i][k]*lhsY[2][0][CC][j-1][i][k]
- lhsY[1][3][AA][j][i][k]*lhsY[3][0][CC][j-1][i][k]
- lhsY[1][4][AA][j][i][k]*lhsY[4][0][CC][j-1][i][k];
lhsY[2][0][BB][j][i][k] = lhsY[2][0][BB][j][i][k] - lhsY[2][0][AA][j][i][k]*lhsY[0][0][CC][j-1][i][k]
- lhsY[2][1][AA][j][i][k]*lhsY[1][0][CC][j-1][i][k]
- lhsY[2][2][AA][j][i][k]*lhsY[2][0][CC][j-1][i][k]
- lhsY[2][3][AA][j][i][k]*lhsY[3][0][CC][j-1][i][k]
- lhsY[2][4][AA][j][i][k]*lhsY[4][0][CC][j-1][i][k];
lhsY[3][0][BB][j][i][k] = lhsY[3][0][BB][j][i][k] - lhsY[3][0][AA][j][i][k]*lhsY[0][0][CC][j-1][i][k]
- lhsY[3][1][AA][j][i][k]*lhsY[1][0][CC][j-1][i][k]
- lhsY[3][2][AA][j][i][k]*lhsY[2][0][CC][j-1][i][k]
- lhsY[3][3][AA][j][i][k]*lhsY[3][0][CC][j-1][i][k]
- lhsY[3][4][AA][j][i][k]*lhsY[4][0][CC][j-1][i][k];
lhsY[4][0][BB][j][i][k] = lhsY[4][0][BB][j][i][k] - lhsY[4][0][AA][j][i][k]*lhsY[0][0][CC][j-1][i][k]
- lhsY[4][1][AA][j][i][k]*lhsY[1][0][CC][j-1][i][k]
- lhsY[4][2][AA][j][i][k]*lhsY[2][0][CC][j-1][i][k]
- lhsY[4][3][AA][j][i][k]*lhsY[3][0][CC][j-1][i][k]
- lhsY[4][4][AA][j][i][k]*lhsY[4][0][CC][j-1][i][k];
lhsY[0][1][BB][j][i][k] = lhsY[0][1][BB][j][i][k] - lhsY[0][0][AA][j][i][k]*lhsY[0][1][CC][j-1][i][k]
- lhsY[0][1][AA][j][i][k]*lhsY[1][1][CC][j-1][i][k]
- lhsY[0][2][AA][j][i][k]*lhsY[2][1][CC][j-1][i][k]
- lhsY[0][3][AA][j][i][k]*lhsY[3][1][CC][j-1][i][k]
- lhsY[0][4][AA][j][i][k]*lhsY[4][1][CC][j-1][i][k];
lhsY[1][1][BB][j][i][k] = lhsY[1][1][BB][j][i][k] - lhsY[1][0][AA][j][i][k]*lhsY[0][1][CC][j-1][i][k]
- lhsY[1][1][AA][j][i][k]*lhsY[1][1][CC][j-1][i][k]
- lhsY[1][2][AA][j][i][k]*lhsY[2][1][CC][j-1][i][k]
- lhsY[1][3][AA][j][i][k]*lhsY[3][1][CC][j-1][i][k]
- lhsY[1][4][AA][j][i][k]*lhsY[4][1][CC][j-1][i][k];
lhsY[2][1][BB][j][i][k] = lhsY[2][1][BB][j][i][k] - lhsY[2][0][AA][j][i][k]*lhsY[0][1][CC][j-1][i][k]
- lhsY[2][1][AA][j][i][k]*lhsY[1][1][CC][j-1][i][k]
- lhsY[2][2][AA][j][i][k]*lhsY[2][1][CC][j-1][i][k]
- lhsY[2][3][AA][j][i][k]*lhsY[3][1][CC][j-1][i][k]
- lhsY[2][4][AA][j][i][k]*lhsY[4][1][CC][j-1][i][k];
lhsY[3][1][BB][j][i][k] = lhsY[3][1][BB][j][i][k] - lhsY[3][0][AA][j][i][k]*lhsY[0][1][CC][j-1][i][k]
- lhsY[3][1][AA][j][i][k]*lhsY[1][1][CC][j-1][i][k]
- lhsY[3][2][AA][j][i][k]*lhsY[2][1][CC][j-1][i][k]
- lhsY[3][3][AA][j][i][k]*lhsY[3][1][CC][j-1][i][k]
- lhsY[3][4][AA][j][i][k]*lhsY[4][1][CC][j-1][i][k];
lhsY[4][1][BB][j][i][k] = lhsY[4][1][BB][j][i][k] - lhsY[4][0][AA][j][i][k]*lhsY[0][1][CC][j-1][i][k]
- lhsY[4][1][AA][j][i][k]*lhsY[1][1][CC][j-1][i][k]
- lhsY[4][2][AA][j][i][k]*lhsY[2][1][CC][j-1][i][k]
- lhsY[4][3][AA][j][i][k]*lhsY[3][1][CC][j-1][i][k]
- lhsY[4][4][AA][j][i][k]*lhsY[4][1][CC][j-1][i][k];
lhsY[0][2][BB][j][i][k] = lhsY[0][2][BB][j][i][k] - lhsY[0][0][AA][j][i][k]*lhsY[0][2][CC][j-1][i][k]
- lhsY[0][1][AA][j][i][k]*lhsY[1][2][CC][j-1][i][k]
- lhsY[0][2][AA][j][i][k]*lhsY[2][2][CC][j-1][i][k]
- lhsY[0][3][AA][j][i][k]*lhsY[3][2][CC][j-1][i][k]
- lhsY[0][4][AA][j][i][k]*lhsY[4][2][CC][j-1][i][k];
lhsY[1][2][BB][j][i][k] = lhsY[1][2][BB][j][i][k] - lhsY[1][0][AA][j][i][k]*lhsY[0][2][CC][j-1][i][k]
- lhsY[1][1][AA][j][i][k]*lhsY[1][2][CC][j-1][i][k]
- lhsY[1][2][AA][j][i][k]*lhsY[2][2][CC][j-1][i][k]
- lhsY[1][3][AA][j][i][k]*lhsY[3][2][CC][j-1][i][k]
- lhsY[1][4][AA][j][i][k]*lhsY[4][2][CC][j-1][i][k];
lhsY[2][2][BB][j][i][k] = lhsY[2][2][BB][j][i][k] - lhsY[2][0][AA][j][i][k]*lhsY[0][2][CC][j-1][i][k]
- lhsY[2][1][AA][j][i][k]*lhsY[1][2][CC][j-1][i][k]
- lhsY[2][2][AA][j][i][k]*lhsY[2][2][CC][j-1][i][k]
- lhsY[2][3][AA][j][i][k]*lhsY[3][2][CC][j-1][i][k]
- lhsY[2][4][AA][j][i][k]*lhsY[4][2][CC][j-1][i][k];
lhsY[3][2][BB][j][i][k] = lhsY[3][2][BB][j][i][k] - lhsY[3][0][AA][j][i][k]*lhsY[0][2][CC][j-1][i][k]
- lhsY[3][1][AA][j][i][k]*lhsY[1][2][CC][j-1][i][k]
- lhsY[3][2][AA][j][i][k]*lhsY[2][2][CC][j-1][i][k]
- lhsY[3][3][AA][j][i][k]*lhsY[3][2][CC][j-1][i][k]
- lhsY[3][4][AA][j][i][k]*lhsY[4][2][CC][j-1][i][k];
lhsY[4][2][BB][j][i][k] = lhsY[4][2][BB][j][i][k] - lhsY[4][0][AA][j][i][k]*lhsY[0][2][CC][j-1][i][k]
- lhsY[4][1][AA][j][i][k]*lhsY[1][2][CC][j-1][i][k]
- lhsY[4][2][AA][j][i][k]*lhsY[2][2][CC][j-1][i][k]
- lhsY[4][3][AA][j][i][k]*lhsY[3][2][CC][j-1][i][k]
- lhsY[4][4][AA][j][i][k]*lhsY[4][2][CC][j-1][i][k];
lhsY[0][3][BB][j][i][k] = lhsY[0][3][BB][j][i][k] - lhsY[0][0][AA][j][i][k]*lhsY[0][3][CC][j-1][i][k]
- lhsY[0][1][AA][j][i][k]*lhsY[1][3][CC][j-1][i][k]
- lhsY[0][2][AA][j][i][k]*lhsY[2][3][CC][j-1][i][k]
- lhsY[0][3][AA][j][i][k]*lhsY[3][3][CC][j-1][i][k]
- lhsY[0][4][AA][j][i][k]*lhsY[4][3][CC][j-1][i][k];
lhsY[1][3][BB][j][i][k] = lhsY[1][3][BB][j][i][k] - lhsY[1][0][AA][j][i][k]*lhsY[0][3][CC][j-1][i][k]
- lhsY[1][1][AA][j][i][k]*lhsY[1][3][CC][j-1][i][k]
- lhsY[1][2][AA][j][i][k]*lhsY[2][3][CC][j-1][i][k]
- lhsY[1][3][AA][j][i][k]*lhsY[3][3][CC][j-1][i][k]
- lhsY[1][4][AA][j][i][k]*lhsY[4][3][CC][j-1][i][k];
lhsY[2][3][BB][j][i][k] = lhsY[2][3][BB][j][i][k] - lhsY[2][0][AA][j][i][k]*lhsY[0][3][CC][j-1][i][k]
- lhsY[2][1][AA][j][i][k]*lhsY[1][3][CC][j-1][i][k]
- lhsY[2][2][AA][j][i][k]*lhsY[2][3][CC][j-1][i][k]
- lhsY[2][3][AA][j][i][k]*lhsY[3][3][CC][j-1][i][k]
- lhsY[2][4][AA][j][i][k]*lhsY[4][3][CC][j-1][i][k];
lhsY[3][3][BB][j][i][k] = lhsY[3][3][BB][j][i][k] - lhsY[3][0][AA][j][i][k]*lhsY[0][3][CC][j-1][i][k]
- lhsY[3][1][AA][j][i][k]*lhsY[1][3][CC][j-1][i][k]
- lhsY[3][2][AA][j][i][k]*lhsY[2][3][CC][j-1][i][k]
- lhsY[3][3][AA][j][i][k]*lhsY[3][3][CC][j-1][i][k]
- lhsY[3][4][AA][j][i][k]*lhsY[4][3][CC][j-1][i][k];
lhsY[4][3][BB][j][i][k] = lhsY[4][3][BB][j][i][k] - lhsY[4][0][AA][j][i][k]*lhsY[0][3][CC][j-1][i][k]
- lhsY[4][1][AA][j][i][k]*lhsY[1][3][CC][j-1][i][k]
- lhsY[4][2][AA][j][i][k]*lhsY[2][3][CC][j-1][i][k]
- lhsY[4][3][AA][j][i][k]*lhsY[3][3][CC][j-1][i][k]
- lhsY[4][4][AA][j][i][k]*lhsY[4][3][CC][j-1][i][k];
lhsY[0][4][BB][j][i][k] = lhsY[0][4][BB][j][i][k] - lhsY[0][0][AA][j][i][k]*lhsY[0][4][CC][j-1][i][k]
- lhsY[0][1][AA][j][i][k]*lhsY[1][4][CC][j-1][i][k]
- lhsY[0][2][AA][j][i][k]*lhsY[2][4][CC][j-1][i][k]
- lhsY[0][3][AA][j][i][k]*lhsY[3][4][CC][j-1][i][k]
- lhsY[0][4][AA][j][i][k]*lhsY[4][4][CC][j-1][i][k];
lhsY[1][4][BB][j][i][k] = lhsY[1][4][BB][j][i][k] - lhsY[1][0][AA][j][i][k]*lhsY[0][4][CC][j-1][i][k]
- lhsY[1][1][AA][j][i][k]*lhsY[1][4][CC][j-1][i][k]
- lhsY[1][2][AA][j][i][k]*lhsY[2][4][CC][j-1][i][k]
- lhsY[1][3][AA][j][i][k]*lhsY[3][4][CC][j-1][i][k]
- lhsY[1][4][AA][j][i][k]*lhsY[4][4][CC][j-1][i][k];
lhsY[2][4][BB][j][i][k] = lhsY[2][4][BB][j][i][k] - lhsY[2][0][AA][j][i][k]*lhsY[0][4][CC][j-1][i][k]
- lhsY[2][1][AA][j][i][k]*lhsY[1][4][CC][j-1][i][k]
- lhsY[2][2][AA][j][i][k]*lhsY[2][4][CC][j-1][i][k]
- lhsY[2][3][AA][j][i][k]*lhsY[3][4][CC][j-1][i][k]
- lhsY[2][4][AA][j][i][k]*lhsY[4][4][CC][j-1][i][k];
lhsY[3][4][BB][j][i][k] = lhsY[3][4][BB][j][i][k] - lhsY[3][0][AA][j][i][k]*lhsY[0][4][CC][j-1][i][k]
- lhsY[3][1][AA][j][i][k]*lhsY[1][4][CC][j-1][i][k]
- lhsY[3][2][AA][j][i][k]*lhsY[2][4][CC][j-1][i][k]
- lhsY[3][3][AA][j][i][k]*lhsY[3][4][CC][j-1][i][k]
- lhsY[3][4][AA][j][i][k]*lhsY[4][4][CC][j-1][i][k];
lhsY[4][4][BB][j][i][k] = lhsY[4][4][BB][j][i][k] - lhsY[4][0][AA][j][i][k]*lhsY[0][4][CC][j-1][i][k]
- lhsY[4][1][AA][j][i][k]*lhsY[1][4][CC][j-1][i][k]
- lhsY[4][2][AA][j][i][k]*lhsY[2][4][CC][j-1][i][k]
- lhsY[4][3][AA][j][i][k]*lhsY[3][4][CC][j-1][i][k]
- lhsY[4][4][AA][j][i][k]*lhsY[4][4][CC][j-1][i][k];
//-------------------------------------------------------------------
// multiply C(j) by B_inverse(j) and copy back to C;
// multiply rhs[k][j][i] by B_inverse(j) and copy back to rhs
// (in-place 5x5 Gauss-Jordan on block B, updating C and rhs alongside)
//-------------------------------------------------------------------
// manually unrolled equivalent of:
// binvcrhs(BB block at [j][i][k], CC block at [j][i][k], rhs[k][j][i]);
/*
for(m = 0; m < 5; m++){
pivot = 1.00/lhsY[m][m][BB][j][i][k];
for(n = m+1; n < 5; n++){
lhsY[m][n][BB][j][i][k] = lhsY[m][n][BB][j][i][k]*pivot;
}
lhsY[m][0][CC][j][i][k] = lhsY[m][0][CC][j][i][k]*pivot;
lhsY[m][1][CC][j][i][k] = lhsY[m][1][CC][j][i][k]*pivot;
lhsY[m][2][CC][j][i][k] = lhsY[m][2][CC][j][i][k]*pivot;
lhsY[m][3][CC][j][i][k] = lhsY[m][3][CC][j][i][k]*pivot;
lhsY[m][4][CC][j][i][k] = lhsY[m][4][CC][j][i][k]*pivot;
rhs[k][j][i][m] = rhs[k][j][i][m]*pivot;
for(n = 0; n < 5; n++){
if(n != m){
coeff = lhsY[n][m][BB][j][i][k];
for(z = m+1; z < 5; z++){
lhsY[n][z][BB][j][i][k] = lhsY[n][z][BB][j][i][k] - coeff*lhsY[m][z][BB][j][i][k];
}
lhsY[n][0][CC][j][i][k] = lhsY[n][0][CC][j][i][k] - coeff*lhsY[m][0][CC][j][i][k];
lhsY[n][1][CC][j][i][k] = lhsY[n][1][CC][j][i][k] - coeff*lhsY[m][1][CC][j][i][k];
lhsY[n][2][CC][j][i][k] = lhsY[n][2][CC][j][i][k] - coeff*lhsY[m][2][CC][j][i][k];
lhsY[n][3][CC][j][i][k] = lhsY[n][3][CC][j][i][k] - coeff*lhsY[m][3][CC][j][i][k];
lhsY[n][4][CC][j][i][k] = lhsY[n][4][CC][j][i][k] - coeff*lhsY[m][4][CC][j][i][k];
rhs[k][j][i][n] = rhs[k][j][i][n] - coeff*rhs[k][j][i][m];
}
}
}
*/
pivot = 1.00/lhsY[0][0][BB][j][i][k];
lhsY[0][1][BB][j][i][k] = lhsY[0][1][BB][j][i][k]*pivot;
lhsY[0][2][BB][j][i][k] = lhsY[0][2][BB][j][i][k]*pivot;
lhsY[0][3][BB][j][i][k] = lhsY[0][3][BB][j][i][k]*pivot;
lhsY[0][4][BB][j][i][k] = lhsY[0][4][BB][j][i][k]*pivot;
lhsY[0][0][CC][j][i][k] = lhsY[0][0][CC][j][i][k]*pivot;
lhsY[0][1][CC][j][i][k] = lhsY[0][1][CC][j][i][k]*pivot;
lhsY[0][2][CC][j][i][k] = lhsY[0][2][CC][j][i][k]*pivot;
lhsY[0][3][CC][j][i][k] = lhsY[0][3][CC][j][i][k]*pivot;
lhsY[0][4][CC][j][i][k] = lhsY[0][4][CC][j][i][k]*pivot;
rhs[k][j][i][0] = rhs[k][j][i][0] *pivot;
coeff = lhsY[1][0][BB][j][i][k];
lhsY[1][1][BB][j][i][k]= lhsY[1][1][BB][j][i][k] - coeff*lhsY[0][1][BB][j][i][k];
lhsY[1][2][BB][j][i][k]= lhsY[1][2][BB][j][i][k] - coeff*lhsY[0][2][BB][j][i][k];
lhsY[1][3][BB][j][i][k]= lhsY[1][3][BB][j][i][k] - coeff*lhsY[0][3][BB][j][i][k];
lhsY[1][4][BB][j][i][k]= lhsY[1][4][BB][j][i][k] - coeff*lhsY[0][4][BB][j][i][k];
lhsY[1][0][CC][j][i][k] = lhsY[1][0][CC][j][i][k] - coeff*lhsY[0][0][CC][j][i][k];
lhsY[1][1][CC][j][i][k] = lhsY[1][1][CC][j][i][k] - coeff*lhsY[0][1][CC][j][i][k];
lhsY[1][2][CC][j][i][k] = lhsY[1][2][CC][j][i][k] - coeff*lhsY[0][2][CC][j][i][k];
lhsY[1][3][CC][j][i][k] = lhsY[1][3][CC][j][i][k] - coeff*lhsY[0][3][CC][j][i][k];
lhsY[1][4][CC][j][i][k] = lhsY[1][4][CC][j][i][k] - coeff*lhsY[0][4][CC][j][i][k];
rhs[k][j][i][1] = rhs[k][j][i][1] - coeff*rhs[k][j][i][0];
coeff = lhsY[2][0][BB][j][i][k];
lhsY[2][1][BB][j][i][k]= lhsY[2][1][BB][j][i][k] - coeff*lhsY[0][1][BB][j][i][k];
lhsY[2][2][BB][j][i][k]= lhsY[2][2][BB][j][i][k] - coeff*lhsY[0][2][BB][j][i][k];
lhsY[2][3][BB][j][i][k]= lhsY[2][3][BB][j][i][k] - coeff*lhsY[0][3][BB][j][i][k];
lhsY[2][4][BB][j][i][k]= lhsY[2][4][BB][j][i][k] - coeff*lhsY[0][4][BB][j][i][k];
lhsY[2][0][CC][j][i][k] = lhsY[2][0][CC][j][i][k] - coeff*lhsY[0][0][CC][j][i][k];
lhsY[2][1][CC][j][i][k] = lhsY[2][1][CC][j][i][k] - coeff*lhsY[0][1][CC][j][i][k];
lhsY[2][2][CC][j][i][k] = lhsY[2][2][CC][j][i][k] - coeff*lhsY[0][2][CC][j][i][k];
lhsY[2][3][CC][j][i][k] = lhsY[2][3][CC][j][i][k] - coeff*lhsY[0][3][CC][j][i][k];
lhsY[2][4][CC][j][i][k] = lhsY[2][4][CC][j][i][k] - coeff*lhsY[0][4][CC][j][i][k];
rhs[k][j][i][2] = rhs[k][j][i][2] - coeff*rhs[k][j][i][0];
coeff = lhsY[3][0][BB][j][i][k];
lhsY[3][1][BB][j][i][k]= lhsY[3][1][BB][j][i][k] - coeff*lhsY[0][1][BB][j][i][k];
lhsY[3][2][BB][j][i][k]= lhsY[3][2][BB][j][i][k] - coeff*lhsY[0][2][BB][j][i][k];
lhsY[3][3][BB][j][i][k]= lhsY[3][3][BB][j][i][k] - coeff*lhsY[0][3][BB][j][i][k];
lhsY[3][4][BB][j][i][k]= lhsY[3][4][BB][j][i][k] - coeff*lhsY[0][4][BB][j][i][k];
lhsY[3][0][CC][j][i][k] = lhsY[3][0][CC][j][i][k] - coeff*lhsY[0][0][CC][j][i][k];
lhsY[3][1][CC][j][i][k] = lhsY[3][1][CC][j][i][k] - coeff*lhsY[0][1][CC][j][i][k];
lhsY[3][2][CC][j][i][k] = lhsY[3][2][CC][j][i][k] - coeff*lhsY[0][2][CC][j][i][k];
lhsY[3][3][CC][j][i][k] = lhsY[3][3][CC][j][i][k] - coeff*lhsY[0][3][CC][j][i][k];
lhsY[3][4][CC][j][i][k] = lhsY[3][4][CC][j][i][k] - coeff*lhsY[0][4][CC][j][i][k];
rhs[k][j][i][3] = rhs[k][j][i][3] - coeff*rhs[k][j][i][0];
coeff = lhsY[4][0][BB][j][i][k];
lhsY[4][1][BB][j][i][k]= lhsY[4][1][BB][j][i][k] - coeff*lhsY[0][1][BB][j][i][k];
lhsY[4][2][BB][j][i][k]= lhsY[4][2][BB][j][i][k] - coeff*lhsY[0][2][BB][j][i][k];
lhsY[4][3][BB][j][i][k]= lhsY[4][3][BB][j][i][k] - coeff*lhsY[0][3][BB][j][i][k];
lhsY[4][4][BB][j][i][k]= lhsY[4][4][BB][j][i][k] - coeff*lhsY[0][4][BB][j][i][k];
lhsY[4][0][CC][j][i][k] = lhsY[4][0][CC][j][i][k] - coeff*lhsY[0][0][CC][j][i][k];
lhsY[4][1][CC][j][i][k] = lhsY[4][1][CC][j][i][k] - coeff*lhsY[0][1][CC][j][i][k];
lhsY[4][2][CC][j][i][k] = lhsY[4][2][CC][j][i][k] - coeff*lhsY[0][2][CC][j][i][k];
lhsY[4][3][CC][j][i][k] = lhsY[4][3][CC][j][i][k] - coeff*lhsY[0][3][CC][j][i][k];
lhsY[4][4][CC][j][i][k] = lhsY[4][4][CC][j][i][k] - coeff*lhsY[0][4][CC][j][i][k];
rhs[k][j][i][4] = rhs[k][j][i][4] - coeff*rhs[k][j][i][0];
pivot = 1.00/lhsY[1][1][BB][j][i][k];
lhsY[1][2][BB][j][i][k] = lhsY[1][2][BB][j][i][k]*pivot;
lhsY[1][3][BB][j][i][k] = lhsY[1][3][BB][j][i][k]*pivot;
lhsY[1][4][BB][j][i][k] = lhsY[1][4][BB][j][i][k]*pivot;
lhsY[1][0][CC][j][i][k] = lhsY[1][0][CC][j][i][k]*pivot;
lhsY[1][1][CC][j][i][k] = lhsY[1][1][CC][j][i][k]*pivot;
lhsY[1][2][CC][j][i][k] = lhsY[1][2][CC][j][i][k]*pivot;
lhsY[1][3][CC][j][i][k] = lhsY[1][3][CC][j][i][k]*pivot;
lhsY[1][4][CC][j][i][k] = lhsY[1][4][CC][j][i][k]*pivot;
rhs[k][j][i][1] = rhs[k][j][i][1] *pivot;
coeff = lhsY[0][1][BB][j][i][k];
lhsY[0][2][BB][j][i][k]= lhsY[0][2][BB][j][i][k] - coeff*lhsY[1][2][BB][j][i][k];
lhsY[0][3][BB][j][i][k]= lhsY[0][3][BB][j][i][k] - coeff*lhsY[1][3][BB][j][i][k];
lhsY[0][4][BB][j][i][k]= lhsY[0][4][BB][j][i][k] - coeff*lhsY[1][4][BB][j][i][k];
lhsY[0][0][CC][j][i][k] = lhsY[0][0][CC][j][i][k] - coeff*lhsY[1][0][CC][j][i][k];
lhsY[0][1][CC][j][i][k] = lhsY[0][1][CC][j][i][k] - coeff*lhsY[1][1][CC][j][i][k];
lhsY[0][2][CC][j][i][k] = lhsY[0][2][CC][j][i][k] - coeff*lhsY[1][2][CC][j][i][k];
lhsY[0][3][CC][j][i][k] = lhsY[0][3][CC][j][i][k] - coeff*lhsY[1][3][CC][j][i][k];
lhsY[0][4][CC][j][i][k] = lhsY[0][4][CC][j][i][k] - coeff*lhsY[1][4][CC][j][i][k];
rhs[k][j][i][0] = rhs[k][j][i][0] - coeff*rhs[k][j][i][1];
coeff = lhsY[2][1][BB][j][i][k];
lhsY[2][2][BB][j][i][k]= lhsY[2][2][BB][j][i][k] - coeff*lhsY[1][2][BB][j][i][k];
lhsY[2][3][BB][j][i][k]= lhsY[2][3][BB][j][i][k] - coeff*lhsY[1][3][BB][j][i][k];
lhsY[2][4][BB][j][i][k]= lhsY[2][4][BB][j][i][k] - coeff*lhsY[1][4][BB][j][i][k];
lhsY[2][0][CC][j][i][k] = lhsY[2][0][CC][j][i][k] - coeff*lhsY[1][0][CC][j][i][k];
lhsY[2][1][CC][j][i][k] = lhsY[2][1][CC][j][i][k] - coeff*lhsY[1][1][CC][j][i][k];
lhsY[2][2][CC][j][i][k] = lhsY[2][2][CC][j][i][k] - coeff*lhsY[1][2][CC][j][i][k];
lhsY[2][3][CC][j][i][k] = lhsY[2][3][CC][j][i][k] - coeff*lhsY[1][3][CC][j][i][k];
lhsY[2][4][CC][j][i][k] = lhsY[2][4][CC][j][i][k] - coeff*lhsY[1][4][CC][j][i][k];
rhs[k][j][i][2] = rhs[k][j][i][2] - coeff*rhs[k][j][i][1];
coeff = lhsY[3][1][BB][j][i][k];
lhsY[3][2][BB][j][i][k]= lhsY[3][2][BB][j][i][k] - coeff*lhsY[1][2][BB][j][i][k];
lhsY[3][3][BB][j][i][k]= lhsY[3][3][BB][j][i][k] - coeff*lhsY[1][3][BB][j][i][k];
lhsY[3][4][BB][j][i][k]= lhsY[3][4][BB][j][i][k] - coeff*lhsY[1][4][BB][j][i][k];
lhsY[3][0][CC][j][i][k] = lhsY[3][0][CC][j][i][k] - coeff*lhsY[1][0][CC][j][i][k];
lhsY[3][1][CC][j][i][k] = lhsY[3][1][CC][j][i][k] - coeff*lhsY[1][1][CC][j][i][k];
lhsY[3][2][CC][j][i][k] = lhsY[3][2][CC][j][i][k] - coeff*lhsY[1][2][CC][j][i][k];
lhsY[3][3][CC][j][i][k] = lhsY[3][3][CC][j][i][k] - coeff*lhsY[1][3][CC][j][i][k];
lhsY[3][4][CC][j][i][k] = lhsY[3][4][CC][j][i][k] - coeff*lhsY[1][4][CC][j][i][k];
rhs[k][j][i][3] = rhs[k][j][i][3] - coeff*rhs[k][j][i][1];
coeff = lhsY[4][1][BB][j][i][k];
lhsY[4][2][BB][j][i][k]= lhsY[4][2][BB][j][i][k] - coeff*lhsY[1][2][BB][j][i][k];
lhsY[4][3][BB][j][i][k]= lhsY[4][3][BB][j][i][k] - coeff*lhsY[1][3][BB][j][i][k];
lhsY[4][4][BB][j][i][k]= lhsY[4][4][BB][j][i][k] - coeff*lhsY[1][4][BB][j][i][k];
lhsY[4][0][CC][j][i][k] = lhsY[4][0][CC][j][i][k] - coeff*lhsY[1][0][CC][j][i][k];
lhsY[4][1][CC][j][i][k] = lhsY[4][1][CC][j][i][k] - coeff*lhsY[1][1][CC][j][i][k];
lhsY[4][2][CC][j][i][k] = lhsY[4][2][CC][j][i][k] - coeff*lhsY[1][2][CC][j][i][k];
lhsY[4][3][CC][j][i][k] = lhsY[4][3][CC][j][i][k] - coeff*lhsY[1][3][CC][j][i][k];
lhsY[4][4][CC][j][i][k] = lhsY[4][4][CC][j][i][k] - coeff*lhsY[1][4][CC][j][i][k];
rhs[k][j][i][4] = rhs[k][j][i][4] - coeff*rhs[k][j][i][1];
pivot = 1.00/lhsY[2][2][BB][j][i][k];
lhsY[2][3][BB][j][i][k] = lhsY[2][3][BB][j][i][k]*pivot;
lhsY[2][4][BB][j][i][k] = lhsY[2][4][BB][j][i][k]*pivot;
lhsY[2][0][CC][j][i][k] = lhsY[2][0][CC][j][i][k]*pivot;
lhsY[2][1][CC][j][i][k] = lhsY[2][1][CC][j][i][k]*pivot;
lhsY[2][2][CC][j][i][k] = lhsY[2][2][CC][j][i][k]*pivot;
lhsY[2][3][CC][j][i][k] = lhsY[2][3][CC][j][i][k]*pivot;
lhsY[2][4][CC][j][i][k] = lhsY[2][4][CC][j][i][k]*pivot;
rhs[k][j][i][2] = rhs[k][j][i][2] *pivot;
coeff = lhsY[0][2][BB][j][i][k];
lhsY[0][3][BB][j][i][k]= lhsY[0][3][BB][j][i][k] - coeff*lhsY[2][3][BB][j][i][k];
lhsY[0][4][BB][j][i][k]= lhsY[0][4][BB][j][i][k] - coeff*lhsY[2][4][BB][j][i][k];
lhsY[0][0][CC][j][i][k] = lhsY[0][0][CC][j][i][k] - coeff*lhsY[2][0][CC][j][i][k];
lhsY[0][1][CC][j][i][k] = lhsY[0][1][CC][j][i][k] - coeff*lhsY[2][1][CC][j][i][k];
lhsY[0][2][CC][j][i][k] = lhsY[0][2][CC][j][i][k] - coeff*lhsY[2][2][CC][j][i][k];
lhsY[0][3][CC][j][i][k] = lhsY[0][3][CC][j][i][k] - coeff*lhsY[2][3][CC][j][i][k];
lhsY[0][4][CC][j][i][k] = lhsY[0][4][CC][j][i][k] - coeff*lhsY[2][4][CC][j][i][k];
rhs[k][j][i][0] = rhs[k][j][i][0] - coeff*rhs[k][j][i][2];
coeff = lhsY[1][2][BB][j][i][k];
lhsY[1][3][BB][j][i][k]= lhsY[1][3][BB][j][i][k] - coeff*lhsY[2][3][BB][j][i][k];
lhsY[1][4][BB][j][i][k]= lhsY[1][4][BB][j][i][k] - coeff*lhsY[2][4][BB][j][i][k];
lhsY[1][0][CC][j][i][k] = lhsY[1][0][CC][j][i][k] - coeff*lhsY[2][0][CC][j][i][k];
lhsY[1][1][CC][j][i][k] = lhsY[1][1][CC][j][i][k] - coeff*lhsY[2][1][CC][j][i][k];
lhsY[1][2][CC][j][i][k] = lhsY[1][2][CC][j][i][k] - coeff*lhsY[2][2][CC][j][i][k];
lhsY[1][3][CC][j][i][k] = lhsY[1][3][CC][j][i][k] - coeff*lhsY[2][3][CC][j][i][k];
lhsY[1][4][CC][j][i][k] = lhsY[1][4][CC][j][i][k] - coeff*lhsY[2][4][CC][j][i][k];
rhs[k][j][i][1] = rhs[k][j][i][1] - coeff*rhs[k][j][i][2];
coeff = lhsY[3][2][BB][j][i][k];
lhsY[3][3][BB][j][i][k]= lhsY[3][3][BB][j][i][k] - coeff*lhsY[2][3][BB][j][i][k];
lhsY[3][4][BB][j][i][k]= lhsY[3][4][BB][j][i][k] - coeff*lhsY[2][4][BB][j][i][k];
lhsY[3][0][CC][j][i][k] = lhsY[3][0][CC][j][i][k] - coeff*lhsY[2][0][CC][j][i][k];
lhsY[3][1][CC][j][i][k] = lhsY[3][1][CC][j][i][k] - coeff*lhsY[2][1][CC][j][i][k];
lhsY[3][2][CC][j][i][k] = lhsY[3][2][CC][j][i][k] - coeff*lhsY[2][2][CC][j][i][k];
lhsY[3][3][CC][j][i][k] = lhsY[3][3][CC][j][i][k] - coeff*lhsY[2][3][CC][j][i][k];
lhsY[3][4][CC][j][i][k] = lhsY[3][4][CC][j][i][k] - coeff*lhsY[2][4][CC][j][i][k];
rhs[k][j][i][3] = rhs[k][j][i][3] - coeff*rhs[k][j][i][2];
coeff = lhsY[4][2][BB][j][i][k];
lhsY[4][3][BB][j][i][k]= lhsY[4][3][BB][j][i][k] - coeff*lhsY[2][3][BB][j][i][k];
lhsY[4][4][BB][j][i][k]= lhsY[4][4][BB][j][i][k] - coeff*lhsY[2][4][BB][j][i][k];
lhsY[4][0][CC][j][i][k] = lhsY[4][0][CC][j][i][k] - coeff*lhsY[2][0][CC][j][i][k];
lhsY[4][1][CC][j][i][k] = lhsY[4][1][CC][j][i][k] - coeff*lhsY[2][1][CC][j][i][k];
lhsY[4][2][CC][j][i][k] = lhsY[4][2][CC][j][i][k] - coeff*lhsY[2][2][CC][j][i][k];
lhsY[4][3][CC][j][i][k] = lhsY[4][3][CC][j][i][k] - coeff*lhsY[2][3][CC][j][i][k];
lhsY[4][4][CC][j][i][k] = lhsY[4][4][CC][j][i][k] - coeff*lhsY[2][4][CC][j][i][k];
rhs[k][j][i][4] = rhs[k][j][i][4] - coeff*rhs[k][j][i][2];
pivot = 1.00/lhsY[3][3][BB][j][i][k];
lhsY[3][4][BB][j][i][k] = lhsY[3][4][BB][j][i][k]*pivot;
lhsY[3][0][CC][j][i][k] = lhsY[3][0][CC][j][i][k]*pivot;
lhsY[3][1][CC][j][i][k] = lhsY[3][1][CC][j][i][k]*pivot;
lhsY[3][2][CC][j][i][k] = lhsY[3][2][CC][j][i][k]*pivot;
lhsY[3][3][CC][j][i][k] = lhsY[3][3][CC][j][i][k]*pivot;
lhsY[3][4][CC][j][i][k] = lhsY[3][4][CC][j][i][k]*pivot;
rhs[k][j][i][3] = rhs[k][j][i][3] *pivot;
coeff = lhsY[0][3][BB][j][i][k];
lhsY[0][4][BB][j][i][k]= lhsY[0][4][BB][j][i][k] - coeff*lhsY[3][4][BB][j][i][k];
lhsY[0][0][CC][j][i][k] = lhsY[0][0][CC][j][i][k] - coeff*lhsY[3][0][CC][j][i][k];
lhsY[0][1][CC][j][i][k] = lhsY[0][1][CC][j][i][k] - coeff*lhsY[3][1][CC][j][i][k];
lhsY[0][2][CC][j][i][k] = lhsY[0][2][CC][j][i][k] - coeff*lhsY[3][2][CC][j][i][k];
lhsY[0][3][CC][j][i][k] = lhsY[0][3][CC][j][i][k] - coeff*lhsY[3][3][CC][j][i][k];
lhsY[0][4][CC][j][i][k] = lhsY[0][4][CC][j][i][k] - coeff*lhsY[3][4][CC][j][i][k];
rhs[k][j][i][0] = rhs[k][j][i][0] - coeff*rhs[k][j][i][3];
coeff = lhsY[1][3][BB][j][i][k];
lhsY[1][4][BB][j][i][k]= lhsY[1][4][BB][j][i][k] - coeff*lhsY[3][4][BB][j][i][k];
lhsY[1][0][CC][j][i][k] = lhsY[1][0][CC][j][i][k] - coeff*lhsY[3][0][CC][j][i][k];
lhsY[1][1][CC][j][i][k] = lhsY[1][1][CC][j][i][k] - coeff*lhsY[3][1][CC][j][i][k];
lhsY[1][2][CC][j][i][k] = lhsY[1][2][CC][j][i][k] - coeff*lhsY[3][2][CC][j][i][k];
lhsY[1][3][CC][j][i][k] = lhsY[1][3][CC][j][i][k] - coeff*lhsY[3][3][CC][j][i][k];
lhsY[1][4][CC][j][i][k] = lhsY[1][4][CC][j][i][k] - coeff*lhsY[3][4][CC][j][i][k];
rhs[k][j][i][1] = rhs[k][j][i][1] - coeff*rhs[k][j][i][3];
coeff = lhsY[2][3][BB][j][i][k];
lhsY[2][4][BB][j][i][k]= lhsY[2][4][BB][j][i][k] - coeff*lhsY[3][4][BB][j][i][k];
lhsY[2][0][CC][j][i][k] = lhsY[2][0][CC][j][i][k] - coeff*lhsY[3][0][CC][j][i][k];
lhsY[2][1][CC][j][i][k] = lhsY[2][1][CC][j][i][k] - coeff*lhsY[3][1][CC][j][i][k];
lhsY[2][2][CC][j][i][k] = lhsY[2][2][CC][j][i][k] - coeff*lhsY[3][2][CC][j][i][k];
lhsY[2][3][CC][j][i][k] = lhsY[2][3][CC][j][i][k] - coeff*lhsY[3][3][CC][j][i][k];
lhsY[2][4][CC][j][i][k] = lhsY[2][4][CC][j][i][k] - coeff*lhsY[3][4][CC][j][i][k];
rhs[k][j][i][2] = rhs[k][j][i][2] - coeff*rhs[k][j][i][3];
coeff = lhsY[4][3][BB][j][i][k];
lhsY[4][4][BB][j][i][k]= lhsY[4][4][BB][j][i][k] - coeff*lhsY[3][4][BB][j][i][k];
lhsY[4][0][CC][j][i][k] = lhsY[4][0][CC][j][i][k] - coeff*lhsY[3][0][CC][j][i][k];
lhsY[4][1][CC][j][i][k] = lhsY[4][1][CC][j][i][k] - coeff*lhsY[3][1][CC][j][i][k];
lhsY[4][2][CC][j][i][k] = lhsY[4][2][CC][j][i][k] - coeff*lhsY[3][2][CC][j][i][k];
lhsY[4][3][CC][j][i][k] = lhsY[4][3][CC][j][i][k] - coeff*lhsY[3][3][CC][j][i][k];
lhsY[4][4][CC][j][i][k] = lhsY[4][4][CC][j][i][k] - coeff*lhsY[3][4][CC][j][i][k];
rhs[k][j][i][4] = rhs[k][j][i][4] - coeff*rhs[k][j][i][3];
pivot = 1.00/lhsY[4][4][BB][j][i][k];
lhsY[4][0][CC][j][i][k] = lhsY[4][0][CC][j][i][k]*pivot;
lhsY[4][1][CC][j][i][k] = lhsY[4][1][CC][j][i][k]*pivot;
lhsY[4][2][CC][j][i][k] = lhsY[4][2][CC][j][i][k]*pivot;
lhsY[4][3][CC][j][i][k] = lhsY[4][3][CC][j][i][k]*pivot;
lhsY[4][4][CC][j][i][k] = lhsY[4][4][CC][j][i][k]*pivot;
rhs[k][j][i][4] = rhs[k][j][i][4] *pivot;
coeff = lhsY[0][4][BB][j][i][k];
lhsY[0][0][CC][j][i][k] = lhsY[0][0][CC][j][i][k] - coeff*lhsY[4][0][CC][j][i][k];
lhsY[0][1][CC][j][i][k] = lhsY[0][1][CC][j][i][k] - coeff*lhsY[4][1][CC][j][i][k];
lhsY[0][2][CC][j][i][k] = lhsY[0][2][CC][j][i][k] - coeff*lhsY[4][2][CC][j][i][k];
lhsY[0][3][CC][j][i][k] = lhsY[0][3][CC][j][i][k] - coeff*lhsY[4][3][CC][j][i][k];
lhsY[0][4][CC][j][i][k] = lhsY[0][4][CC][j][i][k] - coeff*lhsY[4][4][CC][j][i][k];
rhs[k][j][i][0] = rhs[k][j][i][0] - coeff*rhs[k][j][i][4];
coeff = lhsY[1][4][BB][j][i][k];
lhsY[1][0][CC][j][i][k] = lhsY[1][0][CC][j][i][k] - coeff*lhsY[4][0][CC][j][i][k];
lhsY[1][1][CC][j][i][k] = lhsY[1][1][CC][j][i][k] - coeff*lhsY[4][1][CC][j][i][k];
lhsY[1][2][CC][j][i][k] = lhsY[1][2][CC][j][i][k] - coeff*lhsY[4][2][CC][j][i][k];
lhsY[1][3][CC][j][i][k] = lhsY[1][3][CC][j][i][k] - coeff*lhsY[4][3][CC][j][i][k];
lhsY[1][4][CC][j][i][k] = lhsY[1][4][CC][j][i][k] - coeff*lhsY[4][4][CC][j][i][k];
rhs[k][j][i][1] = rhs[k][j][i][1] - coeff*rhs[k][j][i][4];
coeff = lhsY[2][4][BB][j][i][k];
lhsY[2][0][CC][j][i][k] = lhsY[2][0][CC][j][i][k] - coeff*lhsY[4][0][CC][j][i][k];
lhsY[2][1][CC][j][i][k] = lhsY[2][1][CC][j][i][k] - coeff*lhsY[4][1][CC][j][i][k];
lhsY[2][2][CC][j][i][k] = lhsY[2][2][CC][j][i][k] - coeff*lhsY[4][2][CC][j][i][k];
lhsY[2][3][CC][j][i][k] = lhsY[2][3][CC][j][i][k] - coeff*lhsY[4][3][CC][j][i][k];
lhsY[2][4][CC][j][i][k] = lhsY[2][4][CC][j][i][k] - coeff*lhsY[4][4][CC][j][i][k];
rhs[k][j][i][2] = rhs[k][j][i][2] - coeff*rhs[k][j][i][4];
coeff = lhsY[3][4][BB][j][i][k];
lhsY[3][0][CC][j][i][k] = lhsY[3][0][CC][j][i][k] - coeff*lhsY[4][0][CC][j][i][k];
lhsY[3][1][CC][j][i][k] = lhsY[3][1][CC][j][i][k] - coeff*lhsY[4][1][CC][j][i][k];
lhsY[3][2][CC][j][i][k] = lhsY[3][2][CC][j][i][k] - coeff*lhsY[4][2][CC][j][i][k];
lhsY[3][3][CC][j][i][k] = lhsY[3][3][CC][j][i][k] - coeff*lhsY[4][3][CC][j][i][k];
lhsY[3][4][CC][j][i][k] = lhsY[3][4][CC][j][i][k] - coeff*lhsY[4][4][CC][j][i][k];
rhs[k][j][i][3] = rhs[k][j][i][3] - coeff*rhs[k][j][i][4];
}/*end j*/
}/*end i*/
}/*end k*/
//---------------------------------------------------------------------
// rhs(jsize) = rhs(jsize) - A*rhs(jsize-1)
//---------------------------------------------------------------------
//matvec_sub(lhsY[i][jsize-1][AA], rhs[k][jsize][i][k], rhs[k][jsize][i]);
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for private(i,k)
#else
#pragma omp target teams distribute parallel for simd collapse(2)
#endif
/* Block matrix-vector subtract at the last plane:
   rhs(jsize) -= A(jsize) * rhs(jsize-1), where A is the 5x5 sub-diagonal
   block (AA).  The reference 5-iteration loop over component m is kept in
   the comment below; here it is fully unrolled for the accelerator.
   Iterations over (i,k) are independent, hence the collapse(2)/simd split. */
for (i = 1; i <= gp02; i++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd
#endif
for (k = 1; k <= gp22; k++) {
/*
for(m = 0; m < 5; m++){
rhs[k][jsize][i][m] = rhs[k][jsize][i][m] - lhsY[m][0][AA][jsize][i][k]*rhs[k][jsize-1][i][0]
- lhsY[m][1][AA][jsize][i][k]*rhs[k][jsize-1][i][1]
- lhsY[m][2][AA][jsize][i][k]*rhs[k][jsize-1][i][2]
- lhsY[m][3][AA][jsize][i][k]*rhs[k][jsize-1][i][3]
- lhsY[m][4][AA][jsize][i][k]*rhs[k][jsize-1][i][4];
}
*/
rhs[k][jsize][i][0] = rhs[k][jsize][i][0] - lhsY[0][0][AA][jsize][i][k]*rhs[k][jsize-1][i][0]
- lhsY[0][1][AA][jsize][i][k]*rhs[k][jsize-1][i][1]
- lhsY[0][2][AA][jsize][i][k]*rhs[k][jsize-1][i][2]
- lhsY[0][3][AA][jsize][i][k]*rhs[k][jsize-1][i][3]
- lhsY[0][4][AA][jsize][i][k]*rhs[k][jsize-1][i][4];
rhs[k][jsize][i][1] = rhs[k][jsize][i][1] - lhsY[1][0][AA][jsize][i][k]*rhs[k][jsize-1][i][0]
- lhsY[1][1][AA][jsize][i][k]*rhs[k][jsize-1][i][1]
- lhsY[1][2][AA][jsize][i][k]*rhs[k][jsize-1][i][2]
- lhsY[1][3][AA][jsize][i][k]*rhs[k][jsize-1][i][3]
- lhsY[1][4][AA][jsize][i][k]*rhs[k][jsize-1][i][4];
rhs[k][jsize][i][2] = rhs[k][jsize][i][2] - lhsY[2][0][AA][jsize][i][k]*rhs[k][jsize-1][i][0]
- lhsY[2][1][AA][jsize][i][k]*rhs[k][jsize-1][i][1]
- lhsY[2][2][AA][jsize][i][k]*rhs[k][jsize-1][i][2]
- lhsY[2][3][AA][jsize][i][k]*rhs[k][jsize-1][i][3]
- lhsY[2][4][AA][jsize][i][k]*rhs[k][jsize-1][i][4];
rhs[k][jsize][i][3] = rhs[k][jsize][i][3] - lhsY[3][0][AA][jsize][i][k]*rhs[k][jsize-1][i][0]
- lhsY[3][1][AA][jsize][i][k]*rhs[k][jsize-1][i][1]
- lhsY[3][2][AA][jsize][i][k]*rhs[k][jsize-1][i][2]
- lhsY[3][3][AA][jsize][i][k]*rhs[k][jsize-1][i][3]
- lhsY[3][4][AA][jsize][i][k]*rhs[k][jsize-1][i][4];
rhs[k][jsize][i][4] = rhs[k][jsize][i][4] - lhsY[4][0][AA][jsize][i][k]*rhs[k][jsize-1][i][0]
- lhsY[4][1][AA][jsize][i][k]*rhs[k][jsize-1][i][1]
- lhsY[4][2][AA][jsize][i][k]*rhs[k][jsize-1][i][2]
- lhsY[4][3][AA][jsize][i][k]*rhs[k][jsize-1][i][3]
- lhsY[4][4][AA][jsize][i][k]*rhs[k][jsize-1][i][4];
}
}
//---------------------------------------------------------------------
// B(jsize) = B(jsize) - C(jsize-1)*A(jsize)
// matmul_sub(AA,i,jsize,k,c,
// $ CC,i,jsize-1,k,c,BB,i,jsize,k)
//---------------------------------------------------------------------
//matmul_sub(lhsY[jsize-1][i][AA], lhsY[k][jsize][i][k][CC], lhsY[k][i][jsize][BB]);
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for private(i,k)
#else
#pragma omp target teams distribute parallel for simd collapse(2)
#endif
/* 5x5 block matrix-matrix subtract at the last plane:
   B(jsize) -= A(jsize) * C(jsize-1), i.e. lhsY[..][BB] is updated from
   lhsY[..][AA] and the previous plane's lhsY[..][CC].  The rolled m/n
   reference loops are kept in the comment; the 25 entries are unrolled,
   grouped by result column m (all n for m=0, then m=1, ...). */
for (i = 1; i <= gp02; i++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd
#endif
for (k = 1; k <= gp22; k++) {
/*
for(m = 0; m < 5; m++){
for(n = 0; n < 5; n++){
lhsY[n][m][BB][jsize][i][k] = lhsY[n][m][BB][jsize][i][k] - lhsY[n][0][AA][jsize][i][k]*lhsY[0][m][CC][jsize-1][i][k]
- lhsY[n][1][AA][jsize][i][k]*lhsY[1][m][CC][jsize-1][i][k]
- lhsY[n][2][AA][jsize][i][k]*lhsY[2][m][CC][jsize-1][i][k]
- lhsY[n][3][AA][jsize][i][k]*lhsY[3][m][CC][jsize-1][i][k]
- lhsY[n][4][AA][jsize][i][k]*lhsY[4][m][CC][jsize-1][i][k];
}
}
*/
lhsY[0][0][BB][jsize][i][k] = lhsY[0][0][BB][jsize][i][k] - lhsY[0][0][AA][jsize][i][k]*lhsY[0][0][CC][jsize-1][i][k]
- lhsY[0][1][AA][jsize][i][k]*lhsY[1][0][CC][jsize-1][i][k]
- lhsY[0][2][AA][jsize][i][k]*lhsY[2][0][CC][jsize-1][i][k]
- lhsY[0][3][AA][jsize][i][k]*lhsY[3][0][CC][jsize-1][i][k]
- lhsY[0][4][AA][jsize][i][k]*lhsY[4][0][CC][jsize-1][i][k];
lhsY[1][0][BB][jsize][i][k] = lhsY[1][0][BB][jsize][i][k] - lhsY[1][0][AA][jsize][i][k]*lhsY[0][0][CC][jsize-1][i][k]
- lhsY[1][1][AA][jsize][i][k]*lhsY[1][0][CC][jsize-1][i][k]
- lhsY[1][2][AA][jsize][i][k]*lhsY[2][0][CC][jsize-1][i][k]
- lhsY[1][3][AA][jsize][i][k]*lhsY[3][0][CC][jsize-1][i][k]
- lhsY[1][4][AA][jsize][i][k]*lhsY[4][0][CC][jsize-1][i][k];
lhsY[2][0][BB][jsize][i][k] = lhsY[2][0][BB][jsize][i][k] - lhsY[2][0][AA][jsize][i][k]*lhsY[0][0][CC][jsize-1][i][k]
- lhsY[2][1][AA][jsize][i][k]*lhsY[1][0][CC][jsize-1][i][k]
- lhsY[2][2][AA][jsize][i][k]*lhsY[2][0][CC][jsize-1][i][k]
- lhsY[2][3][AA][jsize][i][k]*lhsY[3][0][CC][jsize-1][i][k]
- lhsY[2][4][AA][jsize][i][k]*lhsY[4][0][CC][jsize-1][i][k];
lhsY[3][0][BB][jsize][i][k] = lhsY[3][0][BB][jsize][i][k] - lhsY[3][0][AA][jsize][i][k]*lhsY[0][0][CC][jsize-1][i][k]
- lhsY[3][1][AA][jsize][i][k]*lhsY[1][0][CC][jsize-1][i][k]
- lhsY[3][2][AA][jsize][i][k]*lhsY[2][0][CC][jsize-1][i][k]
- lhsY[3][3][AA][jsize][i][k]*lhsY[3][0][CC][jsize-1][i][k]
- lhsY[3][4][AA][jsize][i][k]*lhsY[4][0][CC][jsize-1][i][k];
lhsY[4][0][BB][jsize][i][k] = lhsY[4][0][BB][jsize][i][k] - lhsY[4][0][AA][jsize][i][k]*lhsY[0][0][CC][jsize-1][i][k]
- lhsY[4][1][AA][jsize][i][k]*lhsY[1][0][CC][jsize-1][i][k]
- lhsY[4][2][AA][jsize][i][k]*lhsY[2][0][CC][jsize-1][i][k]
- lhsY[4][3][AA][jsize][i][k]*lhsY[3][0][CC][jsize-1][i][k]
- lhsY[4][4][AA][jsize][i][k]*lhsY[4][0][CC][jsize-1][i][k];
lhsY[0][1][BB][jsize][i][k] = lhsY[0][1][BB][jsize][i][k] - lhsY[0][0][AA][jsize][i][k]*lhsY[0][1][CC][jsize-1][i][k]
- lhsY[0][1][AA][jsize][i][k]*lhsY[1][1][CC][jsize-1][i][k]
- lhsY[0][2][AA][jsize][i][k]*lhsY[2][1][CC][jsize-1][i][k]
- lhsY[0][3][AA][jsize][i][k]*lhsY[3][1][CC][jsize-1][i][k]
- lhsY[0][4][AA][jsize][i][k]*lhsY[4][1][CC][jsize-1][i][k];
lhsY[1][1][BB][jsize][i][k] = lhsY[1][1][BB][jsize][i][k] - lhsY[1][0][AA][jsize][i][k]*lhsY[0][1][CC][jsize-1][i][k]
- lhsY[1][1][AA][jsize][i][k]*lhsY[1][1][CC][jsize-1][i][k]
- lhsY[1][2][AA][jsize][i][k]*lhsY[2][1][CC][jsize-1][i][k]
- lhsY[1][3][AA][jsize][i][k]*lhsY[3][1][CC][jsize-1][i][k]
- lhsY[1][4][AA][jsize][i][k]*lhsY[4][1][CC][jsize-1][i][k];
lhsY[2][1][BB][jsize][i][k] = lhsY[2][1][BB][jsize][i][k] - lhsY[2][0][AA][jsize][i][k]*lhsY[0][1][CC][jsize-1][i][k]
- lhsY[2][1][AA][jsize][i][k]*lhsY[1][1][CC][jsize-1][i][k]
- lhsY[2][2][AA][jsize][i][k]*lhsY[2][1][CC][jsize-1][i][k]
- lhsY[2][3][AA][jsize][i][k]*lhsY[3][1][CC][jsize-1][i][k]
- lhsY[2][4][AA][jsize][i][k]*lhsY[4][1][CC][jsize-1][i][k];
lhsY[3][1][BB][jsize][i][k] = lhsY[3][1][BB][jsize][i][k] - lhsY[3][0][AA][jsize][i][k]*lhsY[0][1][CC][jsize-1][i][k]
- lhsY[3][1][AA][jsize][i][k]*lhsY[1][1][CC][jsize-1][i][k]
- lhsY[3][2][AA][jsize][i][k]*lhsY[2][1][CC][jsize-1][i][k]
- lhsY[3][3][AA][jsize][i][k]*lhsY[3][1][CC][jsize-1][i][k]
- lhsY[3][4][AA][jsize][i][k]*lhsY[4][1][CC][jsize-1][i][k];
lhsY[4][1][BB][jsize][i][k] = lhsY[4][1][BB][jsize][i][k] - lhsY[4][0][AA][jsize][i][k]*lhsY[0][1][CC][jsize-1][i][k]
- lhsY[4][1][AA][jsize][i][k]*lhsY[1][1][CC][jsize-1][i][k]
- lhsY[4][2][AA][jsize][i][k]*lhsY[2][1][CC][jsize-1][i][k]
- lhsY[4][3][AA][jsize][i][k]*lhsY[3][1][CC][jsize-1][i][k]
- lhsY[4][4][AA][jsize][i][k]*lhsY[4][1][CC][jsize-1][i][k];
lhsY[0][2][BB][jsize][i][k] = lhsY[0][2][BB][jsize][i][k] - lhsY[0][0][AA][jsize][i][k]*lhsY[0][2][CC][jsize-1][i][k]
- lhsY[0][1][AA][jsize][i][k]*lhsY[1][2][CC][jsize-1][i][k]
- lhsY[0][2][AA][jsize][i][k]*lhsY[2][2][CC][jsize-1][i][k]
- lhsY[0][3][AA][jsize][i][k]*lhsY[3][2][CC][jsize-1][i][k]
- lhsY[0][4][AA][jsize][i][k]*lhsY[4][2][CC][jsize-1][i][k];
lhsY[1][2][BB][jsize][i][k] = lhsY[1][2][BB][jsize][i][k] - lhsY[1][0][AA][jsize][i][k]*lhsY[0][2][CC][jsize-1][i][k]
- lhsY[1][1][AA][jsize][i][k]*lhsY[1][2][CC][jsize-1][i][k]
- lhsY[1][2][AA][jsize][i][k]*lhsY[2][2][CC][jsize-1][i][k]
- lhsY[1][3][AA][jsize][i][k]*lhsY[3][2][CC][jsize-1][i][k]
- lhsY[1][4][AA][jsize][i][k]*lhsY[4][2][CC][jsize-1][i][k];
lhsY[2][2][BB][jsize][i][k] = lhsY[2][2][BB][jsize][i][k] - lhsY[2][0][AA][jsize][i][k]*lhsY[0][2][CC][jsize-1][i][k]
- lhsY[2][1][AA][jsize][i][k]*lhsY[1][2][CC][jsize-1][i][k]
- lhsY[2][2][AA][jsize][i][k]*lhsY[2][2][CC][jsize-1][i][k]
- lhsY[2][3][AA][jsize][i][k]*lhsY[3][2][CC][jsize-1][i][k]
- lhsY[2][4][AA][jsize][i][k]*lhsY[4][2][CC][jsize-1][i][k];
lhsY[3][2][BB][jsize][i][k] = lhsY[3][2][BB][jsize][i][k] - lhsY[3][0][AA][jsize][i][k]*lhsY[0][2][CC][jsize-1][i][k]
- lhsY[3][1][AA][jsize][i][k]*lhsY[1][2][CC][jsize-1][i][k]
- lhsY[3][2][AA][jsize][i][k]*lhsY[2][2][CC][jsize-1][i][k]
- lhsY[3][3][AA][jsize][i][k]*lhsY[3][2][CC][jsize-1][i][k]
- lhsY[3][4][AA][jsize][i][k]*lhsY[4][2][CC][jsize-1][i][k];
lhsY[4][2][BB][jsize][i][k] = lhsY[4][2][BB][jsize][i][k] - lhsY[4][0][AA][jsize][i][k]*lhsY[0][2][CC][jsize-1][i][k]
- lhsY[4][1][AA][jsize][i][k]*lhsY[1][2][CC][jsize-1][i][k]
- lhsY[4][2][AA][jsize][i][k]*lhsY[2][2][CC][jsize-1][i][k]
- lhsY[4][3][AA][jsize][i][k]*lhsY[3][2][CC][jsize-1][i][k]
- lhsY[4][4][AA][jsize][i][k]*lhsY[4][2][CC][jsize-1][i][k];
lhsY[0][3][BB][jsize][i][k] = lhsY[0][3][BB][jsize][i][k] - lhsY[0][0][AA][jsize][i][k]*lhsY[0][3][CC][jsize-1][i][k]
- lhsY[0][1][AA][jsize][i][k]*lhsY[1][3][CC][jsize-1][i][k]
- lhsY[0][2][AA][jsize][i][k]*lhsY[2][3][CC][jsize-1][i][k]
- lhsY[0][3][AA][jsize][i][k]*lhsY[3][3][CC][jsize-1][i][k]
- lhsY[0][4][AA][jsize][i][k]*lhsY[4][3][CC][jsize-1][i][k];
lhsY[1][3][BB][jsize][i][k] = lhsY[1][3][BB][jsize][i][k] - lhsY[1][0][AA][jsize][i][k]*lhsY[0][3][CC][jsize-1][i][k]
- lhsY[1][1][AA][jsize][i][k]*lhsY[1][3][CC][jsize-1][i][k]
- lhsY[1][2][AA][jsize][i][k]*lhsY[2][3][CC][jsize-1][i][k]
- lhsY[1][3][AA][jsize][i][k]*lhsY[3][3][CC][jsize-1][i][k]
- lhsY[1][4][AA][jsize][i][k]*lhsY[4][3][CC][jsize-1][i][k];
lhsY[2][3][BB][jsize][i][k] = lhsY[2][3][BB][jsize][i][k] - lhsY[2][0][AA][jsize][i][k]*lhsY[0][3][CC][jsize-1][i][k]
- lhsY[2][1][AA][jsize][i][k]*lhsY[1][3][CC][jsize-1][i][k]
- lhsY[2][2][AA][jsize][i][k]*lhsY[2][3][CC][jsize-1][i][k]
- lhsY[2][3][AA][jsize][i][k]*lhsY[3][3][CC][jsize-1][i][k]
- lhsY[2][4][AA][jsize][i][k]*lhsY[4][3][CC][jsize-1][i][k];
lhsY[3][3][BB][jsize][i][k] = lhsY[3][3][BB][jsize][i][k] - lhsY[3][0][AA][jsize][i][k]*lhsY[0][3][CC][jsize-1][i][k]
- lhsY[3][1][AA][jsize][i][k]*lhsY[1][3][CC][jsize-1][i][k]
- lhsY[3][2][AA][jsize][i][k]*lhsY[2][3][CC][jsize-1][i][k]
- lhsY[3][3][AA][jsize][i][k]*lhsY[3][3][CC][jsize-1][i][k]
- lhsY[3][4][AA][jsize][i][k]*lhsY[4][3][CC][jsize-1][i][k];
lhsY[4][3][BB][jsize][i][k] = lhsY[4][3][BB][jsize][i][k] - lhsY[4][0][AA][jsize][i][k]*lhsY[0][3][CC][jsize-1][i][k]
- lhsY[4][1][AA][jsize][i][k]*lhsY[1][3][CC][jsize-1][i][k]
- lhsY[4][2][AA][jsize][i][k]*lhsY[2][3][CC][jsize-1][i][k]
- lhsY[4][3][AA][jsize][i][k]*lhsY[3][3][CC][jsize-1][i][k]
- lhsY[4][4][AA][jsize][i][k]*lhsY[4][3][CC][jsize-1][i][k];
lhsY[0][4][BB][jsize][i][k] = lhsY[0][4][BB][jsize][i][k] - lhsY[0][0][AA][jsize][i][k]*lhsY[0][4][CC][jsize-1][i][k]
- lhsY[0][1][AA][jsize][i][k]*lhsY[1][4][CC][jsize-1][i][k]
- lhsY[0][2][AA][jsize][i][k]*lhsY[2][4][CC][jsize-1][i][k]
- lhsY[0][3][AA][jsize][i][k]*lhsY[3][4][CC][jsize-1][i][k]
- lhsY[0][4][AA][jsize][i][k]*lhsY[4][4][CC][jsize-1][i][k];
lhsY[1][4][BB][jsize][i][k] = lhsY[1][4][BB][jsize][i][k] - lhsY[1][0][AA][jsize][i][k]*lhsY[0][4][CC][jsize-1][i][k]
- lhsY[1][1][AA][jsize][i][k]*lhsY[1][4][CC][jsize-1][i][k]
- lhsY[1][2][AA][jsize][i][k]*lhsY[2][4][CC][jsize-1][i][k]
- lhsY[1][3][AA][jsize][i][k]*lhsY[3][4][CC][jsize-1][i][k]
- lhsY[1][4][AA][jsize][i][k]*lhsY[4][4][CC][jsize-1][i][k];
lhsY[2][4][BB][jsize][i][k] = lhsY[2][4][BB][jsize][i][k] - lhsY[2][0][AA][jsize][i][k]*lhsY[0][4][CC][jsize-1][i][k]
- lhsY[2][1][AA][jsize][i][k]*lhsY[1][4][CC][jsize-1][i][k]
- lhsY[2][2][AA][jsize][i][k]*lhsY[2][4][CC][jsize-1][i][k]
- lhsY[2][3][AA][jsize][i][k]*lhsY[3][4][CC][jsize-1][i][k]
- lhsY[2][4][AA][jsize][i][k]*lhsY[4][4][CC][jsize-1][i][k];
lhsY[3][4][BB][jsize][i][k] = lhsY[3][4][BB][jsize][i][k] - lhsY[3][0][AA][jsize][i][k]*lhsY[0][4][CC][jsize-1][i][k]
- lhsY[3][1][AA][jsize][i][k]*lhsY[1][4][CC][jsize-1][i][k]
- lhsY[3][2][AA][jsize][i][k]*lhsY[2][4][CC][jsize-1][i][k]
- lhsY[3][3][AA][jsize][i][k]*lhsY[3][4][CC][jsize-1][i][k]
- lhsY[3][4][AA][jsize][i][k]*lhsY[4][4][CC][jsize-1][i][k];
lhsY[4][4][BB][jsize][i][k] = lhsY[4][4][BB][jsize][i][k] - lhsY[4][0][AA][jsize][i][k]*lhsY[0][4][CC][jsize-1][i][k]
- lhsY[4][1][AA][jsize][i][k]*lhsY[1][4][CC][jsize-1][i][k]
- lhsY[4][2][AA][jsize][i][k]*lhsY[2][4][CC][jsize-1][i][k]
- lhsY[4][3][AA][jsize][i][k]*lhsY[3][4][CC][jsize-1][i][k]
- lhsY[4][4][AA][jsize][i][k]*lhsY[4][4][CC][jsize-1][i][k];
}
}
//---------------------------------------------------------------------
// multiply rhs(jsize) by b_inverse(jsize) and copy to rhs
//---------------------------------------------------------------------
//binvrhs( lhsY[i][jsize][BB], rhs[k][jsize][i][k] );
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for private(i,k,pivot,coeff)
#else
#pragma omp target teams distribute parallel for simd private(pivot,coeff) collapse(2)
#endif
/* binvrhs: in-place Gauss-Jordan elimination of the 5x5 block B(jsize),
   applied simultaneously to rhs(jsize).  The rolled reference loops are
   kept in the comment below; the five pivot stages are fully unrolled.
   pivot/coeff are privatized per (i,k) iteration.  NOTE(review): no pivot
   magnitude check — assumes the diagonal blocks are nonsingular, as in the
   reference NPB implementation. */
for (i = 1; i <= gp02; i++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd private(pivot,coeff)
#endif
for (k = 1; k <= gp22; k++) {
/*
for(m = 0; m < 5; m++){
pivot = 1.00/lhsY[m][m][BB][jsize][i][k];
for(n = m+1; n < 5; n++){
lhsY[m][n][BB][jsize][i][k] = lhsY[m][n][BB][jsize][i][k]*pivot;
}
rhs[k][jsize][i][m] = rhs[k][jsize][i][m]*pivot;
for(n = 0; n < 5; n++){
if(n != m){
coeff = lhsY[n][m][BB][jsize][i][k];
for(z = m+1; z < 5; z++){
lhsY[n][z][BB][jsize][i][k] = lhsY[n][z][BB][jsize][i][k] - coeff*lhsY[m][z][BB][jsize][i][k];
}
rhs[k][jsize][i][n] = rhs[k][jsize][i][n] - coeff*rhs[k][jsize][i][m];
}
}
}
*/
/* stage m = 0: scale pivot row 0, eliminate column 0 from rows 1..4 */
pivot = 1.00/lhsY[0][0][BB][jsize][i][k];
lhsY[0][1][BB][jsize][i][k] = lhsY[0][1][BB][jsize][i][k]*pivot;
lhsY[0][2][BB][jsize][i][k] = lhsY[0][2][BB][jsize][i][k]*pivot;
lhsY[0][3][BB][jsize][i][k] = lhsY[0][3][BB][jsize][i][k]*pivot;
lhsY[0][4][BB][jsize][i][k] = lhsY[0][4][BB][jsize][i][k]*pivot;
rhs[k][jsize][i][0] = rhs[k][jsize][i][0] *pivot;
coeff = lhsY[1][0][BB][jsize][i][k];
lhsY[1][1][BB][jsize][i][k]= lhsY[1][1][BB][jsize][i][k] - coeff*lhsY[0][1][BB][jsize][i][k];
lhsY[1][2][BB][jsize][i][k]= lhsY[1][2][BB][jsize][i][k] - coeff*lhsY[0][2][BB][jsize][i][k];
lhsY[1][3][BB][jsize][i][k]= lhsY[1][3][BB][jsize][i][k] - coeff*lhsY[0][3][BB][jsize][i][k];
lhsY[1][4][BB][jsize][i][k]= lhsY[1][4][BB][jsize][i][k] - coeff*lhsY[0][4][BB][jsize][i][k];
rhs[k][jsize][i][1] = rhs[k][jsize][i][1] - coeff*rhs[k][jsize][i][0];
coeff = lhsY[2][0][BB][jsize][i][k];
lhsY[2][1][BB][jsize][i][k]= lhsY[2][1][BB][jsize][i][k] - coeff*lhsY[0][1][BB][jsize][i][k];
lhsY[2][2][BB][jsize][i][k]= lhsY[2][2][BB][jsize][i][k] - coeff*lhsY[0][2][BB][jsize][i][k];
lhsY[2][3][BB][jsize][i][k]= lhsY[2][3][BB][jsize][i][k] - coeff*lhsY[0][3][BB][jsize][i][k];
lhsY[2][4][BB][jsize][i][k]= lhsY[2][4][BB][jsize][i][k] - coeff*lhsY[0][4][BB][jsize][i][k];
rhs[k][jsize][i][2] = rhs[k][jsize][i][2] - coeff*rhs[k][jsize][i][0];
coeff = lhsY[3][0][BB][jsize][i][k];
lhsY[3][1][BB][jsize][i][k]= lhsY[3][1][BB][jsize][i][k] - coeff*lhsY[0][1][BB][jsize][i][k];
lhsY[3][2][BB][jsize][i][k]= lhsY[3][2][BB][jsize][i][k] - coeff*lhsY[0][2][BB][jsize][i][k];
lhsY[3][3][BB][jsize][i][k]= lhsY[3][3][BB][jsize][i][k] - coeff*lhsY[0][3][BB][jsize][i][k];
lhsY[3][4][BB][jsize][i][k]= lhsY[3][4][BB][jsize][i][k] - coeff*lhsY[0][4][BB][jsize][i][k];
rhs[k][jsize][i][3] = rhs[k][jsize][i][3] - coeff*rhs[k][jsize][i][0];
coeff = lhsY[4][0][BB][jsize][i][k];
lhsY[4][1][BB][jsize][i][k]= lhsY[4][1][BB][jsize][i][k] - coeff*lhsY[0][1][BB][jsize][i][k];
lhsY[4][2][BB][jsize][i][k]= lhsY[4][2][BB][jsize][i][k] - coeff*lhsY[0][2][BB][jsize][i][k];
lhsY[4][3][BB][jsize][i][k]= lhsY[4][3][BB][jsize][i][k] - coeff*lhsY[0][3][BB][jsize][i][k];
lhsY[4][4][BB][jsize][i][k]= lhsY[4][4][BB][jsize][i][k] - coeff*lhsY[0][4][BB][jsize][i][k];
rhs[k][jsize][i][4] = rhs[k][jsize][i][4] - coeff*rhs[k][jsize][i][0];
/* stage m = 1 */
pivot = 1.00/lhsY[1][1][BB][jsize][i][k];
lhsY[1][2][BB][jsize][i][k] = lhsY[1][2][BB][jsize][i][k]*pivot;
lhsY[1][3][BB][jsize][i][k] = lhsY[1][3][BB][jsize][i][k]*pivot;
lhsY[1][4][BB][jsize][i][k] = lhsY[1][4][BB][jsize][i][k]*pivot;
rhs[k][jsize][i][1] = rhs[k][jsize][i][1] *pivot;
coeff = lhsY[0][1][BB][jsize][i][k];
lhsY[0][2][BB][jsize][i][k]= lhsY[0][2][BB][jsize][i][k] - coeff*lhsY[1][2][BB][jsize][i][k];
lhsY[0][3][BB][jsize][i][k]= lhsY[0][3][BB][jsize][i][k] - coeff*lhsY[1][3][BB][jsize][i][k];
lhsY[0][4][BB][jsize][i][k]= lhsY[0][4][BB][jsize][i][k] - coeff*lhsY[1][4][BB][jsize][i][k];
rhs[k][jsize][i][0] = rhs[k][jsize][i][0] - coeff*rhs[k][jsize][i][1];
coeff = lhsY[2][1][BB][jsize][i][k];
lhsY[2][2][BB][jsize][i][k]= lhsY[2][2][BB][jsize][i][k] - coeff*lhsY[1][2][BB][jsize][i][k];
lhsY[2][3][BB][jsize][i][k]= lhsY[2][3][BB][jsize][i][k] - coeff*lhsY[1][3][BB][jsize][i][k];
lhsY[2][4][BB][jsize][i][k]= lhsY[2][4][BB][jsize][i][k] - coeff*lhsY[1][4][BB][jsize][i][k];
rhs[k][jsize][i][2] = rhs[k][jsize][i][2] - coeff*rhs[k][jsize][i][1];
coeff = lhsY[3][1][BB][jsize][i][k];
lhsY[3][2][BB][jsize][i][k]= lhsY[3][2][BB][jsize][i][k] - coeff*lhsY[1][2][BB][jsize][i][k];
lhsY[3][3][BB][jsize][i][k]= lhsY[3][3][BB][jsize][i][k] - coeff*lhsY[1][3][BB][jsize][i][k];
lhsY[3][4][BB][jsize][i][k]= lhsY[3][4][BB][jsize][i][k] - coeff*lhsY[1][4][BB][jsize][i][k];
rhs[k][jsize][i][3] = rhs[k][jsize][i][3] - coeff*rhs[k][jsize][i][1];
coeff = lhsY[4][1][BB][jsize][i][k];
lhsY[4][2][BB][jsize][i][k]= lhsY[4][2][BB][jsize][i][k] - coeff*lhsY[1][2][BB][jsize][i][k];
lhsY[4][3][BB][jsize][i][k]= lhsY[4][3][BB][jsize][i][k] - coeff*lhsY[1][3][BB][jsize][i][k];
lhsY[4][4][BB][jsize][i][k]= lhsY[4][4][BB][jsize][i][k] - coeff*lhsY[1][4][BB][jsize][i][k];
rhs[k][jsize][i][4] = rhs[k][jsize][i][4] - coeff*rhs[k][jsize][i][1];
/* stage m = 2 */
pivot = 1.00/lhsY[2][2][BB][jsize][i][k];
lhsY[2][3][BB][jsize][i][k] = lhsY[2][3][BB][jsize][i][k]*pivot;
lhsY[2][4][BB][jsize][i][k] = lhsY[2][4][BB][jsize][i][k]*pivot;
rhs[k][jsize][i][2] = rhs[k][jsize][i][2] *pivot;
coeff = lhsY[0][2][BB][jsize][i][k];
lhsY[0][3][BB][jsize][i][k]= lhsY[0][3][BB][jsize][i][k] - coeff*lhsY[2][3][BB][jsize][i][k];
lhsY[0][4][BB][jsize][i][k]= lhsY[0][4][BB][jsize][i][k] - coeff*lhsY[2][4][BB][jsize][i][k];
rhs[k][jsize][i][0] = rhs[k][jsize][i][0] - coeff*rhs[k][jsize][i][2];
coeff = lhsY[1][2][BB][jsize][i][k];
lhsY[1][3][BB][jsize][i][k]= lhsY[1][3][BB][jsize][i][k] - coeff*lhsY[2][3][BB][jsize][i][k];
lhsY[1][4][BB][jsize][i][k]= lhsY[1][4][BB][jsize][i][k] - coeff*lhsY[2][4][BB][jsize][i][k];
rhs[k][jsize][i][1] = rhs[k][jsize][i][1] - coeff*rhs[k][jsize][i][2];
coeff = lhsY[3][2][BB][jsize][i][k];
lhsY[3][3][BB][jsize][i][k]= lhsY[3][3][BB][jsize][i][k] - coeff*lhsY[2][3][BB][jsize][i][k];
lhsY[3][4][BB][jsize][i][k]= lhsY[3][4][BB][jsize][i][k] - coeff*lhsY[2][4][BB][jsize][i][k];
rhs[k][jsize][i][3] = rhs[k][jsize][i][3] - coeff*rhs[k][jsize][i][2];
coeff = lhsY[4][2][BB][jsize][i][k];
lhsY[4][3][BB][jsize][i][k]= lhsY[4][3][BB][jsize][i][k] - coeff*lhsY[2][3][BB][jsize][i][k];
lhsY[4][4][BB][jsize][i][k]= lhsY[4][4][BB][jsize][i][k] - coeff*lhsY[2][4][BB][jsize][i][k];
rhs[k][jsize][i][4] = rhs[k][jsize][i][4] - coeff*rhs[k][jsize][i][2];
/* stage m = 3 */
pivot = 1.00/lhsY[3][3][BB][jsize][i][k];
lhsY[3][4][BB][jsize][i][k] = lhsY[3][4][BB][jsize][i][k]*pivot;
rhs[k][jsize][i][3] = rhs[k][jsize][i][3] *pivot;
coeff = lhsY[0][3][BB][jsize][i][k];
lhsY[0][4][BB][jsize][i][k]= lhsY[0][4][BB][jsize][i][k] - coeff*lhsY[3][4][BB][jsize][i][k];
rhs[k][jsize][i][0] = rhs[k][jsize][i][0] - coeff*rhs[k][jsize][i][3];
coeff = lhsY[1][3][BB][jsize][i][k];
lhsY[1][4][BB][jsize][i][k]= lhsY[1][4][BB][jsize][i][k] - coeff*lhsY[3][4][BB][jsize][i][k];
rhs[k][jsize][i][1] = rhs[k][jsize][i][1] - coeff*rhs[k][jsize][i][3];
coeff = lhsY[2][3][BB][jsize][i][k];
lhsY[2][4][BB][jsize][i][k]= lhsY[2][4][BB][jsize][i][k] - coeff*lhsY[3][4][BB][jsize][i][k];
rhs[k][jsize][i][2] = rhs[k][jsize][i][2] - coeff*rhs[k][jsize][i][3];
coeff = lhsY[4][3][BB][jsize][i][k];
lhsY[4][4][BB][jsize][i][k]= lhsY[4][4][BB][jsize][i][k] - coeff*lhsY[3][4][BB][jsize][i][k];
rhs[k][jsize][i][4] = rhs[k][jsize][i][4] - coeff*rhs[k][jsize][i][3];
/* stage m = 4: only rhs remains to update */
pivot = 1.00/lhsY[4][4][BB][jsize][i][k];
rhs[k][jsize][i][4] = rhs[k][jsize][i][4] *pivot;
coeff = lhsY[0][4][BB][jsize][i][k];
rhs[k][jsize][i][0] = rhs[k][jsize][i][0] - coeff*rhs[k][jsize][i][4];
coeff = lhsY[1][4][BB][jsize][i][k];
rhs[k][jsize][i][1] = rhs[k][jsize][i][1] - coeff*rhs[k][jsize][i][4];
coeff = lhsY[2][4][BB][jsize][i][k];
rhs[k][jsize][i][2] = rhs[k][jsize][i][2] - coeff*rhs[k][jsize][i][4];
coeff = lhsY[3][4][BB][jsize][i][k];
rhs[k][jsize][i][3] = rhs[k][jsize][i][3] - coeff*rhs[k][jsize][i][4];
}
}
//---------------------------------------------------------------------
// back solve: if last cell, then generate U(jsize)=rhs(jsize)
// else assume U(jsize) is loaded in un pack backsub_info
// so just use it
// after u(jstart) will be sent to next cell
//---------------------------------------------------------------------
/* Back substitution: sweep j from jsize-1 down to 0; each plane's rhs is
   corrected with its CC block times the already-solved rhs at j+1.  The j
   loop carries a dependence (needs rhs at j+1), so only the (k,i) loops
   are offloaded/collapsed; m,n iterate the 5x5 block (BLOCK_SIZE). */
for (j = jsize-1; j >= 0; j--) {
#pragma omp target teams distribute parallel for collapse(2) private(i,k,m,n)
for (k = 1; k <= gp22; k++) {
for (i = 1; i <= gp02; i++) {
for (m = 0; m < BLOCK_SIZE; m++) {
for (n = 0; n < BLOCK_SIZE; n++) {
rhs[k][j][i][m] = rhs[k][j][i][m]
- lhsY[m][n][CC][j][i][k]*rhs[k][j+1][i][n];
}
}
}
}
}
}/*end omp target data */
}
|
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies an arithmetic, relational, or logical operator,
% together with a constant value, to each pixel of an image. Use these
% operations to lighten or darken an image, to increase or decrease contrast
% in an image, or to produce the "negative" of an image.
%
% The format of the EvaluateImage method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o op: the evaluate operator to apply (e.g. add, divide, max).
%
% o value: the constant operand used by the operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  One double-precision accumulator slot per pixel channel.  Arrays of these
  serve as per-thread scratch rows for the evaluate/function operators.
*/
typedef struct _PixelChannels
{
double
channel[CompositePixelChannel];
} PixelChannels;
/*
  Release a per-thread PixelChannels set created by AcquirePixelThreadSet():
  frees each thread's row (NULL entries are skipped, so partially-built
  sets are safe) and then the pointer array itself.  Always returns NULL.
*/
static PixelChannels **DestroyPixelThreadSet(PixelChannels **pixels)
{
  ssize_t
    thread;

  assert(pixels != (PixelChannels **) NULL);
  for (thread=0; thread < (ssize_t) GetMagickResourceLimit(ThreadResource); thread++)
  {
    if (pixels[thread] == (PixelChannels *) NULL)
      continue;
    pixels[thread]=(PixelChannels *) RelinquishMagickMemory(pixels[thread]);
  }
  return((PixelChannels **) RelinquishMagickMemory(pixels));
}
/*
  Allocate one PixelChannels row of image->columns entries per worker
  thread (thread count from the ThreadResource limit), with every channel
  value cleared to 0.0.  On any allocation failure the partially-built set
  is destroyed and NULL is returned.  Caller frees the result with
  DestroyPixelThreadSet().
*/
static PixelChannels **AcquirePixelThreadSet(const Image *image)
{
  PixelChannels
    **pixels;

  size_t
    number_threads;

  ssize_t
    thread;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(PixelChannels **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (PixelChannels **) NULL)
    return((PixelChannels **) NULL);
  /* zero the pointer array so a partial tear-down only frees valid rows */
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  for (thread=0; thread < (ssize_t) number_threads; thread++)
  {
    ssize_t
      column;

    pixels[thread]=(PixelChannels *) AcquireQuantumMemory(image->columns,
      sizeof(**pixels));
    if (pixels[thread] == (PixelChannels *) NULL)
      return(DestroyPixelThreadSet(pixels));
    for (column=0; column < (ssize_t) image->columns; column++)
    {
      ssize_t
        channel;

      for (channel=0; channel < MaxPixelChannels; channel++)
        pixels[thread][column].channel[channel]=0.0;
    }
  }
  return(pixels);
}
/*
  Return the larger of x and y (y when they compare equal).
*/
static inline double EvaluateMax(const double x,const double y)
{
  return(x > y ? x : y);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort()-style comparator for PixelChannels records: orders by the signed
  sum of per-channel differences between the two records (negative when the
  first record's channel total is smaller).
*/
static int IntensityCompare(const void *x,const void *y)
{
  const PixelChannels
    *color_1 = (const PixelChannels *) x,
    *color_2 = (const PixelChannels *) y;

  double
    delta;

  ssize_t
    i;

  delta=0.0;
  for (i=0; i < MaxPixelChannels; i++)
    delta+=color_1->channel[i]-(double) color_2->channel[i];
  if (delta < 0)
    return(-1);
  if (delta > 0)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  ApplyEvaluateOperator() applies evaluate operator 'op' with operand 'value'
  to the quantum 'pixel' and returns the raw (unclamped) double result; the
  caller clamps.  'random_info' supplies entropy for the *Noise operators.
  Undefined/unknown operators yield 0.0.
*/
static double ApplyEvaluateOperator(RandomInfo *random_info,const Quantum pixel,
  const MagickEvaluateOperator op,const double value)
{
  double
    result;
  result=0.0;
  switch (op)
  {
    case UndefinedEvaluateOperator:
      break;
    case AbsEvaluateOperator:
    {
      result=(double) fabs((double) (pixel+value));
      break;
    }
    case AddEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case AddModulusEvaluateOperator:
    {
      /*
        This returns a 'floored modulus' of the addition which is a positive
        result. It differs from % or fmod() that returns a 'truncated modulus'
        result, where floor() is replaced by trunc() and could return a
        negative result (which is clipped).
      */
      result=pixel+value;
      result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
      break;
    }
    case AndEvaluateOperator:
    {
      /* bitwise operand: value+0.5 truncated to an unsigned integer */
      result=(double) ((size_t) pixel & (size_t) (value+0.5));
      break;
    }
    case CosineEvaluateOperator:
    {
      result=(double) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case DivideEvaluateOperator:
    {
      /* a zero divisor is treated as 1.0 to avoid division by zero */
      result=pixel/(value == 0.0 ? 1.0 : value);
      break;
    }
    case ExponentialEvaluateOperator:
    {
      result=(double) (QuantumRange*exp((double) (value*QuantumScale*pixel)));
      break;
    }
    case GaussianNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        GaussianNoise,value);
      break;
    }
    case ImpulseNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,ImpulseNoise,
        value);
      break;
    }
    case LaplacianNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        LaplacianNoise,value);
      break;
    }
    case LeftShiftEvaluateOperator:
    {
      result=(double) ((size_t) pixel << (size_t) (value+0.5));
      break;
    }
    case LogEvaluateOperator:
    {
      /*
        Guard against log of ~0; note the divisor log(value+1.0) is zero
        when value == 0 — callers are expected to pass a sensible base.
      */
      if ((QuantumScale*pixel) >= MagickEpsilon)
        result=(double) (QuantumRange*log((double) (QuantumScale*value*pixel+
          1.0))/log((double) (value+1.0)));
      break;
    }
    case MaxEvaluateOperator:
    {
      result=(double) EvaluateMax((double) pixel,value);
      break;
    }
    case MeanEvaluateOperator:
    {
      /* partial sum; the caller divides by the sample count */
      result=(double) (pixel+value);
      break;
    }
    case MedianEvaluateOperator:
    {
      /* accumulate only; median selection happens in EvaluateImages() */
      result=(double) (pixel+value);
      break;
    }
    case MinEvaluateOperator:
    {
      result=(double) MagickMin((double) pixel,value);
      break;
    }
    case MultiplicativeNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        MultiplicativeGaussianNoise,value);
      break;
    }
    case MultiplyEvaluateOperator:
    {
      result=(double) (value*pixel);
      break;
    }
    case OrEvaluateOperator:
    {
      result=(double) ((size_t) pixel | (size_t) (value+0.5));
      break;
    }
    case PoissonNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,PoissonNoise,
        value);
      break;
    }
    case PowEvaluateOperator:
    {
      result=(double) (QuantumRange*pow((double) (QuantumScale*pixel),(double)
        value));
      break;
    }
    case RightShiftEvaluateOperator:
    {
      result=(double) ((size_t) pixel >> (size_t) (value+0.5));
      break;
    }
    case RootMeanSquareEvaluateOperator:
    {
      /* accumulate pixel^2; the caller applies sqrt(sum/n) */
      result=(double) (pixel*pixel+value);
      break;
    }
    case SetEvaluateOperator:
    {
      result=value;
      break;
    }
    case SineEvaluateOperator:
    {
      result=(double) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case SubtractEvaluateOperator:
    {
      result=(double) (pixel-value);
      break;
    }
    case SumEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case ThresholdEvaluateOperator:
    {
      result=(double) (((double) pixel <= value) ? 0 : QuantumRange);
      break;
    }
    case ThresholdBlackEvaluateOperator:
    {
      result=(double) (((double) pixel <= value) ? 0 : pixel);
      break;
    }
    case ThresholdWhiteEvaluateOperator:
    {
      result=(double) (((double) pixel > value) ? QuantumRange : pixel);
      break;
    }
    case UniformNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,UniformNoise,
        value);
      break;
    }
    case XorEvaluateOperator:
    {
      result=(double) ((size_t) pixel ^ (size_t) (value+0.5));
      break;
    }
  }
  return(result);
}
/*
  AcquireImageCanvas() clones the member of the image list that has the most
  channels, resized to the maximum width and height found anywhere in the
  list, to serve as the destination canvas.
*/
static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception)
{
  const Image
    *canvas,
    *next;
  size_t
    height,
    width;
  canvas=images;
  width=images->columns;
  height=images->rows;
  for (next=images; next != (Image *) NULL; next=next->next)
  {
    if (next->number_channels > canvas->number_channels)
      canvas=next;
    if (next->columns > width)
      width=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  return(CloneImage(canvas,width,height,MagickTrue,exception));
}
/*
  EvaluateImages() applies evaluate operator 'op' across corresponding pixels
  of an image sequence and returns a new canvas image (sized/channelled per
  AcquireImageCanvas()).  The median operator uses a per-pixel gather-and-sort
  pass; every other operator accumulates row by row.  Returns NULL on failure.
*/
MagickExport Image *EvaluateImages(const Image *images,
  const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag "Evaluate/Image"
  CacheView
    *evaluate_view;
  Image
    *image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PixelChannels
    **magick_restrict evaluate_pixels;
  RandomInfo
    **magick_restrict random_info;
  size_t
    number_images;
  ssize_t
    y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  number_images=GetImageListLength(images);
  evaluate_pixels=AcquirePixelThreadSet(images);
  if (evaluate_pixels == (PixelChannels **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Evaluate image pixels.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  evaluate_view=AcquireAuthenticCacheView(image,exception);
  if (op == MedianEvaluateOperator)
    {
      /*
        Median: for each destination pixel, fetch the sample from every image
        in the list, sort the samples by intensity, and keep the middle one.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CacheView
          *image_view;
        const Image
          *next;
        const int
          id = GetOpenMPThreadId();
        register PixelChannels
          *evaluate_pixel;
        register Quantum
          *magick_restrict q;
        register ssize_t
          x;
        if (status == MagickFalse)
          continue;
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        evaluate_pixel=evaluate_pixels[id];
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            j,
            k;
          for (j=0; j < (ssize_t) number_images; j++)
            for (k=0; k < MaxPixelChannels; k++)
              evaluate_pixel[j].channel[k]=0.0;
          next=images;
          for (j=0; j < (ssize_t) number_images; j++)
          {
            register const Quantum
              *p;
            register ssize_t
              i;
            image_view=AcquireVirtualCacheView(next,exception);
            p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception);
            if (p == (const Quantum *) NULL)
              {
                image_view=DestroyCacheView(image_view);
                break;
              }
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait evaluate_traits=GetPixelChannelTraits(image,channel);
              PixelTrait traits = GetPixelChannelTraits(next,channel);
              if ((traits == UndefinedPixelTrait) ||
                  (evaluate_traits == UndefinedPixelTrait))
                continue;
              if ((traits & UpdatePixelTrait) == 0)
                continue;
              evaluate_pixel[j].channel[i]=ApplyEvaluateOperator(
                random_info[id],GetPixelChannel(image,channel,p),op,
                evaluate_pixel[j].channel[i]);
            }
            image_view=DestroyCacheView(image_view);
            next=GetNextImageInList(next);
          }
          qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
            IntensityCompare);
          /*
            j equals number_images here, so j/2 picks the middle sorted
            sample (NOTE(review): if a pixel fetch failed and the loop broke
            early, j/2 indexes the partial set — confirm intended).
          */
          for (k=0; k < (ssize_t) GetPixelChannels(image); k++)
            q[k]=ClampToQuantum(evaluate_pixel[j/2].channel[k]);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(images,EvaluateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  else
    {
      /*
        All other operators: accumulate each row across the image list, then
        apply any whole-sequence reduction before writing the result.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CacheView
          *image_view;
        const Image
          *next;
        const int
          id = GetOpenMPThreadId();
        register ssize_t
          i,
          x;
        register PixelChannels
          *evaluate_pixel;
        register Quantum
          *magick_restrict q;
        ssize_t
          j;
        if (status == MagickFalse)
          continue;
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        evaluate_pixel=evaluate_pixels[id];
        for (j=0; j < (ssize_t) image->columns; j++)
          for (i=0; i < MaxPixelChannels; i++)
            evaluate_pixel[j].channel[i]=0.0;
        next=images;
        for (j=0; j < (ssize_t) number_images; j++)
        {
          register const Quantum
            *p;
          image_view=AcquireVirtualCacheView(next,exception);
          p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
            exception);
          if (p == (const Quantum *) NULL)
            {
              image_view=DestroyCacheView(image_view);
              break;
            }
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            register ssize_t
              i;
            for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(next,channel);
              PixelTrait evaluate_traits=GetPixelChannelTraits(image,channel);
              if ((traits == UndefinedPixelTrait) ||
                  (evaluate_traits == UndefinedPixelTrait))
                continue;
              if ((traits & UpdatePixelTrait) == 0)
                continue;
              /*
                The first image (j == 0) seeds the accumulator with a plain
                add; subsequent images apply the requested operator.
              */
              evaluate_pixel[x].channel[i]=ApplyEvaluateOperator(
                random_info[id],GetPixelChannel(image,channel,p),j == 0 ?
                AddEvaluateOperator : op,evaluate_pixel[x].channel[i]);
            }
            p+=GetPixelChannels(next);
          }
          image_view=DestroyCacheView(image_view);
          next=GetNextImageInList(next);
        }
        /*
          Finalize operators that need a whole-sequence reduction.
        */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;
          switch (op)
          {
            case MeanEvaluateOperator:
            {
              /* accumulated sum -> arithmetic mean */
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                evaluate_pixel[x].channel[i]/=(double) number_images;
              break;
            }
            case MultiplyEvaluateOperator:
            {
              /* rescale the product by QuantumScale^(number_images-1) */
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                register ssize_t
                  j;
                for (j=0; j < (ssize_t) (number_images-1); j++)
                  evaluate_pixel[x].channel[i]*=QuantumScale;
              }
              break;
            }
            case RootMeanSquareEvaluateOperator:
            {
              /* accumulated sum of squares -> RMS */
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                evaluate_pixel[x].channel[i]=sqrt(evaluate_pixel[x].channel[i]/
                  number_images);
              break;
            }
            default:
              break;
          }
        }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(evaluate_pixel[x].channel[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(images,EvaluateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  evaluate_view=DestroyCacheView(evaluate_view);
  evaluate_pixels=DestroyPixelThreadSet(evaluate_pixels);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
  EvaluateImage() applies evaluate operator 'op' with operand 'value' to
  every update-trait channel of 'image' in place; rows are processed in
  parallel when OpenMP is enabled.  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType EvaluateImage(Image *image,
  const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  RandomInfo
    **magick_restrict random_info;
  ssize_t
    y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        result;
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        result=ApplyEvaluateOperator(random_info[id],q[i],op,value);
        /*
          The Mean operator summed pixel and value; halve to average them.
        */
        if (op == MeanEvaluateOperator)
          result/=2.0;
        q[i]=ClampToQuantum(result);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EvaluateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() applies a value to the image with an arithmetic, relational,
% or logical operator. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the FunctionImage method is:
%
% MagickBooleanType FunctionImage(Image *image,
% const MagickFunction function,const ssize_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o function: A channel function.
%
% o parameters: one or more parameters.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ApplyFunction() evaluates the channel function 'function' (polynomial,
  sinusoid, arcsin, or arctan) at quantum 'pixel' using the supplied
  parameter list, and returns the clamped quantum result.  Missing
  parameters fall back to per-function defaults.
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
  double
    result;
  register ssize_t
    i;
  (void) exception;  /* unused; kept for a uniform call signature */
  result=0.0;
  switch (function)
  {
    case PolynomialFunction:
    {
      /*
        Polynomial: polynomial constants, highest to lowest order (e.g. c0*x^3+
        c1*x^2+c2*x+c3), evaluated via Horner's scheme on the scaled pixel.
      */
      result=0.0;
      for (i=0; i < (ssize_t) number_parameters; i++)
        result=result*QuantumScale*pixel+parameters[i];
      result*=QuantumRange;
      break;
    }
    case SinusoidFunction:
    {
      double
        amplitude,
        bias,
        frequency,
        phase;
      /*
        Sinusoid: frequency, phase, amplitude, bias.
      */
      frequency=(number_parameters >= 1) ? parameters[0] : 1.0;
      phase=(number_parameters >= 2) ? parameters[1] : 0.0;
      amplitude=(number_parameters >= 3) ? parameters[2] : 0.5;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=(double) (QuantumRange*(amplitude*sin((double) (2.0*
        MagickPI*(frequency*QuantumScale*pixel+phase/360.0)))+bias));
      break;
    }
    case ArcsinFunction:
    {
      double
        bias,
        center,
        range,
        width;
      /*
        Arcsin (pegged at range limits for invalid results): width, center,
        range, and bias.
      */
      width=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=2.0/width*(QuantumScale*pixel-center);
      if ( result <= -1.0 )
        result=bias-range/2.0;
      else
        if (result >= 1.0)
          result=bias+range/2.0;
        else
          result=(double) (range/MagickPI*asin((double) result)+bias);
      result*=QuantumRange;
      break;
    }
    case ArctanFunction:
    {
      double
        center,
        bias,
        range,
        slope;
      /*
        Arctan: slope, center, range, and bias.
      */
      slope=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=(double) (MagickPI*slope*(QuantumScale*pixel-center));
      result=(double) (QuantumRange*(range/MagickPI*atan((double)
        result)+bias));
      break;
    }
    case UndefinedFunction:
      break;
  }
  return(ClampToQuantum(result));
}
/*
  FunctionImage() applies the channel function 'function' with the given
  parameters to every update-trait channel of 'image' in place.  An OpenCL
  accelerated path is tried first when available; otherwise rows are
  processed (in parallel with OpenMP) via ApplyFunction().
*/
MagickExport MagickBooleanType FunctionImage(Image *image,
  const MagickFunction function,const size_t number_parameters,
  const double *parameters,ExceptionInfo *exception)
{
#define FunctionImageTag "Function/Image "
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* delegate to the GPU implementation when it succeeds */
  if (AccelerateFunctionImage(image,function,number_parameters,parameters,
    exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ApplyFunction(q[i],function,number_parameters,parameters,
          exception);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FunctionImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E n t r o p y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageEntropy() returns the entropy of one or more image channels.
%
% The format of the GetImageEntropy method is:
%
% MagickBooleanType GetImageEntropy(const Image *image,double *entropy,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o entropy: the average entropy of the selected channels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageEntropy() reports the composite-channel entropy of the image via
  *entropy; returns MagickFalse if the statistics cannot be computed.
*/
MagickExport MagickBooleanType GetImageEntropy(const Image *image,
  double *entropy,ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *entropy=statistics[CompositePixelChannel].entropy;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageExtrema method is:
%
% MagickBooleanType GetImageExtrema(const Image *image,size_t *minima,
% size_t *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageExtrema() reports the rounded minimum and maximum pixel values of
  the image via *minima / *maxima; returns the status of GetImageRange().
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
  size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
  double
    lower,
    upper;
  MagickBooleanType
    status;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=GetImageRange(image,&lower,&upper,exception);
  /* round the continuous range to the nearest integral quantum values */
  *minima=(size_t) ceil(lower-0.5);
  *maxima=(size_t) floor(upper+0.5);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageKurtosis() returns the kurtosis and skewness of one or more image
% channels.
%
% The format of the GetImageKurtosis method is:
%
% MagickBooleanType GetImageKurtosis(const Image *image,double *kurtosis,
% double *skewness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageKurtosis() reports the composite-channel kurtosis and skewness of
  the image; returns MagickFalse if the statistics cannot be computed.
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
  double *kurtosis,double *skewness,ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *kurtosis=statistics[CompositePixelChannel].kurtosis;
  *skewness=statistics[CompositePixelChannel].skewness;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMean() returns the mean and standard deviation of one or more image
% channels.
%
% The format of the GetImageMean method is:
%
% MagickBooleanType GetImageMean(const Image *image,double *mean,
% double *standard_deviation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageMean() reports the composite-channel mean and standard deviation
  of the image; returns MagickFalse if the statistics cannot be computed.
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
  double *standard_deviation,ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *mean=statistics[CompositePixelChannel].mean;
  *standard_deviation=statistics[CompositePixelChannel].standard_deviation;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M o m e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMoments() returns the normalized moments of one or more image
% channels.
%
% The format of the GetImageMoments method is:
%
% ChannelMoments *GetImageMoments(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Count the channels of 'image' carrying the update trait; never returns
  zero (a channel-less image counts as one) so it is safe as a divisor.
*/
static size_t GetImageChannels(const Image *image)
{
  register ssize_t
    i;
  size_t
    count;
  count=0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits != UndefinedPixelTrait) &&
        ((traits & UpdatePixelTrait) != 0))
      count++;
  }
  return(count == 0 ? 1 : count);
}
MagickExport ChannelMoments *GetImageMoments(const Image *image,
ExceptionInfo *exception)
{
#define MaxNumberImageMoments 8
CacheView
*image_view;
ChannelMoments
*channel_moments;
double
M00[MaxPixelChannels+1],
M01[MaxPixelChannels+1],
M02[MaxPixelChannels+1],
M03[MaxPixelChannels+1],
M10[MaxPixelChannels+1],
M11[MaxPixelChannels+1],
M12[MaxPixelChannels+1],
M20[MaxPixelChannels+1],
M21[MaxPixelChannels+1],
M22[MaxPixelChannels+1],
M30[MaxPixelChannels+1];
PointInfo
centroid[MaxPixelChannels+1];
ssize_t
channel,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_moments=(ChannelMoments *) AcquireQuantumMemory(MaxPixelChannels+1,
sizeof(*channel_moments));
if (channel_moments == (ChannelMoments *) NULL)
return(channel_moments);
(void) memset(channel_moments,0,(MaxPixelChannels+1)*
sizeof(*channel_moments));
(void) memset(centroid,0,sizeof(centroid));
(void) memset(M00,0,sizeof(M00));
(void) memset(M01,0,sizeof(M01));
(void) memset(M02,0,sizeof(M02));
(void) memset(M03,0,sizeof(M03));
(void) memset(M10,0,sizeof(M10));
(void) memset(M11,0,sizeof(M11));
(void) memset(M12,0,sizeof(M12));
(void) memset(M20,0,sizeof(M20));
(void) memset(M21,0,sizeof(M21));
(void) memset(M22,0,sizeof(M22));
(void) memset(M30,0,sizeof(M30));
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
/*
Compute center of mass (centroid).
*/
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
M00[channel]+=QuantumScale*p[i];
M00[MaxPixelChannels]+=QuantumScale*p[i];
M10[channel]+=x*QuantumScale*p[i];
M10[MaxPixelChannels]+=x*QuantumScale*p[i];
M01[channel]+=y*QuantumScale*p[i];
M01[MaxPixelChannels]+=y*QuantumScale*p[i];
}
p+=GetPixelChannels(image);
}
}
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Compute center of mass (centroid).
*/
if (M00[channel] < MagickEpsilon)
{
M00[channel]+=MagickEpsilon;
centroid[channel].x=(double) image->columns/2.0;
centroid[channel].y=(double) image->rows/2.0;
continue;
}
M00[channel]+=MagickEpsilon;
centroid[channel].x=M10[channel]/M00[channel];
centroid[channel].y=M01[channel]/M00[channel];
}
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
/*
Compute the image moments.
*/
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
M11[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
QuantumScale*p[i];
M11[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
QuantumScale*p[i];
M20[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
QuantumScale*p[i];
M20[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
QuantumScale*p[i];
M02[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
QuantumScale*p[i];
M02[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
QuantumScale*p[i];
M21[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*QuantumScale*p[i];
M21[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*QuantumScale*p[i];
M12[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
M12[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
M22[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
M22[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
M30[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(x-centroid[channel].x)*QuantumScale*p[i];
M30[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(x-centroid[channel].x)*QuantumScale*p[i];
M03[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
M03[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
}
p+=GetPixelChannels(image);
}
}
M00[MaxPixelChannels]/=GetImageChannels(image);
M01[MaxPixelChannels]/=GetImageChannels(image);
M02[MaxPixelChannels]/=GetImageChannels(image);
M03[MaxPixelChannels]/=GetImageChannels(image);
M10[MaxPixelChannels]/=GetImageChannels(image);
M11[MaxPixelChannels]/=GetImageChannels(image);
M12[MaxPixelChannels]/=GetImageChannels(image);
M20[MaxPixelChannels]/=GetImageChannels(image);
M21[MaxPixelChannels]/=GetImageChannels(image);
M22[MaxPixelChannels]/=GetImageChannels(image);
M30[MaxPixelChannels]/=GetImageChannels(image);
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Compute elliptical angle, major and minor axes, eccentricity, & intensity.
*/
channel_moments[channel].centroid=centroid[channel];
channel_moments[channel].ellipse_axis.x=sqrt((2.0/M00[channel])*
((M20[channel]+M02[channel])+sqrt(4.0*M11[channel]*M11[channel]+
(M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
channel_moments[channel].ellipse_axis.y=sqrt((2.0/M00[channel])*
((M20[channel]+M02[channel])-sqrt(4.0*M11[channel]*M11[channel]+
(M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
channel_moments[channel].ellipse_angle=RadiansToDegrees(0.5*atan(2.0*
M11[channel]/(M20[channel]-M02[channel]+MagickEpsilon)));
if (fabs(M11[channel]) < MagickEpsilon)
{
if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
channel_moments[channel].ellipse_angle+=0.0;
else
if ((M20[channel]-M02[channel]) < 0.0)
channel_moments[channel].ellipse_angle+=90.0;
else
channel_moments[channel].ellipse_angle+=0.0;
}
else
if (M11[channel] < 0.0)
{
if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
channel_moments[channel].ellipse_angle+=0.0;
else
if ((M20[channel]-M02[channel]) < 0.0)
channel_moments[channel].ellipse_angle+=90.0;
else
channel_moments[channel].ellipse_angle+=180.0;
}
else
{
if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
channel_moments[channel].ellipse_angle+=0.0;
else
if ((M20[channel]-M02[channel]) < 0.0)
channel_moments[channel].ellipse_angle+=90.0;
else
channel_moments[channel].ellipse_angle+=0.0;
}
channel_moments[channel].ellipse_eccentricity=sqrt(1.0-(
channel_moments[channel].ellipse_axis.y/
(channel_moments[channel].ellipse_axis.x+MagickEpsilon)));
channel_moments[channel].ellipse_intensity=M00[channel]/
(MagickPI*channel_moments[channel].ellipse_axis.x*
channel_moments[channel].ellipse_axis.y+MagickEpsilon);
}
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Normalize image moments.
*/
M10[channel]=0.0;
M01[channel]=0.0;
M11[channel]/=pow(M00[channel],1.0+(1.0+1.0)/2.0);
M20[channel]/=pow(M00[channel],1.0+(2.0+0.0)/2.0);
M02[channel]/=pow(M00[channel],1.0+(0.0+2.0)/2.0);
M21[channel]/=pow(M00[channel],1.0+(2.0+1.0)/2.0);
M12[channel]/=pow(M00[channel],1.0+(1.0+2.0)/2.0);
M22[channel]/=pow(M00[channel],1.0+(2.0+2.0)/2.0);
M30[channel]/=pow(M00[channel],1.0+(3.0+0.0)/2.0);
M03[channel]/=pow(M00[channel],1.0+(0.0+3.0)/2.0);
M00[channel]=1.0;
}
image_view=DestroyCacheView(image_view);
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Compute Hu invariant moments.
*/
channel_moments[channel].invariant[0]=M20[channel]+M02[channel];
channel_moments[channel].invariant[1]=(M20[channel]-M02[channel])*
(M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel];
channel_moments[channel].invariant[2]=(M30[channel]-3.0*M12[channel])*
(M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])*
(3.0*M21[channel]-M03[channel]);
channel_moments[channel].invariant[3]=(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])+(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]);
channel_moments[channel].invariant[4]=(M30[channel]-3.0*M12[channel])*
(M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])*
(M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]));
channel_moments[channel].invariant[5]=(M20[channel]-M02[channel])*
((M30[channel]+M12[channel])*(M30[channel]+M12[channel])-
(M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+
4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]);
channel_moments[channel].invariant[6]=(3.0*M21[channel]-M03[channel])*
(M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])*
(M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]));
channel_moments[channel].invariant[7]=M11[channel]*((M30[channel]+
M12[channel])*(M30[channel]+M12[channel])-(M03[channel]+M21[channel])*
(M03[channel]+M21[channel]))-(M20[channel]-M02[channel])*
(M30[channel]+M12[channel])*(M03[channel]+M21[channel]);
}
if (y < (ssize_t) image->rows)
channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments);
return(channel_moments);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   G e t I m a g e P e r c e p t u a l H a s h                               %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePerceptualHash() returns the perceptual hash of one or more
% image channels.
%
% The format of the GetImagePerceptualHash method is:
%
% ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
  ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *perceptual_hash;

  char
    *colorspaces,
    *q;

  const char
    *artifact;

  MagickBooleanType
    status;

  register char
    *p;

  register ssize_t
    i;

  /*
    Compute the perceptual hash of each channel: for every requested
    colorspace, blur a copy of the image, transform it, and record the
    negated log10 of the Hu image-moment invariants.
  */
  perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
    MaxPixelChannels+1UL,sizeof(*perceptual_hash));
  if (perceptual_hash == (ChannelPerceptualHash *) NULL)
    return((ChannelPerceptualHash *) NULL);
  artifact=GetImageArtifact(image,"phash:colorspaces");
  if (artifact != NULL)
    colorspaces=AcquireString(artifact);
  else
    colorspaces=AcquireString("sRGB,HCLp");
  perceptual_hash[0].number_colorspaces=0;
  perceptual_hash[0].number_channels=0;
  q=colorspaces;
  for (i=0; (p=StringToken(",",&q)) != (char *) NULL; i++)
  {
    ChannelMoments
      *moments;

    Image
      *hash_image;

    size_t
      j;

    ssize_t
      channel,
      colorspace;

    if (i >= MaximumNumberOfPerceptualColorspaces)
      break;
    colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,p);
    if (colorspace < 0)
      break;
    perceptual_hash[0].colorspace[i]=(ColorspaceType) colorspace;
    hash_image=BlurImage(image,0.0,1.0,exception);
    if (hash_image == (Image *) NULL)
      break;
    hash_image->depth=8;
    status=TransformImageColorspace(hash_image,(ColorspaceType) colorspace,
      exception);
    if (status == MagickFalse)
      {
        /*
          Bug fix: the blurred image was previously leaked on a failed
          colorspace transform; release it before bailing out.
        */
        hash_image=DestroyImage(hash_image);
        break;
      }
    moments=GetImageMoments(hash_image,exception);
    perceptual_hash[0].number_colorspaces++;
    perceptual_hash[0].number_channels+=GetImageChannels(hash_image);
    hash_image=DestroyImage(hash_image);
    if (moments == (ChannelMoments *) NULL)
      break;
    for (channel=0; channel <= MaxPixelChannels; channel++)
      for (j=0; j < MaximumNumberOfImageMoments; j++)
        perceptual_hash[channel].phash[i][j]=
          (-MagickLog10(moments[channel].invariant[j]));
    moments=(ChannelMoments *) RelinquishMagickMemory(moments);
  }
  colorspaces=DestroyString(colorspaces);
  return(perceptual_hash);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageRange() returns the range of one or more image channels.
%
% The format of the GetImageRange method is:
%
% MagickBooleanType GetImageRange(const Image *image,double *minima,
% double *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,double *minima,
  double *maxima,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    initialize,
    status;

  ssize_t
    y;

  /*
    Compute the minimum and maximum sample values over all channels carrying
    the update trait.  Each row is scanned (in parallel when OpenMP is
    enabled) into row-local extrema which are merged into *minima/*maxima
    inside a critical section.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  initialize=MagickTrue;  /* true until the first row publishes its extrema */
  *maxima=0.0;
  *minima=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status,initialize) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      row_maxima = 0.0,
      row_minima = 0.0;

    MagickBooleanType
      row_initialize;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    row_initialize=MagickTrue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /*
          Skip channels that are undefined or not flagged for update.
        */
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (row_initialize != MagickFalse)
          {
            /*
              The first qualifying sample in this row seeds both extrema.
            */
            row_minima=(double) p[i];
            row_maxima=(double) p[i];
            row_initialize=MagickFalse;
          }
        else
          {
            if ((double) p[i] < row_minima)
              row_minima=(double) p[i];
            if ((double) p[i] > row_maxima)
              row_maxima=(double) p[i];
          }
      }
      p+=GetPixelChannels(image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetImageRange)
#endif
    {
      /*
        Merge this row's extrema into the shared result.
      */
      if (initialize != MagickFalse)
        {
          *minima=row_minima;
          *maxima=row_maxima;
          initialize=MagickFalse;
        }
      else
        {
          if (row_minima < *minima)
            *minima=row_minima;
          if (row_maxima > *maxima)
            *maxima=row_maxima;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageStatistics() returns statistics for each channel in the image. The
% statistics include the channel depth, its minima, maxima, mean, standard
% deviation, kurtosis and skewness. You can access the red channel mean, for
% example, like this:
%
% channel_statistics=GetImageStatistics(image,exception);
% red_mean=channel_statistics[RedPixelChannel].mean;
%
% Use MagickRelinquishMemory() to free the statistics buffer.
%
% The format of the GetImageStatistics method is:
%
% ChannelStatistics *GetImageStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ChannelStatistics *GetImageStatistics(const Image *image,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  double
    area,
    *histogram,
    standard_deviation;

  MagickStatusType
    status;

  QuantumAny
    range;

  register ssize_t
    i;

  size_t
    depth;

  ssize_t
    y;

  /*
    Gather per-channel statistics: depth, minima/maxima, mean, standard
    deviation, kurtosis, skewness, and entropy.  The extra slot indexed by
    CompositePixelChannel aggregates across all channels.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Histogram layout: (MaxMap+1) bins, each holding GetPixelChannels(image)
    counters (one per channel offset i).
  */
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
    sizeof(*histogram));
  channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(
    MaxPixelChannels+1,sizeof(*channel_statistics));
  if ((channel_statistics == (ChannelStatistics *) NULL) ||
      (histogram == (double *) NULL))
    {
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (channel_statistics != (ChannelStatistics *) NULL)
        channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          channel_statistics);
      return(channel_statistics);
    }
  (void) memset(channel_statistics,0,(MaxPixelChannels+1)*
    sizeof(*channel_statistics));
  for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
  {
    /*
      Start the extrema at opposite ends so any sample replaces them.
    */
    channel_statistics[i].depth=1;
    channel_statistics[i].maxima=(-MagickMaximumValue);
    channel_statistics[i].minima=MagickMaximumValue;
  }
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /*
      Compute pixel statistics.
    */
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (channel_statistics[channel].depth != MAGICKCORE_QUANTUM_DEPTH)
          {
            /*
              Probe the channel depth: if the sample does not survive a
              round-trip at the current depth, deepen by one bit and retry
              this same channel (i-- re-runs the loop body for the same i).
            */
            depth=channel_statistics[channel].depth;
            range=GetQuantumRange(depth);
            status=p[i] != ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),
              range) ? MagickTrue : MagickFalse;
            if (status != MagickFalse)
              {
                channel_statistics[channel].depth++;
                i--;
                continue;
              }
          }
        if ((double) p[i] < channel_statistics[channel].minima)
          channel_statistics[channel].minima=(double) p[i];
        if ((double) p[i] > channel_statistics[channel].maxima)
          channel_statistics[channel].maxima=(double) p[i];
        channel_statistics[channel].sum+=p[i];
        channel_statistics[channel].sum_squared+=(double) p[i]*p[i];
        channel_statistics[channel].sum_cubed+=(double) p[i]*p[i]*p[i];
        channel_statistics[channel].sum_fourth_power+=(double) p[i]*p[i]*p[i]*
          p[i];
        channel_statistics[channel].area++;
        /*
          Fold every channel's samples into the composite aggregate too.
        */
        if ((double) p[i] < channel_statistics[CompositePixelChannel].minima)
          channel_statistics[CompositePixelChannel].minima=(double) p[i];
        if ((double) p[i] > channel_statistics[CompositePixelChannel].maxima)
          channel_statistics[CompositePixelChannel].maxima=(double) p[i];
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum((double) p[i]))+i]++;
        channel_statistics[CompositePixelChannel].sum+=(double) p[i];
        channel_statistics[CompositePixelChannel].sum_squared+=(double)
          p[i]*p[i];
        channel_statistics[CompositePixelChannel].sum_cubed+=(double)
          p[i]*p[i]*p[i];
        channel_statistics[CompositePixelChannel].sum_fourth_power+=(double)
          p[i]*p[i]*p[i]*p[i];
        channel_statistics[CompositePixelChannel].area++;
      }
      p+=GetPixelChannels(image);
    }
  }
  for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
  {
    /*
      Normalize pixel statistics: convert raw power sums into means, then
      derive the sample-corrected standard deviation.
    */
    area=PerceptibleReciprocal(channel_statistics[i].area);
    channel_statistics[i].sum*=area;
    channel_statistics[i].sum_squared*=area;
    channel_statistics[i].sum_cubed*=area;
    channel_statistics[i].sum_fourth_power*=area;
    channel_statistics[i].mean=channel_statistics[i].sum;
    channel_statistics[i].variance=channel_statistics[i].sum_squared;
    standard_deviation=sqrt(channel_statistics[i].variance-
      (channel_statistics[i].mean*channel_statistics[i].mean));
    standard_deviation=sqrt(PerceptibleReciprocal(channel_statistics[i].area-
      1.0)*channel_statistics[i].area*standard_deviation*standard_deviation);
    channel_statistics[i].standard_deviation=standard_deviation;
  }
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      number_bins;

    register ssize_t
      j;

    /*
      Compute pixel entropy, normalized by log10 of the number of occupied
      histogram bins.
    */
    PixelChannel channel = GetPixelChannelChannel(image,i);
    number_bins=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
      if (histogram[GetPixelChannels(image)*j+i] > 0.0)
        number_bins++;
    area=PerceptibleReciprocal(channel_statistics[channel].area);
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      double
        count;

      count=area*histogram[GetPixelChannels(image)*j+i];
      channel_statistics[channel].entropy+=-count*MagickLog10(count)*
        PerceptibleReciprocal(MagickLog10(number_bins));
      channel_statistics[CompositePixelChannel].entropy+=-count*
        MagickLog10(count)*PerceptibleReciprocal(MagickLog10(number_bins))/
        GetPixelChannels(image);
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
  {
    /*
      Compute kurtosis & skewness statistics from the normalized moments.
    */
    standard_deviation=PerceptibleReciprocal(
      channel_statistics[i].standard_deviation);
    channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0*
      channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0*
      channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].mean)*(standard_deviation*standard_deviation*
      standard_deviation);
    channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0*
      channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0*
      channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
      channel_statistics[i].mean*1.0*channel_statistics[i].mean*
      channel_statistics[i].mean)*(standard_deviation*standard_deviation*
      standard_deviation*standard_deviation)-3.0;
  }
  /*
    The composite mean/deviation/entropy are averages over all channels.
  */
  channel_statistics[CompositePixelChannel].mean=0.0;
  channel_statistics[CompositePixelChannel].standard_deviation=0.0;
  channel_statistics[CompositePixelChannel].entropy=0.0;
  for (i=0; i < (ssize_t) MaxPixelChannels; i++)
  {
    channel_statistics[CompositePixelChannel].mean+=
      channel_statistics[i].mean;
    channel_statistics[CompositePixelChannel].standard_deviation+=
      channel_statistics[i].standard_deviation;
    channel_statistics[CompositePixelChannel].entropy+=
      channel_statistics[i].entropy;
  }
  channel_statistics[CompositePixelChannel].mean/=(double)
    GetImageChannels(image);
  channel_statistics[CompositePixelChannel].standard_deviation/=(double)
    GetImageChannels(image);
  channel_statistics[CompositePixelChannel].entropy/=(double)
    GetImageChannels(image);
  /*
    If the row loop exited early a pixel fetch failed: free the statistics
    and return NULL.
  */
  if (y < (ssize_t) image->rows)
    channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
      channel_statistics);
  return(channel_statistics);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l y n o m i a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolynomialImage() returns a new image where each pixel is the sum of the
% pixels in the image sequence after applying its corresponding terms
% (coefficient and degree pairs).
%
% The format of the PolynomialImage method is:
%
% Image *PolynomialImage(const Image *images,const size_t number_terms,
% const double *terms,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o number_terms: the number of terms in the list. The actual list length
% is 2 x number_terms + 1 (the constant).
%
% o terms: the list of polynomial coefficients and degree pairs and a
% constant.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolynomialImage(const Image *images,
  const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
#define PolynomialImageTag "Polynomial/Image"

  CacheView
    *polynomial_view;

  Image
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelChannels
    **magick_restrict polynomial_pixels;

  size_t
    number_images;

  ssize_t
    y;

  /*
    Build an image where each channel is sum(coefficient*value^degree) over
    the image sequence; `terms` holds one (coefficient,degree) pair per
    image.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  number_images=GetImageListLength(images);
  polynomial_pixels=AcquirePixelThreadSet(images);
  if (polynomial_pixels == (PixelChannels **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Polynomial image pixels.
  */
  status=MagickTrue;
  progress=0;
  polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    const int
      id = GetOpenMPThreadId();

    register ssize_t
      i,
      x;

    register PixelChannels
      *polynomial_pixel;

    register Quantum
      *magick_restrict q;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Zero this thread's per-column channel accumulators.
    */
    polynomial_pixel=polynomial_pixels[id];
    for (j=0; j < (ssize_t) image->columns; j++)
      for (i=0; i < MaxPixelChannels; i++)
        polynomial_pixel[j].channel[i]=0.0;
    next=images;
    for (j=0; j < (ssize_t) number_images; j++)
    {
      register const Quantum
        *p;

      /*
        Images without a matching (coefficient,degree) pair are ignored.
        NOTE(review): `continue` also skips the GetNextImageInList() advance
        at the bottom of this loop; harmless here because every later j is
        skipped as well — confirm if the term check ever changes.
      */
      if (j >= (ssize_t) number_terms)
        continue;
      image_view=AcquireVirtualCacheView(next,exception);
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          image_view=DestroyCacheView(image_view);
          break;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        register ssize_t
          i;

        for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
        {
          MagickRealType
            coefficient,
            degree;

          PixelChannel channel = GetPixelChannelChannel(image,i);
          PixelTrait traits = GetPixelChannelTraits(next,channel);
          PixelTrait polynomial_traits=GetPixelChannelTraits(image,channel);
          if ((traits == UndefinedPixelTrait) ||
              (polynomial_traits == UndefinedPixelTrait))
            continue;
          if ((traits & UpdatePixelTrait) == 0)
            continue;
          /*
            Accumulate coefficient*(normalized sample)^degree for this term.
          */
          coefficient=(MagickRealType) terms[2*j];
          degree=(MagickRealType) terms[(j << 1)+1];
          polynomial_pixel[x].channel[i]+=coefficient*
            pow(QuantumScale*GetPixelChannel(image,channel,p),degree);
        }
        p+=GetPixelChannels(next);
      }
      image_view=DestroyCacheView(image_view);
      next=GetNextImageInList(next);
    }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          Scale the accumulated sum back to quantum range and clamp.
        */
        q[i]=ClampToQuantum(QuantumRange*polynomial_pixel[x].channel[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(images,PolynomialImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  polynomial_view=DestroyCacheView(polynomial_view);
  polynomial_pixels=DestroyPixelThreadSet(polynomial_pixels);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef struct _SkipNode
{
  size_t
    next[9],     /* forward links, one per skip-list level (at most 9) */
    count,       /* number of neighborhood pixels with this color value */
    signature;   /* matches the owning PixelList signature when live */
} SkipNode;

typedef struct _SkipList
{
  ssize_t
    level;       /* highest level currently in use */

  SkipNode
    *nodes;      /* 65537 nodes: one per 16-bit color plus sentinel 65536 */
} SkipList;

typedef struct _PixelList
{
  size_t
    length,      /* pixels in the neighborhood (width*height) */
    seed;        /* LCG state used to pick random node levels */

  SkipList
    skip_list;

  size_t
    signature;   /* bumped on every reset to invalidate stale nodes */
} PixelList;
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
  /*
    Release a pixel list and its skip-list nodes; always returns NULL.
    Passing NULL is a no-op.
  */
  if (pixel_list != (PixelList *) NULL)
    {
      if (pixel_list->skip_list.nodes != (SkipNode *) NULL)
        pixel_list->skip_list.nodes=(SkipNode *)
          RelinquishAlignedMemory(pixel_list->skip_list.nodes);
      pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list);
    }
  return(pixel_list);
}
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
  /*
    Destroy one pixel list per worker thread, then the table itself.
  */
  ssize_t
    n;

  assert(pixel_list != (PixelList **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
    if (pixel_list[n] != (PixelList *) NULL)
      pixel_list[n]=DestroyPixelList(pixel_list[n]);
  return((PixelList **) RelinquishMagickMemory(pixel_list));
}
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
  PixelList
    *pixel_list;

  /*
    Allocate a pixel list for a width x height neighborhood.  The skip-list
    needs 65537 nodes: one per 16-bit color value plus the sentinel/root
    node at index 65536.
  */
  pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
  if (pixel_list == (PixelList *) NULL)
    return(pixel_list);
  (void) memset((void *) pixel_list,0,sizeof(*pixel_list));
  pixel_list->length=width*height;
  pixel_list->skip_list.nodes=(SkipNode *) AcquireAlignedMemory(65537UL,
    sizeof(*pixel_list->skip_list.nodes));
  if (pixel_list->skip_list.nodes == (SkipNode *) NULL)
    return(DestroyPixelList(pixel_list));
  (void) memset(pixel_list->skip_list.nodes,0,65537UL*
    sizeof(*pixel_list->skip_list.nodes));
  pixel_list->signature=MagickCoreSignature;
  return(pixel_list);
}
static PixelList **AcquirePixelListThreadSet(const size_t width,
const size_t height)
{
PixelList
**pixel_list;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
sizeof(*pixel_list));
if (pixel_list == (PixelList **) NULL)
return((PixelList **) NULL);
(void) memset(pixel_list,0,number_threads*sizeof(*pixel_list));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_list[i]=AcquirePixelList(width,height);
if (pixel_list[i] == (PixelList *) NULL)
return(DestroyPixelListThreadSet(pixel_list));
}
return(pixel_list);
}
static void AddNodePixelList(PixelList *pixel_list,const size_t color)
{
  register SkipList
    *p;

  register ssize_t
    level;

  size_t
    search,
    update[9];

  /*
    Initialize the node.
  */
  p=(&pixel_list->skip_list);
  p->nodes[color].signature=pixel_list->signature;
  p->nodes[color].count=1;
  /*
    Determine where it belongs in the list: for each level, record the last
    node whose value precedes `color` (65536 is the sentinel/root node).
  */
  search=65536UL;
  for (level=p->level; level >= 0; level--)
  {
    while (p->nodes[search].next[level] < color)
      search=p->nodes[search].next[level];
    update[level]=search;
  }
  /*
    Generate a pseudo-random level for this node: an LCG draw per level,
    stopping with geometric probability.
  */
  for (level=0; ; level++)
  {
    pixel_list->seed=(pixel_list->seed*42893621L)+1L;
    if ((pixel_list->seed & 0x300) != 0x300)
      break;
  }
  /* Cap at the maximum level and at most two above the current level. */
  if (level > 8)
    level=8;
  if (level > (p->level+2))
    level=p->level+2;
  /*
    If we're raising the list's level, link back to the root node.
  */
  while (level > p->level)
  {
    p->level++;
    update[p->level]=65536UL;
  }
  /*
    Link the node into the skip-list.
  */
  do
  {
    p->nodes[color].next[level]=p->nodes[update[level]].next[level];
    p->nodes[update[level]].next[level]=color;
  } while (level-- > 0);
}
static inline void GetMaximumPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *p;

  size_t
    color,
    maximum;

  ssize_t
    count;

  /*
    Find the maximum value for each of the color.  Nodes are linked in
    ascending color order, so the walk ends on the largest color present.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  count=0;
  maximum=p->nodes[color].next[0];
  do
  {
    color=p->nodes[color].next[0];
    if (color > maximum)
      maximum=color;
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  *pixel=ScaleShortToQuantum((unsigned short) maximum);
}
static inline void GetMeanPixelList(PixelList *pixel_list,Quantum *pixel)
{
  /*
    Average color of the neighborhood: walk the level-0 links and accumulate
    count-weighted color values until every pixel is accounted for.
  */
  double
    total;

  register SkipList
    *list;

  size_t
    node;

  ssize_t
    remaining;

  list=(&pixel_list->skip_list);
  node=65536L;
  total=0.0;
  remaining=(ssize_t) pixel_list->length;
  while (remaining > 0)
  {
    node=list->nodes[node].next[0];
    total+=(double) list->nodes[node].count*node;
    remaining-=(ssize_t) list->nodes[node].count;
  }
  total/=pixel_list->length;
  *pixel=ScaleShortToQuantum((unsigned short) total);
}
static inline void GetMedianPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *p;

  size_t
    color;

  ssize_t
    count;

  /*
    Find the median value for each of the color.  Nodes are linked in
    ascending order, so stop on the first color whose cumulative count
    passes half the neighborhood.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  count=0;
  do
  {
    color=p->nodes[color].next[0];
    count+=p->nodes[color].count;
  } while (count <= (ssize_t) (pixel_list->length >> 1));
  *pixel=ScaleShortToQuantum((unsigned short) color);
}
static inline void GetMinimumPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *p;

  size_t
    color,
    minimum;

  ssize_t
    count;

  /*
    Find the minimum value for each of the color.  Nodes are linked in
    ascending order, so the first node after the sentinel is the smallest.
  */
  p=(&pixel_list->skip_list);
  count=0;
  color=65536UL;
  minimum=p->nodes[color].next[0];
  do
  {
    color=p->nodes[color].next[0];
    if (color < minimum)
      minimum=color;
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  *pixel=ScaleShortToQuantum((unsigned short) minimum);
}
static inline void GetModePixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *p;

  size_t
    color,
    max_count,
    mode;

  ssize_t
    count;

  /*
    Make each pixel the 'predominant color' of the specified neighborhood:
    walk the level-0 links and keep the color with the largest count.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  mode=color;
  max_count=p->nodes[mode].count;
  count=0;
  do
  {
    color=p->nodes[color].next[0];
    if (p->nodes[color].count > max_count)
      {
        mode=color;
        max_count=p->nodes[mode].count;
      }
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  *pixel=ScaleShortToQuantum((unsigned short) mode);
}
static inline void GetNonpeakPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *p;

  size_t
    color,
    next,
    previous;

  ssize_t
    count;

  /*
    Finds the non peak value for each of the colors: locate the median while
    tracking its neighbors, then step inward when the median sits at either
    end of the list (65536 is the sentinel node).
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  next=p->nodes[color].next[0];
  count=0;
  do
  {
    previous=color;
    color=next;
    next=p->nodes[color].next[0];
    count+=p->nodes[color].count;
  } while (count <= (ssize_t) (pixel_list->length >> 1));
  if ((previous == 65536UL) && (next != 65536UL))
    color=next;
  else
    if ((previous != 65536UL) && (next == 65536UL))
      color=previous;
  *pixel=ScaleShortToQuantum((unsigned short) color);
}
static inline void GetRootMeanSquarePixelList(PixelList *pixel_list,
  Quantum *pixel)
{
  /*
    Root-mean-square of the neighborhood colors: walk the level-0 links
    summing count-weighted squares, divide by the pixel count, take the
    square root.
  */
  double
    sum_of_squares;

  register SkipList
    *list;

  size_t
    node;

  ssize_t
    tally;

  list=(&pixel_list->skip_list);
  node=65536L;
  tally=0;
  sum_of_squares=0.0;
  do
  {
    node=list->nodes[node].next[0];
    sum_of_squares+=(double) (list->nodes[node].count*node*node);
    tally+=list->nodes[node].count;
  } while (tally < (ssize_t) pixel_list->length);
  sum_of_squares/=pixel_list->length;
  *pixel=ScaleShortToQuantum((unsigned short) sqrt(sum_of_squares));
}
static inline void GetStandardDeviationPixelList(PixelList *pixel_list,
  Quantum *pixel)
{
  /*
    Standard deviation of the neighborhood colors.  Walk the level-0 links
    of the skip-list accumulating the count-weighted sum and sum of squares,
    then apply sqrt(E[x^2]-E[x]^2).
  */
  double
    sum,
    sum_squared;

  register SkipList
    *p;

  size_t
    color;

  ssize_t
    count;

  p=(&pixel_list->skip_list);
  color=65536L;
  count=0;
  sum=0.0;
  sum_squared=0.0;
  do
  {
    color=p->nodes[color].next[0];
    sum+=(double) p->nodes[color].count*color;
    /*
      Weight the square by the node count directly instead of adding
      color*color once per pixel (replaces an accidental O(count) inner
      loop with one multiply; the result is mathematically identical).
    */
    sum_squared+=(double) p->nodes[color].count*color*color;
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  sum/=pixel_list->length;
  sum_squared/=pixel_list->length;
  *pixel=ScaleShortToQuantum((unsigned short) sqrt(sum_squared-(sum*sum)));
}
static inline void InsertPixelList(const Quantum pixel,PixelList *pixel_list)
{
size_t
signature;
unsigned short
index;
index=ScaleQuantumToShort(pixel);
signature=pixel_list->skip_list.nodes[index].signature;
if (signature == pixel_list->signature)
{
pixel_list->skip_list.nodes[index].count++;
return;
}
AddNodePixelList(pixel_list,index);
}
static void ResetPixelList(PixelList *pixel_list)
{
  /*
    Empty the skip-list: point every level of the sentinel node back at
    itself and bump the signature so stale nodes are ignored on the next
    insertion pass.
  */
  register SkipList
    *list;

  register SkipNode
    *sentinel;

  ssize_t
    level;

  list=(&pixel_list->skip_list);
  sentinel=list->nodes+65536UL;
  list->level=0;
  for (level=0; level < 9; level++)
    sentinel->next[level]=65536UL;
  pixel_list->seed=pixel_list->signature++;
}
/*
  StatisticImage() replaces each pixel of the image with a statistic (min,
  max, mean, median, mode, gradient, nonpeak, RMS, or standard deviation)
  of its width x height neighborhood, returning a new image.  The caller
  owns the returned image; NULL is returned on failure with the reason in
  {exception}.
*/
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
  const size_t width,const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"

  CacheView
    *image_view,
    *statistic_view;

  Image
    *statistic_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelList
    **magick_restrict pixel_list;

  ssize_t
    center,
    y;

  /*
    Initialize statistics image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  statistic_image=CloneImage(image,0,0,MagickTrue,
    exception);
  if (statistic_image == (Image *) NULL)
    return((Image *) NULL);
  /* A statistic can produce any color, so the result must be DirectClass. */
  status=SetImageStorageClass(statistic_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      statistic_image=DestroyImage(statistic_image);
      return((Image *) NULL);
    }
  /* One skip-list per worker thread; width/height are clamped to >= 1. */
  pixel_list=AcquirePixelListThreadSet(MagickMax(width,1),MagickMax(height,1));
  if (pixel_list == (PixelList **) NULL)
    {
      statistic_image=DestroyImage(statistic_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Make each pixel the min / max / median / mode / etc. of the neighborhood.
    {center} is the channel offset of the central pixel inside the virtual
    neighborhood window fetched below (rows are image->columns+width wide).
  */
  center=(ssize_t) GetPixelChannels(image)*(image->columns+MagickMax(width,1))*
    (MagickMax(height,1)/2L)+GetPixelChannels(image)*(MagickMax(width,1)/2L);
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  statistic_view=AcquireAuthenticCacheView(statistic_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,statistic_image,statistic_image->rows,1)
#endif
  for (y=0; y < (ssize_t) statistic_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* Another row may already have failed; bail out of this one early. */
    if (status == MagickFalse)
      continue;
    /* Fetch the neighborhood rows (virtual pixels pad the borders) and the
       destination row. */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) MagickMax(width,1)/2L),y-
      (ssize_t) (MagickMax(height,1)/2L),image->columns+MagickMax(width,1),
      MagickMax(height,1),exception);
    q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns, 1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) statistic_image->columns; x++)
    {
      register ssize_t
        i;

      /* Compute the statistic independently for every channel. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        Quantum
          pixel;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait statistic_traits=GetPixelChannelTraits(statistic_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (statistic_traits == UndefinedPixelTrait))
          continue;
        /* Copy-only channels and write-masked pixels pass through the
           central source value unchanged. */
        if (((statistic_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) <= (QuantumRange/2)))
          {
            SetPixelChannel(statistic_image,channel,p[center+i],q);
            continue;
          }
        if ((statistic_traits & UpdatePixelTrait) == 0)
          continue;
        /* Feed the whole width x height window into this thread's list. */
        pixels=p;
        ResetPixelList(pixel_list[id]);
        for (v=0; v < (ssize_t) MagickMax(height,1); v++)
        {
          for (u=0; u < (ssize_t) MagickMax(width,1); u++)
          {
            InsertPixelList(pixels[i],pixel_list[id]);
            pixels+=GetPixelChannels(image);
          }
          /* skip to the start of the next neighborhood row */
          pixels+=GetPixelChannels(image)*image->columns;
        }
        switch (type)
        {
          case GradientStatistic:
          {
            double
              maximum,
              minimum;

            GetMinimumPixelList(pixel_list[id],&pixel);
            minimum=(double) pixel;
            GetMaximumPixelList(pixel_list[id],&pixel);
            maximum=(double) pixel;
            pixel=ClampToQuantum(MagickAbsoluteValue(maximum-minimum));
            break;
          }
          case MaximumStatistic:
          {
            GetMaximumPixelList(pixel_list[id],&pixel);
            break;
          }
          case MeanStatistic:
          {
            GetMeanPixelList(pixel_list[id],&pixel);
            break;
          }
          case MedianStatistic:
          default:
          {
            GetMedianPixelList(pixel_list[id],&pixel);
            break;
          }
          case MinimumStatistic:
          {
            GetMinimumPixelList(pixel_list[id],&pixel);
            break;
          }
          case ModeStatistic:
          {
            GetModePixelList(pixel_list[id],&pixel);
            break;
          }
          case NonpeakStatistic:
          {
            GetNonpeakPixelList(pixel_list[id],&pixel);
            break;
          }
          case RootMeanSquareStatistic:
          {
            GetRootMeanSquarePixelList(pixel_list[id],&pixel);
            break;
          }
          case StandardDeviationStatistic:
          {
            GetStandardDeviationPixelList(pixel_list[id],&pixel);
            break;
          }
        }
        SetPixelChannel(statistic_image,channel,pixel,q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(statistic_image);
    }
    if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,StatisticImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  statistic_view=DestroyCacheView(statistic_view);
  image_view=DestroyCacheView(image_view);
  pixel_list=DestroyPixelListThreadSet(pixel_list);
  if (status == MagickFalse)
    statistic_image=DestroyImage(statistic_image);
  return(statistic_image);
}
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <stdbool.h>
#include "vec2.h"
#include "drone.h"
#include "comms.h"
//#include "video.h"
#include <omp.h>
/* Simulation configuration and shared state.  Defaults below may be
   overridden by command-line arguments in main(). */
omp_lock_t writelock;        /* protects the collisions[] tallies */
time_t t;                    /* srand() seed */
uint64_t iterations = 10000; /* Monte-Carlo trials per broadcast rate */
double dt = 1E-3; //seconds  (integration time step)
double error = -1.0;         /* position error passed to DR_avoid */
/* Broadcast rates to sweep, in ascending order. */
double rates[] = {
1E-2,
2.5E-2,
5E-2,
7.5E-2,
1E-1,
2.5E-1,
5E-1,
7.5E-1,
1.0,
1.5,
2.0,
2.5,
3.0,
3.5,
4.0,
4.5,
5.0,
5.5,
6.0,
6.5,
7.0,
7.5,
8.0,
8.5,
9.0,
9.5,
10.0,
11.0,
12.0,
13.0,
14.0,
15.0,
16.0}; //msg/s
double speed = 20.0;         /* drone speed, m/s */
uint64_t len_rates = sizeof(rates) / sizeof(double);
int si = 0;                  /* NOTE(review): appears unused in this file — confirm */
double rate = 1.0;           /* current broadcast rate, msg/s */
char prob = 'A';             /* loss-model selector (see main) */
RFsystem sys;                /* RF system chosen from prob */
double l = 1000.0;           /* loss parameter, metres */
int main(int argc, char *argv[])
{
if (argc > 1)
{
prob = argv[1][0];
}
if (argc > 2)
{
error = atof(argv[2]);
}
if (argc > 3)
{
l = 1000.0 * atof(argv[3]);
}
if (argc > 4)
{
speed = atof(argv[4]);
}
printf("Error: %.3f\n", error);
switch (prob)
{
case 'E':
printf("Wi-Fi beacons\n");
sys = WI_FI;
break;
case 'C':
printf("ADS-B\n");
sys = ADS_B;
break;
default:
sys = NO_LOSS;
printf("No loss\n");
}
printf("Loss: %.3f\n", l);
printf("Speed: %.3f\n", speed);
double collisions[len_rates];
for (uint64_t k = 0; k < len_rates; k++)
{
collisions[k] = 0.0;
}
omp_init_lock(&writelock);
t = time(NULL);
srand(t);
//speed is in m/s
vec2 p1 = {500.0, 500.0};
vec2 p2 = {1000.0, 1000.0};
vec2 p3 = {000.0, 1000.0};
// vec2 p4 = {0.0, 0.0};
printf("Rate:\tPcrash:\n");
#pragma omp parallel
{
for (uint64_t i = 0; i < len_rates; i++)
{
#pragma omp for
for (uint64_t it = 0; it < iterations; it++)
{
Drone d1 = DR_newDrone(0.0, 0.0, speed, 0.0, 1);
Drone d2 = DR_newDrone(1000.0, 0.0, speed, 0.0, 1);
// Drone d3 = DR_newDrone(500.0, 999.0, 20.0, 0.0, 1);
DR_push_waypoint(&d1, p2);
DR_push_waypoint(&d2, p3);
// DR_push_waypoint(&d3, p4);
DR_push_waypoint(&d1, p1);
DR_push_waypoint(&d2, p1);
// DR_push_waypoint(&d3, p1);
rate = rates[i];
bool running = true;
double timer = 0;
while (running)
{
if (timer >= 1 / rate)
{
if (COM_broadcast(d1.position, d2.position, sys, l))
{
DR_avoid(&d2, &d1, error);
DR_avoid(&d1, &d2, error);
// DR_avoid(&d3, &d1, error);
// DR_avoid(&d1, &d3, error);
// DR_avoid(&d2, &d3, error);
// DR_avoid(&d3, &d2, error);
// DR_stopAndWait(&d1, &d2, error);
// DR_stopAndWait(&d2, &d1, error);
}
timer = 0;
}
DR_move(&d1, dt);
DR_move(&d2, dt);
// DR_move(&d3, dt);
if (d1.waypoints[d1.curr_wp].x == 0 && d1.waypoints[d1.curr_wp].y == 0)
{
running = false;
}
if (d2.waypoints[d2.curr_wp].x == 0 && d2.waypoints[d2.curr_wp].y == 0)
{
running = false;
}
// if (d3.waypoints[d3.curr_wp].x == 0 && d3.waypoints[d3.curr_wp].y == 0)
// {
// running = false;
// }
if (v2_distance(d1.position, d2.position) < (d1.size + d2.size)) // ||
//v2_distance(d1.position, d3.position) < (d1.size + d3.size)))// ||
//v2_distance(d3.position, d2.position) < (d3.size + d2.size))
{
omp_set_lock(&writelock);
collisions[i] += 1;
omp_unset_lock(&writelock);
running = false;
}
timer += dt;
}
} //iterations
if (collisions[i] == 0.0)
{
printf("%.3f\n", rates[i]);
break;
}
} //rates
} //openmp
FILE *results = fopen("results_speed_loss_avoid.txt", "a");
fprintf(results, "Error: %.3f\n", error);
fprintf(results, "Loss: %.3f\n", l);
fprintf(results, "Speed: %.3f\n", speed);
switch (prob)
{
case 'E':
fprintf(results, "Wi-Fi beacons\n");
break;
case 'C':
fprintf(results, "ADS-B\n");
break;
default:
fprintf(results, "No loss\n");
}
for (uint64_t k = 0; k < len_rates; k++)
{
printf("%.3f\t%.6f\n", rates[k], collisions[k] / iterations);
fprintf(results, "%.3f\t%.10f\n", rates[k], collisions[k] / iterations);
}
fclose(results);
return 0;
}
|
List.h | /** 2017 Neil Edelman, distributed under the terms of the MIT License;
see readme.txt, or \url{ https://opensource.org/licenses/MIT }.
{<T>List} is a doubly-linked-list of {<T>Link}, of which data of type, {<T>},
must be set using {LIST_TYPE}. This is an abstract data structure requiring
{<T>Link} storage, and can possibly store this as a sub-structure of larger
data-type. Provides \see{<T>LinkMigrate} for self-referencing pointers that
change as the result of a memory relocation within the list.
Supports one to four different orders in the same type. The preprocessor
macros are all undefined at the end of the file for convenience.
@param LIST_NAME, LIST_TYPE
The name that literally becomes {<T>}, and a valid type associated therewith,
accessible to the compiler at the time of inclusion; should be conformant to
naming and to the maximum available length of identifiers. Must each be
present before including.
@param LIST_COMPARATOR or LIST_U[A-D]_NAME, LIST_U[A-D]_COMPARATOR
Each {LIST_U[A-D]_NAME} literally becomes, {<U>}, an order, and optional
comparator, {LIST_U[A-D]_COMPARATOR}, an equivalence relation function
implementing {<T>Comparator}. Not defining this implies one anonymous order
which one can set a comparator using {LIST_COMPARATOR}; {<U>} will be empty in
this case.
@param LIST_TO_STRING
Optional print function implementing {<T>ToString}; makes available
\see{<T>List<U>ToString}.
@param LIST_OPENMP
Tries to parallelise using {OpenMP}, \url{ http://www.openmp.org/ }. This is
limited to some, usually multi-order, functions.
@param LIST_TEST
Unit testing framework using {<T>ListTest}, included in a separate header,
{../test/ListTest.h}. Must be defined equal to a (random) filler, satisfying
{<T>Action}. If {NDEBUG} is not defined, turns on {assert} private function
integrity testing. Requires {LIST_TO_STRING}.
@title List.h
@author Neil
@std C89/90
@version
2018-04 {<T>ListNode} has been shortened to {<T>Link}, thus potential
namespace violations doubled. Two dynamic memory allocations have been
collapsed into one by making it a non-pointer at the cost of readability.
These changes are good for more complex data structures #including list.
@since
2018-02 Eliminated the need for unnecessarily {<T>List}. Now one
must initialise static variables with {<T>ListClear}. Eliminated
{LIST_STATIC_SORT}.
2017-12 Type information on backing.
2017-10 Anonymous orders.
2017-07 Made migrate simpler.
2017-06 Split Add into Push and Unshift.
2017-05 Separated from backing.
@fixme {GCC}: {#pragma GCC diagnostic ignored "-Wconversion"}; libc 4.2
{assert} warnings on {LIST_TEST}.
@fixme {MSVC}: {#pragma warning(disable: x)} where {x} is: 4464 contains '..'
uhm, thanks?; 4706 not {Java}; 4710, 4711 inlined info; 4820 padding info;
4996 not {C++11}.
@fixme {clang}: {#pragma clang diagnostic ignored "-Wx"} where {x} is:
{padded}; {documentation}; {documentation-unknown-command} it's not quite
{clang-tags}; 3.8 {disabled-macro-expansion} on {toupper} in {LIST_TEST}.
@fixme Non-const void pointers in {<T>List<U>BiAction} are not effective; have
an interface. While we're at it, {<T>LinkMigrate} should be an interface.
Everything should be an interface. */
/* 2017-05-12 tested with:
gcc version 4.2.1 (Apple Inc. build 5666) (dot 3)
Apple clang version 1.7 (tags/Apple/clang-77) (based on LLVM 2.9svn)
gcc version 4.9.2 (Debian 4.9.2-10)
Microsoft Visual Studio Enterprise 2015 Version 14.0.25424.00 Update 3
Borland 10.1 Embarcadero C++ 7.20 for Win32
MinGW gcc version 4.9.3 (GCC) Win32
gcc version 5.4.0 20160609 (Ubuntu 5.4.0-6ubuntu1~16.04.4)
clang version 3.8.0-2ubuntu4 (tags/RELEASE_380/final) */
/* Original #include in the user's C file, and not from calling recursively;
all "LIST_*" names are assumed to be reserved. */
#if !defined(LIST_U_NAME) /* <-- !LIST_U_NAME */
#include <stddef.h> /* ptrdiff_t offset_of */
#include <assert.h> /* assert */
#ifdef LIST_TO_STRING /* <-- print */
#include <stdio.h> /* sprintf */
#include <string.h> /* strlen */
#endif /* print --> */
/* Check defines; {[A, D]} is just arbitrary; more could be added. */
#ifndef LIST_NAME
#error List generic LIST_NAME undefined.
#endif
#ifndef LIST_TYPE
#error List generic LIST_TYPE undefined.
#endif
#if defined(LIST_UA_COMPARATOR) && !defined(LIST_UA_NAME)
#error List: LIST_UA_COMPARATOR requires LIST_UA_NAME.
#endif
#if defined(LIST_UB_COMPARATOR) && !defined(LIST_UB_NAME)
#error List: LIST_UB_COMPARATOR requires LIST_UB_NAME.
#endif
#if defined(LIST_UC_COMPARATOR) && !defined(LIST_UC_NAME)
#error List: LIST_UC_COMPARATOR requires LIST_UC_NAME.
#endif
#if defined(LIST_UD_COMPARATOR) && !defined(LIST_UD_NAME)
#error List: LIST_UD_COMPARATOR requires LIST_UD_NAME.
#endif
/* Anonymous one-order implicit macro into {UA} for convenience and brevity. */
#if !defined(LIST_UA_NAME) && !defined(LIST_UB_NAME) \
&& !defined(LIST_UC_NAME) && !defined(LIST_UD_NAME) /* <-- anon */
#define LIST_U_ANONYMOUS
#define LIST_UA_NAME
#ifdef LIST_COMPARATOR
#define LIST_UA_COMPARATOR LIST_COMPARATOR
#endif
#else /* anon --><-- !anon */
#ifdef LIST_COMPARATOR
#error List: LIST_COMPARATOR can only be anonymous; use LIST_U[A-D]_COMPARATOR.
#endif
#endif /* !anon --> */
#if defined(LIST_TEST) && !defined(LIST_TO_STRING) /* <-- error */
#error LIST_TEST requires LIST_TO_STRING.
#endif /* error --> */
#if !defined(LIST_TEST) && !defined(NDEBUG) /* <-- !assert */
#define LIST_NDEBUG
#define NDEBUG
#endif /* !assert --> */
#if defined(LIST_UA_COMPARATOR) || defined(LIST_UB_COMPARATOR) \
|| defined(LIST_UC_COMPARATOR) || defined(LIST_UD_COMPARATOR) /* <-- some */
#define LIST_SOME_COMPARATOR
#endif /* some --> */
/* Generics using the preprocessor;
\url{ http://stackoverflow.com/questions/16522341/pseudo-generics-in-c }. */
#ifdef CAT
#undef CAT
#endif
#ifdef CAT_
#undef CAT_
#endif
#ifdef PCAT
#undef PCAT
#endif
#ifdef PCAT_
#undef PCAT_
#endif
#ifdef T
#undef T
#endif
#ifdef T_
#undef T_
#endif
#ifdef PT_
#undef PT_
#endif
#define CAT_(x, y) x ## y
#define CAT(x, y) CAT_(x, y)
#define PCAT_(x, y) x ## _ ## y
#define PCAT(x, y) PCAT_(x, y)
#define T_(thing) CAT(LIST_NAME, thing)
#define PT_(thing) PCAT(list, PCAT(LIST_NAME, thing)) /* {private <T>} */
/* Troubles with this line? check to ensure that {LIST_TYPE} is a valid type,
whose definition is placed above {#include "List.h"}. */
typedef LIST_TYPE PT_(Type);
#define T PT_(Type)
/* [A, D] */
#ifdef UA_
#undef UA_
#undef T_UA_
#undef PT_UA_
#endif
#ifdef UB_
#undef UB_
#undef T_UB_
#undef PT_UB_
#endif
#ifdef UC_
#undef UC_
#undef T_UC_
#undef PT_UC_
#endif
#ifdef UD_
#undef UD_
#undef T_UD_
#undef PT_UD_
#endif
/* Data exclusively, public f'ns, and private f'ns. */
#ifdef LIST_U_ANONYMOUS /* <-- anon: We are using C89; "Empty macro arguments
were standardized in C99," workaround. */
#define UA_(thing) PCAT(anonymous, thing)
#define T_UA_(thing1, thing2) CAT(CAT(LIST_NAME, thing1), thing2)
#define PT_UA_(thing1, thing2) PCAT(list, PCAT(PCAT(LIST_NAME, thing1), \
CAT(_, thing2)))
#else /* anon --><-- !anon */
#ifdef LIST_UA_NAME
#define UA_(thing) PCAT(LIST_UA_NAME, thing)
#define T_UA_(thing1, thing2) CAT(CAT(LIST_NAME, thing1), \
CAT(LIST_UA_NAME, thing2))
#define PT_UA_(thing1, thing2) PCAT(list, PCAT(PCAT(LIST_NAME, thing1), \
PCAT(LIST_UA_NAME, thing2)))
#endif
#ifdef LIST_UB_NAME
#define UB_(thing) PCAT(LIST_UB_NAME, thing)
#define T_UB_(thing1, thing2) CAT(CAT(LIST_NAME, thing1), \
CAT(LIST_UB_NAME, thing2))
#define PT_UB_(thing1, thing2) PCAT(list, PCAT(PCAT(LIST_NAME, thing1), \
PCAT(LIST_UB_NAME, thing2)))
#endif
#ifdef LIST_UC_NAME
#define UC_(thing) PCAT(LIST_UC_NAME, thing)
#define T_UC_(thing1, thing2) CAT(CAT(LIST_NAME, thing1), \
CAT(LIST_UC_NAME, thing2))
#define PT_UC_(thing1, thing2) PCAT(list, PCAT(PCAT(LIST_NAME, thing1), \
PCAT(LIST_UC_NAME, thing2)))
#endif
#ifdef LIST_UD_NAME
#define UD_(thing) PCAT(LIST_UD_NAME, thing)
#define T_UD_(thing1, thing2) CAT(CAT(LIST_NAME, thing1), \
CAT(LIST_UD_NAME, thing2))
#define PT_UD_(thing1, thing2) PCAT(list, PCAT(PCAT(LIST_NAME, thing1), \
PCAT(LIST_UD_NAME, thing2)))
#endif
#endif /* !anon --> */
/* Constants across multiple includes in the same translation unit. */
#ifndef LIST_H /* <-- LIST_H */
#define LIST_H
/* \see{combine_sets} operations bit-vector; dummy {LO_?}: {clang -Weverything}
complains that it is not closed under union, a very valid point. */
enum ListOperation {
LO_SUBTRACTION_AB = 1,
LO_SUBTRACTION_BA = 2,
LO_A,
LO_INTERSECTION = 4,
LO_B, LO_C, LO_D,
LO_DEFAULT_A = 8,
LO_E, LO_F, LO_G, LO_H, LO_I, LO_J, LO_K,
LO_DEFAULT_B = 16,
LO_L, LO_M, LO_N, LO_O, LO_P, LO_Q, LO_R, LO_S,
LO_T, LO_U, LO_V, LO_W, LO_X, LO_Y, LO_Z
};
#endif /* LIST_H */
/* One time in the same translation unit. */
#ifndef MIGRATE /* <-- migrate */
#define MIGRATE
/** Contains information about a {realloc}. */
struct Migrate;
struct Migrate {
const void *begin, *end; /* Old pointers. */
ptrdiff_t delta;
};
#endif /* migrate --> */
/* Private list position: the raw links.  One {prev, next} pair exists per
   compiled-in order [A-D], so one node can sit in up to four orders at
   once. */
struct PT_(X) {
#ifdef LIST_UA_NAME
	struct PT_(X) *UA_(prev), *UA_(next);
#endif
#ifdef LIST_UB_NAME
	struct PT_(X) *UB_(prev), *UB_(next);
#endif
#ifdef LIST_UC_NAME
	struct PT_(X) *UC_(prev), *UC_(next);
#endif
#ifdef LIST_UD_NAME
	struct PT_(X) *UD_(prev), *UD_(next);
#endif
};
/** A single link in the linked-list derived from {<T>}. Storage of this
 structure is the responsibility of the caller. The {<T>} is stored in the
 element {data}. */
struct T_(Link);
struct T_(Link) {
	T data; /* the user's payload */
	struct PT_(X) x; /* intrusive links; container_of recovers the Link */
};
/** Serves as an a head for linked-list(s) of {<T>Link}. Use
\see{<T>ListClear} to initialise. */
struct T_(List);
struct T_(List) {
/* {head.prev} and {tail.next} are always null. The nodes always have a
non-null {head} and {tail}. This allows the inference of everything
from everything else and reduces the number of arguments that we have to
pass. \see{<T>ListSelfCorrect} uses the fact that {head} and {tail} are
packed to determine whether a list is empty, therefore, {head, tail}
cannot be separated or reversed; furthermore, one cannot have
{sizeof data == 0} and call \see{<T>ListSelfCorrect}, though I don't see
how one could do that. */
struct PT_(X) head, tail;
};
/** Takes {<T>}; used in \see{<T>List<U>ForEach}. This definition is about the
{LIST_NAME} type, that is, it is without the prefix {List}; to avoid namespace
collisions, this is private, meaning the name is mangled. If one want this
definition, re-declare it. */
typedef void (*PT_(Action))(T *const);
/** Takes {<T>} and <void *>; used in \see{<T>List<U>BiForEach}. */
typedef void (*PT_(BiAction))(T *const, void *const);
/** Takes {<T>}, returns (non-zero) true or (zero) false. */
typedef int (*PT_(Predicate))(const T *const);
/** Takes {<T>} and {void *}, returns (non-zero) true or (zero) false. */
typedef int (*PT_(BiPredicate))(T *const, void *const);
#ifdef LIST_SOME_COMPARATOR /* <-- comp */
/** Compares two {<T>} values and returns less then, equal to, or greater then
zero. Should do so forming an equivalence relation with respect to {<T>}. */
typedef int (*PT_(Comparator))(const T *, const T *);
#endif /* comp --> */
#ifdef LIST_TO_STRING /* <-- string */
/** Responsible for turning {<T>} (the first argument) into a 12 {char}
null-terminated output string (the second.) */
typedef void (*PT_(ToString))(const T *const, char (*const)[12]);
/* Check that {LIST_TO_STRING} is a function implementing {<T>ToString}. */
static const PT_(ToString) PT_(to_string) = (LIST_TO_STRING);
#endif /* string --> */
/** Private: {container_of}; recovers the {<T>Link} that embeds {x}. */
static struct T_(Link) *PT_(node_holds_x)(struct PT_(X) *const x) {
	char *const base = (char *)x - offsetof(struct T_(Link), x);
	return (struct T_(Link) *)(void *)base;
}
/** Private: {container_of}; recovers the {<T>Link} that embeds {data}. */
static struct T_(Link) *PT_(node_holds_data)(T *const data) {
	char *const base = (char *)data - offsetof(struct T_(Link), data);
	return (struct T_(Link) *)(void *)base;
}
/** Private: {container_of} on a read-only element; used for
 \see{<T>List<U>Next}, {etc}. */
static const struct T_(Link) *PT_(node_holds_const_data)(
	const T *const data) {
	const char *const base
		= (const char *)data - offsetof(struct T_(Link), data);
	return (const struct T_(Link) *)(const void *)base;
}
/** Private: used in \see{<PT>_order_<U>_migrate_each};
 \${ptr \in [begin, end) -> ptr += delta}.
 @return Non-zero iff the pointer was inside the moved block and was
 adjusted. */
static int PT_(migrate)(struct PT_(X) **const x_ptr,
	const struct Migrate *const migrate) {
	const void *const old = *x_ptr;
	assert(x_ptr);
	if(old >= migrate->begin && old < migrate->end) {
		*(char **)x_ptr += migrate->delta;
		return 1;
	}
	return 0;
}
/* Prototypes: needed for the next section, but undefined until later. */
static void PT_(add_before)(struct PT_(X) *const, struct PT_(X) *const);
static void PT_(add_after)(struct PT_(X) *const, struct PT_(X) *const);
static void PT_(remove)(struct PT_(X) *const);
/* Note to future self: recursive includes. The {LIST_U_NAME} pre-processor
flag controls this behaviour; we are currently in the {!LIST_U_NAME} section.
These will get all the functions with {<U>} in them from below. */
#ifdef LIST_UA_NAME /* <-- a */
#define LIST_U_NAME LIST_UA_NAME
#ifdef LIST_UA_COMPARATOR /* <-- comp */
#define LIST_U_COMPARATOR LIST_UA_COMPARATOR
#endif /* comp --> */
#include "List.h"
#endif /* a --> */
#ifdef LIST_UB_NAME /* <-- b */
#define LIST_U_NAME LIST_UB_NAME
#ifdef LIST_UB_COMPARATOR /* <-- comp */
#define LIST_U_COMPARATOR LIST_UB_COMPARATOR
#endif /* comp --> */
#include "List.h"
#endif /* b --> */
#ifdef LIST_UC_NAME /* <-- c */
#define LIST_U_NAME LIST_UC_NAME
#ifdef LIST_UC_COMPARATOR /* <-- comp */
#define LIST_U_COMPARATOR LIST_UC_COMPARATOR
#endif /* comp --> */
#include "List.h"
#endif /* c --> */
#ifdef LIST_UD_NAME /* <-- d */
#define LIST_U_NAME LIST_UD_NAME
#ifdef LIST_UD_COMPARATOR /* <-- comp */
#define LIST_U_COMPARATOR LIST_UD_COMPARATOR
#endif /* comp --> */
#include "List.h"
#endif /* d --> */
/** Private: add {add} before {x} in every compiled-in order [A-D], keeping
 all orders consistent.  Both nodes are assumed valid and distinct. */
static void PT_(add_before)(struct PT_(X) *const x, struct PT_(X) *const add) {
	assert(x && add && x != add);
#ifdef LIST_UA_NAME /* <-- a */
	PT_UA_(x, add_before)(x, add);
#endif /* a --> */
#ifdef LIST_UB_NAME /* <-- b */
	PT_UB_(x, add_before)(x, add);
#endif /* b --> */
#ifdef LIST_UC_NAME /* <-- c */
	PT_UC_(x, add_before)(x, add);
#endif /* c --> */
#ifdef LIST_UD_NAME /* <-- d */
	PT_UD_(x, add_before)(x, add);
#endif /* d --> */
}
/** Private: add {add} after {x} in every compiled-in order [A-D], keeping
 all orders consistent.  Both nodes are assumed valid and distinct. */
static void PT_(add_after)(struct PT_(X) *const x, struct PT_(X) *const add) {
	assert(x && add && x != add);
#ifdef LIST_UA_NAME /* <-- a */
	PT_UA_(x, add_after)(x, add);
#endif /* a --> */
#ifdef LIST_UB_NAME /* <-- b */
	PT_UB_(x, add_after)(x, add);
#endif /* b --> */
#ifdef LIST_UC_NAME /* <-- c */
	PT_UC_(x, add_after)(x, add);
#endif /* c --> */
#ifdef LIST_UD_NAME /* <-- d */
	PT_UD_(x, add_after)(x, add);
#endif /* d --> */
}
/** Private: unlink {x} from every compiled-in order.  Assumes {x} belongs to
 a list; removing an unlisted node is undefined.
 @implements <T>LinkAction */
static void PT_(remove)(struct PT_(X) *const x) {
#ifdef LIST_UA_NAME /* <-- a */
	PT_UA_(x, remove)(x);
#endif /* a --> */
#ifdef LIST_UB_NAME /* <-- b */
	PT_UB_(x, remove)(x);
#endif /* b --> */
#ifdef LIST_UC_NAME /* <-- c */
	PT_UC_(x, remove)(x);
#endif /* c --> */
#ifdef LIST_UD_NAME /* <-- d */
	PT_UD_(x, remove)(x);
#endif /* d --> */
}
/** Private: clears and initialises.  For each compiled-in order the {head}
 and {tail} sentinels are wired to each other and their outward pointers
 nulled, giving a valid empty list. */
static void PT_(clear)(struct T_(List) *const list) {
	assert(list);
#ifdef LIST_UA_NAME
	list->head.UA_(prev) = list->tail.UA_(next) = 0;
	list->head.UA_(next) = &list->tail;
	list->tail.UA_(prev) = &list->head;
#endif
#ifdef LIST_UB_NAME
	list->head.UB_(prev) = list->tail.UB_(next) = 0;
	list->head.UB_(next) = &list->tail;
	list->tail.UB_(prev) = &list->head;
#endif
#ifdef LIST_UC_NAME
	list->head.UC_(prev) = list->tail.UC_(next) = 0;
	list->head.UC_(next) = &list->tail;
	list->tail.UC_(prev) = &list->head;
#endif
#ifdef LIST_UD_NAME
	list->head.UD_(prev) = list->tail.UD_(next) = 0;
	list->head.UD_(next) = &list->tail;
	list->tail.UD_(prev) = &list->head;
#endif
}
/** Private: splice all elements of {from} before {x}, in every compiled-in
 order; {from} is left empty.  Both arguments are assumed valid. */
static void PT_(add_list_before)(struct PT_(X) *const x,
	struct T_(List) *const from) {
	assert(x && from);
#ifdef LIST_UA_NAME /* <-- a */
	PT_UA_(x, cat)(x, from);
#endif /* a --> */
#ifdef LIST_UB_NAME /* <-- b */
	PT_UB_(x, cat)(x, from);
#endif /* b --> */
#ifdef LIST_UC_NAME /* <-- c */
	PT_UC_(x, cat)(x, from);
#endif /* c --> */
#ifdef LIST_UD_NAME /* <-- d */
	PT_UD_(x, cat)(x, from);
#endif /* d --> */
}
/** Clears and removes all values from {list}, thereby initialising the
 {<T>List}. All previous values are un-associated.
 @param list: if null, does nothing.
 @order \Theta(1)
 @allow */
static void T_(ListClear)(struct T_(List) *const list) {
	if(list) PT_(clear)(list);
}
/** Initialises the contents of the node which contains {add} to add it to the
 beginning of {list}. If either {list} or {add} is null, it does nothing.
 @param add: Must be inside of a {<T>Link} and not associated to any list;
 this associates the {<T>Link} with the list.
 @order \Theta(1)
 @fixme Untested.
 @allow */
static void T_(ListUnshift)(struct T_(List) *const list, T *const add) {
	struct PT_(X) *x;
	if(!list || !add) return;
	x = &PT_(node_holds_data)(add)->x;
	PT_(add_after)(&list->head, x);
}
/** Initialises the contents of the node which contains {add} to add it to the
 end of {list}.
 @param list: If null, does nothing.
 @param add: If null, does nothing, otherwise must be inside of a
 {<T>Link} and not associated to any list; this associates the
 {<T>Link} that {add} is a part of with {list}, if it exists.
 @order \Theta(1)
 @allow */
static void T_(ListPush)(struct T_(List) *const list, T *const add) {
	struct PT_(X) *x;
	if(!list || !add) return;
	x = &PT_(node_holds_data)(add)->x;
	PT_(add_before)(&list->tail, x);
}
/** Initialises the contents of the node which contains {add} to add it
 immediately before {data}.
 @param data: If null, does nothing, otherwise must be part of a list.
 @param add: If null, does nothing, otherwise must be inside of a {<T>Link}
 and not associated to any list; this associates the {<T>Link} with the
 list of which {data} is a part, if it exists.
 @order \Theta(1)
 @fixme Untested.
 @allow */
static void T_(ListAddBefore)(T *const data, T *const add) {
	if(data && add)
		PT_(add_before)(&PT_(node_holds_data)(data)->x,
			&PT_(node_holds_data)(add)->x);
}
/** Initialises the contents of the node which contains {add} to add it
 immediately after {data}.
 @param data: If null, does nothing, otherwise must be part of a list.
 @param add: If null, does nothing, otherwise must be inside of a {<T>Link}
 and not associated to any list; this associates the {<T>Link} with the
 list of which {data} is a part, if it exists.
 @order \Theta(1)
 @fixme Untested.
 @allow */
static void T_(ListAddAfter)(T *const data, T *const add) {
	if(data && add)
		PT_(add_after)(&PT_(node_holds_data)(data)->x,
			&PT_(node_holds_data)(add)->x);
}
/** Un-associates {data} from the list; consequently, the {data} is free to add
 to another list or delete. Removing an element that was not added to a list
 results in undefined behaviour.
 @param data: If null, does nothing.
 @order \Theta(1)
 @allow */
static void T_(ListRemove)(T *const data) {
	if(data) PT_(remove)(&PT_(node_holds_data)(data)->x);
}
/** Appends the elements of {from} onto {list}. Unlike \see{<T>List<U>TakeIf}
 and all other selective choosing functions, that is, the ones with {<U>}, this
 function preserves two or more orders.
 @param list: If null, then it removes elements.
 @param from: If null, it does nothing, otherwise this list will be empty on
 return.
 @order \Theta(1)
 @allow */
static void T_(ListTake)(struct T_(List) *const list,
	struct T_(List) *const from) {
	if(!from || from == list) return;
	if(list) {
		PT_(add_list_before)(&list->tail, from);
	} else {
		PT_(clear)(from);
	}
}
/** Appends the elements from {from} before {data}.
 @param data: If null, does nothing, otherwise if not part of a valid list,
 results are undefined.
 @param from: If null, does nothing, otherwise this list will be empty on
 return.
 @order \Theta(1)
 @fixme Untested.
 @allow */
static void T_(ListTakeBefore)(T *const data, struct T_(List) *const from) {
	if(data && from)
		PT_(add_list_before)(&PT_(node_holds_data)(data)->x, from);
}
#ifdef LIST_SOME_COMPARATOR /* <-- comp */
/** Merges the elements from {from} into {list}. This uses local order and it
 doesn't sort them first; see \see{<T>ListSort}. Concatenates all lists that
 don't have a {LIST_COMPARATOR} or {LIST_U[A-D]_COMPARATOR}.  Under
 {LIST_OPENMP}, each order is processed in its own parallel section.
 @param list: if null, then it removes elements.
 @param from: if null, does nothing.
 @order O({list}.n + {from}.n)
 @allow */
static void T_(ListMerge)(struct T_(List) *const list,
	struct T_(List) *const from) {
	if(!from || from == list) return;
	if(!list) { PT_(clear)(from); return; }
#ifdef LIST_OPENMP /* <-- omp */
	#pragma omp parallel sections
#endif /* omp --> */
	{
#ifdef LIST_UA_NAME /* <-- a */
#ifdef LIST_UA_COMPARATOR /* <-- comp */
#ifdef LIST_OPENMP /* <-- omp */
		#pragma omp section
#endif /* omp --> */
		PT_UA_(list, merge)(list, from);
#else /* comp --><-- !comp */
		PT_UA_(list, cat)(list, from);
#endif /* !comp --> */
#endif /* a --> */
#ifdef LIST_UB_NAME /* <-- b */
#ifdef LIST_UB_COMPARATOR /* <-- comp */
#ifdef LIST_OPENMP /* <-- omp */
		#pragma omp section
#endif /* omp --> */
		PT_UB_(list, merge)(list, from);
#else /* comp --><-- !comp */
		PT_UB_(list, cat)(list, from);
#endif /* !comp --> */
#endif /* b --> */
#ifdef LIST_UC_NAME /* <-- c */
#ifdef LIST_UC_COMPARATOR /* <-- comp */
#ifdef LIST_OPENMP /* <-- omp */
		#pragma omp section
#endif /* omp --> */
		PT_UC_(list, merge)(list, from);
#else /* comp --><-- !comp */
		PT_UC_(list, cat)(list, from);
#endif /* !comp --> */
#endif /* c --> */
#ifdef LIST_UD_NAME /* <-- d */
#ifdef LIST_UD_COMPARATOR /* <-- comp */
#ifdef LIST_OPENMP /* <-- omp */
		#pragma omp section
#endif /* omp --> */
		PT_UD_(list, merge)(list, from);
#else /* comp --><-- !comp */
		PT_UD_(list, cat)(list, from);
#endif /* !comp --> */
#endif /* d --> */
	}
}
#ifndef LIST_U_ANONYMOUS /* <-- !anon; already has ListSort, T_U_(List, Sort) */
/** Performs a stable, adaptive sort on all orders which have comparators;
 orders without a comparator are untouched.  Under {LIST_OPENMP} each order
 is sorted in its own parallel section.
 @param list: If null, does nothing.
 @order \Omega({list}.n), O({list}.n log {list}.n)
 @allow */
static void T_(ListSort)(struct T_(List) *const list) {
	if(!list) return;
#ifdef LIST_OPENMP /* <-- omp */
	#pragma omp parallel sections
#endif /* omp --> */
	{
#ifdef LIST_UA_COMPARATOR /* <-- a */
#ifdef LIST_OPENMP /* <-- omp */
		#pragma omp section
#endif /* omp --> */
		PT_UA_(natural, sort)(list);
#endif /* a --> */
#ifdef LIST_UB_COMPARATOR /* <-- b */
#ifdef LIST_OPENMP /* <-- omp */
		#pragma omp section
#endif /* omp --> */
		PT_UB_(natural, sort)(list);
#endif /* b --> */
#ifdef LIST_UC_COMPARATOR /* <-- c */
#ifdef LIST_OPENMP /* <-- omp */
		#pragma omp section
#endif /* omp --> */
		PT_UC_(natural, sort)(list);
#endif /* c --> */
#ifdef LIST_UD_COMPARATOR /* <-- d */
#ifdef LIST_OPENMP /* <-- omp */
		#pragma omp section
#endif /* omp --> */
		PT_UD_(natural, sort)(list);
#endif /* d --> */
	}
}
#endif /* !anon --> */
#endif /* comp --> */
/** Adjusts one {<T>Link}'s internal pointers when supplied with a
 {Migrate} parameter; each compiled-in order's {prev}/{next} pointers are
 shifted via \see{<PT>_migrate} if they point into the moved block.
 @param data: If null, does nothing.
 @param migrate: If null, does nothing. Should only be called in a {Migrate}
 function; pass the {migrate} parameter.
 @implements <<T>Link>Migrate
 @order \Theta(n)
 @allow */
static void T_(LinkMigrate)(T *const data, const struct Migrate *const migrate){
	struct PT_(X) *x;
	/* Relies on not-strictly-defined behaviour because pointers are not
	 necessarily contiguous in memory; it should be fine in practice. */
	if(!data || !migrate || !migrate->delta) return;
	x = &PT_(node_holds_data)(data)->x;
#ifdef LIST_OPENMP /* <-- omp */
	#pragma omp parallel sections
#endif /* omp --> */
	{
#ifdef LIST_UA_NAME /* <-- a */
#ifdef LIST_OPENMP /* <-- omp */
		#pragma omp section
#endif /* omp --> */
		PT_UA_(x, migrate)(x, migrate);
#endif /* a --> */
#ifdef LIST_UB_NAME /* <-- b */
#ifdef LIST_OPENMP /* <-- omp */
		#pragma omp section
#endif /* omp --> */
		PT_UB_(x, migrate)(x, migrate);
#endif /* b --> */
#ifdef LIST_UC_NAME /* <-- c */
#ifdef LIST_OPENMP /* <-- omp */
		#pragma omp section
#endif /* omp --> */
		PT_UC_(x, migrate)(x, migrate);
#endif /* c --> */
#ifdef LIST_UD_NAME /* <-- d */
#ifdef LIST_OPENMP /* <-- omp */
		#pragma omp section
#endif /* omp --> */
		PT_UD_(x, migrate)(x, migrate);
#endif /* d --> */
	}
}
/** Adjusts a pointer, {pdata}, to a {<T>Link}, given {migrate}: if the
 target lies in the moved interval [begin, end), shift it by {delta}.
 @param pdata, migrate: If null, does nothing.
 @fixme Untested. */
static void T_(LinkMigratePointer)(T **const pdata,
	const struct Migrate *const migrate) {
	const void *target;
	if(!pdata || !migrate) return;
	target = *pdata;
	if(target >= migrate->begin && target < migrate->end)
		*(char **)pdata += migrate->delta;
}
/** One must call this whenever the {<T>List} changes memory locations, (not
 the nodes.) This resets and corrects the two ends; the two ends become invalid
 even when it's empty.  Each compiled-in order is corrected independently.
 @param list: If null, does nothing.
 @order O(1)
 @fixme Untested.
 @allow */
static void T_(ListSelfCorrect)(struct T_(List) *const list) {
	if(!list) return;
#ifdef LIST_OPENMP /* <-- omp */
	#pragma omp parallel sections
#endif /* omp --> */
	{
#ifdef LIST_UA_NAME /* <-- a */
#ifdef LIST_OPENMP /* <-- omp */
		#pragma omp section
#endif /* omp --> */
		PT_UA_(list, self_correct)(list);
#endif /* a --> */
#ifdef LIST_UB_NAME /* <-- b */
#ifdef LIST_OPENMP /* <-- omp */
		#pragma omp section
#endif /* omp --> */
		PT_UB_(list, self_correct)(list);
#endif /* b --> */
#ifdef LIST_UC_NAME /* <-- c */
#ifdef LIST_OPENMP /* <-- omp */
		#pragma omp section
#endif /* omp --> */
		PT_UC_(list, self_correct)(list);
#endif /* c --> */
#ifdef LIST_UD_NAME /* <-- d */
#ifdef LIST_OPENMP /* <-- omp */
		#pragma omp section
#endif /* omp --> */
		PT_UD_(list, self_correct)(list);
#endif /* d --> */
	}
}
/** Debugging purposes. Turn {LIST_TEST} on. Counts every enabled index and
 asserts all indices see the same number of elements. */
static void T_(ListAudit)(const struct T_(List) *const list) {
size_t i, j;
int is_j = 0;
if(!list) return;
/* NOTE(review): under {LIST_OPENMP} the sections below share {i}, {j} and
{is_j} without synchronisation, which looks like a data race; confirm that
auditing is only done with OpenMP off, or privatise these. */
#ifdef LIST_OPENMP /* <-- omp */
#pragma omp parallel sections
#endif /* omp --> */
{
#ifdef LIST_UA_NAME /* <-- a */
#ifdef LIST_OPENMP /* <-- omp */
#pragma omp section
#endif /* omp --> */
i = PT_UA_(x, audit)(list);
if(is_j) assert(i == j); else (j = i, is_j = 1);
#endif /* a --> */
#ifdef LIST_UB_NAME /* <-- b */
#ifdef LIST_OPENMP /* <-- omp */
#pragma omp section
#endif /* omp --> */
i = PT_UB_(x, audit)(list);
if(is_j) assert(i == j); else (j = i, is_j = 1);
#endif /* b --> */
#ifdef LIST_UC_NAME /* <-- c */
#ifdef LIST_OPENMP /* <-- omp */
#pragma omp section
#endif /* omp --> */
i = PT_UC_(x, audit)(list);
if(is_j) assert(i == j); else (j = i, is_j = 1);
#endif /* c --> */
#ifdef LIST_UD_NAME /* <-- d */
#ifdef LIST_OPENMP /* <-- omp */
#pragma omp section
#endif /* omp --> */
i = PT_UD_(x, audit)(list);
if(is_j) assert(i == j); else (j = i, is_j = 1);
#endif /* d --> */
}
}
#ifdef LIST_TEST /* <-- test */
#include "../test/TestList.h" /* Need this file if one is going to run tests. */
#endif /* test --> */
static void PT_(unused_coda)(void);
/** This silences unused function warnings from the pre-processor, but allows
optimisation, (hopefully.)
\url{ http://stackoverflow.com/questions/43841780/silencing-unused-static-function-warnings-for-a-section-of-code } */
static void PT_(unused_list)(void) {
/* Every call passes null; the public functions are presumably no-ops on
null (their docs say so), so this only creates references, never work. */
T_(ListClear)(0);
T_(ListUnshift)(0, 0);
T_(ListPush)(0, 0);
T_(ListAddBefore)(0, 0);
T_(ListAddAfter)(0, 0);
T_(ListRemove)(0);
T_(ListTake)(0, 0);
T_(ListTakeBefore)(0, 0);
#ifdef LIST_SOME_COMPARATOR /* <-- comp */
T_(ListMerge)(0, 0);
T_(ListSort)(0);
#endif /* comp --> */
T_(LinkMigrate)(0, 0);
T_(LinkMigratePointer)(0, 0);
T_(ListSelfCorrect)(0);
T_(ListAudit)(0);
PT_(unused_coda)();
}
/** {clang}'s pre-processor is not fooled if one has one function. */
static void PT_(unused_coda)(void) { PT_(unused_list)(); }
/* Un-define all macros. */
#undef LIST_NAME
#undef LIST_TYPE
/* Undocumented; allows nestled inclusion so long as: {CAT_}, {CAT}, {PCAT},
{PCAT_} conform, and {T}, and {U}, are not used. */
#ifdef LIST_SUBTYPE /* <-- sub */
#undef LIST_SUBTYPE
#else /* sub --><-- !sub */
#undef CAT
#undef CAT_
#undef PCAT
#undef PCAT_
#endif /* !sub --> */
#undef T
#undef T_
#undef PT_
#ifdef LIST_TO_STRING
#undef LIST_TO_STRING
#endif
#ifdef LIST_FILLER
#undef LIST_FILLER
#endif
#ifdef LIST_COMPARATOR
#undef LIST_COMPARATOR
#endif
#ifdef LIST_U_ANONYMOUS
#undef LIST_U_ANONYMOUS
#endif
#ifdef LIST_UA_NAME
#undef LIST_UA_NAME
#endif
#ifdef LIST_UA_COMPARATOR
#undef LIST_UA_COMPARATOR
#endif
#ifdef LIST_UB_NAME
#undef LIST_UB_NAME
#endif
#ifdef LIST_UB_COMPARATOR
#undef LIST_UB_COMPARATOR
#endif
#ifdef LIST_UC_NAME
#undef LIST_UC_NAME
#endif
#ifdef LIST_UC_COMPARATOR
#undef LIST_UC_COMPARATOR
#endif
#ifdef LIST_UD_NAME
#undef LIST_UD_NAME
#endif
#ifdef LIST_UD_COMPARATOR
#undef LIST_UD_COMPARATOR
#endif
#ifdef LIST_OPENMP
#undef LIST_OPENMP
#endif
#ifdef LIST_TEST
#undef LIST_TEST
#endif
#ifdef LIST_DEBUG
#undef LIST_DEBUG
#endif
#ifdef LIST_NDEBUG
#undef LIST_NDEBUG
#undef NDEBUG
#endif
#ifdef LIST_SOME_COMPARATOR
#undef LIST_SOME_COMPARATOR
#endif
#ifdef LIST_SORT_INTERNALS
#undef LIST_SORT_INTERNALS /* Each List type has their own. */
#endif
#else /* !LIST_U_NAME --><-- LIST_U_NAME
Internally #included.
@param LIST_U_NAME: A unique name of the linked list; required;
@param LIST_U_COMPARATOR: an optional comparator. */
/* Generics using the preprocessor. */
#ifdef T_U_
#undef T_U_
#endif
#ifdef PT_U_
#undef PT_U_
#endif
#ifdef U_
#undef U_
#endif
#ifdef LIST_U_ANONYMOUS /* <-- anon: "empty macro arguments were standardized
in C99" */
#define U_(thing) PCAT(anonymous, thing)
#define T_U_(thing1, thing2) CAT(CAT(LIST_NAME, thing1), thing2)
#define PT_U_(thing1, thing2) PCAT(list, PCAT(PCAT(LIST_NAME, thing1), \
CAT(_, thing2)))
#else /* anon --><-- !anon */
#define U_(thing) PCAT(LIST_U_NAME, thing)
#define T_U_(thing1, thing2) CAT(CAT(LIST_NAME, thing1), \
CAT(LIST_U_NAME, thing2))
#define PT_U_(thing1, thing2) PCAT(list, PCAT(PCAT(LIST_NAME, thing1), \
PCAT(LIST_U_NAME, thing2)))
#endif /* !anon --> */
/** "Floyd's" tortoise-hare algorithm for cycle detection when in debug mode.
You do not want cycles! Asserts when a cycle is found; no-op unless
{LIST_DEBUG} is on. */
static void PT_U_(cycle, crash)(struct PT_(X) *const x) {
#ifdef LIST_DEBUG
struct PT_(X) *turtle, *hare;
assert(x);
/* Rewind to the start of the chain, then advance {turtle} one step and
{hare} two steps per iteration; they can only meet on a cycle. */
for(turtle = x; turtle->U_(prev); turtle = turtle->U_(prev));
for(hare = turtle->U_(next); (turtle = turtle->U_(next),
hare = hare->U_(next)) && (hare = hare->U_(next)); ) {
assert(turtle != hare);
}
#else
/* Release build: keep the parameter formally used. */
(void)(x);
#endif
}
/** Private: consistency check of index {<U>}: walks the list forwards, then
 backwards, and verifies both traversals count the same number of nodes.
 @return Number of elements. */
static size_t PT_U_(x, audit)(const struct T_(List) *const list) {
    struct PT_(X) *node;
    size_t forward = 0, backward = 0;
    assert(list);
    /* Head sentinel towards tail. */
    for(node = list->head.U_(next); node->U_(next); node = node->U_(next))
        forward++;
    /* Tail sentinel towards head; must agree. */
    for(node = list->tail.U_(prev); node->U_(prev); node = node->U_(prev))
        backward++;
    assert(forward == backward);
    return forward;
}
/** Private: splice {add} into the chain immediately before {x} in {<U>}. */
static void PT_U_(x, add_before)(struct PT_(X) *const x,
    struct PT_(X) *const add) {
    struct PT_(X) *prior;
    assert(x && add && x != add && x->U_(prev));
    /* Cache the node that precedes {x} before any links change. */
    prior = x->U_(prev);
    add->U_(prev) = prior;
    add->U_(next) = x;
    prior->U_(next) = add;
    x->U_(prev) = add;
    PT_U_(cycle, crash)(add);
}
/** Private: splice {add} into the chain immediately after {x} in {<U>}. */
static void PT_U_(x, add_after)(struct PT_(X) *const x,
    struct PT_(X) *const add) {
    struct PT_(X) *follower;
    assert(x && add && x != add && x->U_(next));
    /* Cache the node that follows {x} before any links change. */
    follower = x->U_(next);
    add->U_(prev) = x;
    add->U_(next) = follower;
    follower->U_(prev) = add;
    x->U_(next) = add;
    PT_U_(cycle, crash)(add);
}
/** Private: unlink {x} from the {<U>} chain; its neighbours are joined. */
static void PT_U_(x, remove)(struct PT_(X) *const x) {
    struct PT_(X) *const before = x->U_(prev), *const after = x->U_(next);
    assert(before && after);
    before->U_(next) = after;
    after->U_(prev) = before;
    x->U_(prev) = x->U_(next) = 0; /* Just to be clean. */
}
/** Private: cats all {from} in front of {x}, (don't cat {head}, instead
{head->next}); {from} will be empty after. Careful that {x} is not in {from}
because that will just erase the list.
@order \Theta(1) */
static void PT_U_(x, cat)(struct PT_(X) *const x,
struct T_(List) *const from) {
assert(x && from && x->U_(prev) &&
!from->head.U_(prev) && from->head.U_(next)
&& from->tail.U_(prev) && !from->tail.U_(next));
/* Splice the whole chain of {from} between {x->prev} and {x}. */
from->head.U_(next)->U_(prev) = x->U_(prev);
x->U_(prev)->U_(next) = from->head.U_(next);
from->tail.U_(prev)->U_(next) = x;
x->U_(prev) = from->tail.U_(prev);
/* Leave {from} empty: its sentinels point at each other. */
from->head.U_(next) = &from->tail;
from->tail.U_(prev) = &from->head;
PT_U_(cycle, crash)(x);
PT_U_(cycle, crash)(&from->head);
}
/** Private: callback when {realloc} changes pointers. Called in
\see{PT_(migrate_each)}. Fixes the {<U>} links touching this node.
@order \Theta(1) */
static void PT_U_(x, migrate)(struct PT_(X) *const x,
const struct Migrate *const migrate) {
int is;
assert(x && x->U_(prev) && x->U_(next) && migrate && migrate->begin
&& migrate->begin < migrate->end && migrate->delta);
/* If node out of the migration region, it must have node into. Otherwise,
assume the other node is on the list of migrates. */
if(!PT_(migrate)(&x->U_(prev), migrate))
is = PT_(migrate)(&x->U_(prev)->U_(next), migrate), assert(is);
if(!PT_(migrate)(&x->U_(next), migrate))
is = PT_(migrate)(&x->U_(next)->U_(prev), migrate), assert(is);
/* {is} is only consulted by {assert}; the cast silences NDEBUG builds. */
(void)is;
}
/** Private: when the actual list but not the data changes locations. */
static void PT_U_(list, self_correct)(struct T_(List) *const list) {
/* NOTE(review): the {+ 1} trick detects the empty case by assuming the
stale {head.next} points exactly one node past the stale {tail.prev},
i.e. that {head} and {tail} are adjacent; confirm -- see @fixme below. */
if(list->head.U_(next) == list->tail.U_(prev) + 1) {
/* They are packed -> the list is empty. @fixme Test thoroughly! */
list->head.U_(next) = &list->tail;
list->tail.U_(prev) = &list->head;
} else {
/* Non-empty: only the sentinels moved; re-aim the end nodes at them. */
list->head.U_(next)->U_(prev) = &list->head;
list->tail.U_(prev)->U_(next) = &list->tail;
}
}
/** @param data: Must be part of a {List}. If {data} are not part of a valid
list or has migrated locations due to a backing {realloc}, this function is
undefined. If null, returns null.
@return The next element in {<U>}. When {data} is the last element, returns
null.
@order \Theta(1)
@allow */
static T *T_U_(List, Next)(const T *const data) {
const struct PT_(X) *const x = &PT_(node_holds_const_data)(data)->x;
struct PT_(X) *next_x;
if(!data) return 0;
assert(x->U_(next));
if(!(next_x = x->U_(next))->U_(next)) return 0;
return &PT_(node_holds_x)(next_x)->data;
}
/** @param data: Must be part of a {List}. If {data} are not part of a valid
list or has migrated locations due to a backing {realloc}, this function is
undefined. If null, returns null.
@return The previous element in {<U>}. When {data} is the first element,
returns null.
@order \Theta(1)
@allow */
static T *T_U_(List, Previous)(const T *const data) {
const struct PT_(X) *const x = &PT_(node_holds_const_data)(data)->x;
struct PT_(X) *prev_x;
if(!data) return 0;
assert(x->U_(prev));
if(!(prev_x = x->U_(prev))->U_(prev)) return 0;
return &PT_(node_holds_x)(prev_x)->data;
}
/** @param list: If null, returns null.
 @return A pointer to the first element of {list}, or null when empty.
 @order \Theta(1)
 @allow */
static T *T_U_(List, First)(const struct T_(List) *const list) {
    struct PT_(X) *front;
    if(!list) return 0;
    assert(list->head.U_(next));
    front = list->head.U_(next);
    /* Only the tail sentinel follows the head: the list is empty. */
    if(!front->U_(next)) return 0;
    return &PT_(node_holds_x)(front)->data;
}
/** @param list: If null, returns null.
 @return A pointer to the last element of {list}, or null when empty.
 @order \Theta(1)
 @allow */
static T *T_U_(List, Last)(const struct T_(List) *const list) {
    struct PT_(X) *back;
    if(!list) return 0;
    assert(list->tail.U_(prev));
    back = list->tail.U_(prev);
    /* Only the head sentinel precedes the tail: the list is empty. */
    if(!back->U_(prev)) return 0;
    return &PT_(node_holds_x)(back)->data;
}
/** Un-associates the first element in the order {<U>} with the list, if the
 list is not empty.
 @param list: If null, returns null.
 @return The erstwhile first element or null if the list was empty.
 @fixme Untested. */
static T *T_U_(List, Shift)(struct T_(List) *const list) {
    struct PT_(X) *first;
    if(!list) return 0;
    first = list->head.U_(next);
    /* The head links straight to the tail sentinel: empty. */
    if(!first->U_(next)) return 0;
    PT_(remove)(first);
    return &PT_(node_holds_x)(first)->data;
}
/** Un-associates the last element in the order {<U>} with the list, if the
 list is not empty.
 @param list: If null, returns null.
 @return The erstwhile last element or null if the list was empty.
 @fixme Untested. */
static T *T_U_(List, Pop)(struct T_(List) *const list) {
    struct PT_(X) *last;
    if(!list) return 0;
    last = list->tail.U_(prev);
    /* The tail links straight to the head sentinel: empty. */
    if(!last->U_(prev)) return 0;
    PT_(remove)(last);
    return &PT_(node_holds_x)(last)->data;
}
#ifdef LIST_U_COMPARATOR /* <-- comp */
/* Check that each of {LIST_COMPARATOR} and {LIST_U[A-D]_COMPARATOR} are
functions implementing {<PT>Comparator}. */
static const PT_(Comparator) PT_U_(data, cmp) = (LIST_U_COMPARATOR);
/** Private: merges {blist} into {alist} when we don't know anything about the
data; on equal elements, places {alist} first. Both lists must already be
sorted in {<U>}; {blist} is left empty.
@order {O(n + m)}. */
static void PT_U_(list, merge)(struct T_(List) *const alist,
struct T_(List) *const blist) {
struct PT_(X) *hind, *a, *b;
assert(alist && blist);
/* {blist} empty -- that was easy. */
if(!(b = blist->head.U_(next))->U_(next)) return;
/* {alist} empty -- {O(1)} cat is more efficient. */
if(!(a = alist->head.U_(next))->U_(next))
{ PT_U_(x, cat)(&alist->tail, blist); return; }
/* Merge: {hind} trails behind as the last node already linked in. */
for(hind = &alist->head; ; ) {
if(PT_U_(data, cmp)(&PT_(node_holds_x)(a)->data,
&PT_(node_holds_x)(b)->data) < 0) {
a->U_(prev) = hind, hind = hind->U_(next) = a;
/* {a} exhausted: link the remainder of {b} through to {alist}'s tail. */
if(!(a = a->U_(next))->U_(next))
{ b->U_(prev) = hind, hind->U_(next) = b;
blist->tail.U_(prev)->U_(next) = &alist->tail,
alist->tail.U_(prev) = blist->tail.U_(prev); break; }
} else {
b->U_(prev) = hind, hind = hind->U_(next) = b;
/* {b} exhausted: the rest of {a} is already chained to {alist}'s tail. */
if(!(b = b->U_(next))->U_(next))
{ a->U_(prev) = hind, hind->U_(next) = a; break; }
}
}
/* Leave {blist} empty. */
blist->head.U_(next) = &blist->tail, blist->tail.U_(prev) = &blist->head;
}
#ifndef LIST_SORT_INTERNALS /* <!-- sort internals only once per translation
unit */
#define LIST_SORT_INTERNALS
/* A run is a temporary sequence of values in the array that is weakly
increasing; we store it's size temporarily. */
struct PT_(Run) {
struct PT_(X) *head, *tail;
size_t size;
};
/* Store the maximum capacity for the indexing with {size_t}. (Much more then
we need, in most cases.) \${
range(runs) = Sum_{k=0}^runs 2^{runs-k} - 1
= 2^{runs+1} - 2
2^bits = 2 (r^runs - 1)
runs = log(2^{bits-1} + 1) / log 2
runs <= 2^{bits - 1}, 2^{bits + 1} > 0} */
struct PT_(Runs) {
struct PT_(Run) run[(sizeof(size_t) << 3) - 1];
size_t run_no;
};
#endif /* sort internals --> */
/** Inserts the first element from the larger of two sorted runs, then merges
the rest. \cite{Peters2002Timsort}, via \cite{McIlroy1993Optimistic}, does
long merges by galloping, but we don't have random access to the data. In
practice, this is {2%} slower on randomly distributed keys when the
linked-list size is over {500 000}; randomly distributed keys have high
insertion times that do well in standard merging. However, it's (potentially
much) faster when the keys have structure: observed, {[-2%, 500%]}.
Merges the top two runs of {r} into one; {r->run_no} decreases by one. */
static void PT_U_(runs, merge)(struct PT_(Runs) *const r) {
struct PT_(X) *const run_a = r->run + r->run_no - 2;
struct PT_(Run) *const run_b = run_a + 1;
struct PT_(X) *a = run_a->tail, *b = run_b->head, *chosen;
assert(r->run_no >= 2);
/* @fixme We are doing one-to-many compares in some cases? */
if(run_a->size <= run_b->size) {
struct PT_(X) *prev_chosen;
/* Run {a} is smaller: downwards insert {b.head} followed by upwards
merge. Insert the first element of {b} downwards into {a}. */
for( ; ; ) {
if(PT_U_(data, cmp)(&PT_(node_holds_x)(a)->data,
&PT_(node_holds_x)(b)->data) <= 0) {
chosen = a;
a = a->U_(next);
break;
}
/* Walked off the front of {a}: all of {a} > {b.head}. */
if(!a->U_(prev)) {
run_a->head = run_b->head;
chosen = b;
b = b->U_(next);
break;
}
a = a->U_(prev);
}
/* Merge upwards; while the lists are interleaved. */
while(chosen->U_(next)) {
prev_chosen = chosen;
if(PT_U_(data, cmp)(&PT_(node_holds_x)(a)->data,
&PT_(node_holds_x)(b)->data) > 0) {
chosen = b;
b = b->U_(next);
} else {
chosen = a;
a = a->U_(next);
}
prev_chosen->U_(next) = chosen;
chosen->U_(prev) = prev_chosen;
}
/* Splice the one list left. */
if(!a) {
b->U_(prev) = chosen;
chosen->U_(next) = b;
run_a->tail = run_b->tail;
} else {
a->U_(prev) = chosen;
chosen->U_(next) = a;
}
} else {
struct PT_(X) *next_chosen;
int is_a_tail = 0;
/* Run {b} is smaller; upwards insert followed by downwards merge.
Insert the last element of {a} upwards into {b}. */
for( ; ; ) {
if(PT_U_(data, cmp)(&PT_(node_holds_x)(a)->data,
&PT_(node_holds_x)(b)->data) <= 0) {
chosen = b;
b = b->U_(prev);
break;
}
/* Here, {a > b}. */
if(!b->U_(next)) {
is_a_tail = -1;
chosen = a;
a = a->U_(prev);
break;
}
b = b->U_(next);
}
if(!is_a_tail) run_a->tail = run_b->tail;
/* Merge downwards, while the lists are interleaved. */
while(chosen->U_(prev)) {
next_chosen = chosen;
if(PT_U_(data, cmp)(&PT_(node_holds_x)(a)->data,
&PT_(node_holds_x)(b)->data) > 0) {
chosen = a;
a = a->U_(prev);
} else {
chosen = b;
b = b->U_(prev);
}
next_chosen->U_(prev) = chosen;
chosen->U_(next) = next_chosen;
}
/* Splice the one list left. */
if(!a) {
b->U_(next) = chosen;
chosen->U_(prev) = b;
run_a->head = run_b->head;
} else {
a->U_(next) = chosen;
chosen->U_(prev) = a;
}
}
/* The merged run keeps {run_a}'s slot. */
run_a->size += run_b->size;
r->run_no--;
}
/** It's kind of experimental. It hasn't been optimised; I think it does
useless compares. It's so beautiful. A natural merge sort: detects weakly
increasing (and, reversed, decreasing) runs and greedily merges them. */
static void PT_U_(natural, sort)(struct T_(List) *const list) {
/* This is potentially half-a-KB; we had an option to store as a global,
but that was probably overkill. */
struct PT_(Runs) runs;
struct PT_(Run) *new_run;
/* Part of the state machine for classifying points wrt their neighbours. */
enum { UNSURE, INCREASING, DECREASING } mono;
/* The data that we are sorting. */
struct PT_(X) *a, *b, *c, *first_iso_a;
/* {run_count} is different from {runs.run_no} in that it only increases;
only used for calculating the path up the tree. */
size_t run_count, rc;
/* The value of the comparison. */
int comp;
/* Needs an element. */
a = list->head.U_(next), assert(a);
if(!(b = a->U_(next))) return;
/* Reset the state machine and output to just {a} in the first run. */
mono = UNSURE;
runs.run_no = 1;
new_run = runs.run + 0, run_count = (size_t)1;
new_run->size = 1;
first_iso_a = new_run->head = new_run->tail = a;
/* While {a} and {b} are elements (that are consecutive.) {c} may not be. */
for(c = b->U_(next); c; a = b, b = c, c = c->U_(next)) {
comp = PT_U_(data, cmp)(&PT_(node_holds_x)(a)->data,
&PT_(node_holds_x)(b)->data);
/* State machine that considers runs in both directions -- in practice,
slightly slower than only considering increasing runs on most cases;
however, I would hate to see my code replaced with one line; reverse
order is 15 times faster, but it's not likely. */
if(comp < 0) { /* {a < b}, increasing -- good. */
if(mono != DECREASING) { /* If decreasing, inflection. */
mono = INCREASING;
new_run->size++;
continue;
}
} else if(comp > 0) { /* Decreasing; reverse preserving stability. */
if(mono != INCREASING) { /* If increasing, inflection. */
mono = DECREASING;
/* Prepend {b}: the decreasing run is built in reverse. */
b->U_(next) = first_iso_a;
first_iso_a->U_(prev) = b;
new_run->head = first_iso_a = b;
new_run->size++;
continue;
}
new_run->tail = a; /* Terminating an increasing sequence. */
} else { /* {a} == {b} */
if(mono == DECREASING) { /* Extend. */
/* Equal elements stay in original order even within a reversed run. */
struct PT_(X) *const a_next = a->U_(next);
b->U_(next) = a_next;
a_next->U_(prev) = b;
a->U_(next) = b;
b->U_(prev) = a;
} else { /* Monotone or weakly increasing. */
new_run->tail = b;
}
new_run->size++;
continue;
}
/* Head and tail don't necessarily correspond to the first and last. */
new_run->head->U_(prev) = new_run->tail->U_(next) = 0;
/* Greedy merge: keeps space to {O(log n)} instead of {O(n)}. */
for(rc = run_count; !(rc & 1) && runs.run_no >= 2; rc >>= 1)
PT_U_(runs, merge)(&runs);
/* Reset the state machine and output to just {b} at the next run. */
mono = UNSURE;
assert(runs.run_no < sizeof(runs.run) / sizeof(*runs.run));
new_run = runs.run + runs.run_no++, run_count++;
new_run->size = 1;
new_run->head = new_run->tail = first_iso_a = b;
}
/* Terminating the last increasing sequence. */
if(mono == INCREASING) new_run->tail = a;
new_run->tail->U_(next) = new_run->head->U_(prev) = 0;
/* Clean up the rest; when only one run, propagate list_runs[0] to head. */
while(runs.run_no > 1) PT_U_(runs, merge)(&runs);
runs.run[0].head->U_(prev) = &list->head;
runs.run[0].tail->U_(next) = &list->tail;
list->head.U_(next) = runs.run[0].head;
list->tail.U_(prev) = runs.run[0].tail;
}
/** Sorts {<U>}, but leaves the other lists in {<T>} alone. Must have a
 comparator defined for the index.
 @param list: if null, does nothing.
 @order \Omega({list}.n), O({list}.n log {list}.n)
 @allow */
static void T_U_(List, Sort)(struct T_(List) *const list) {
    if(list) PT_U_(natural, sort)(list);
}
/** Compares two linked-lists as sequences in the order specified by {<U>}.
 @return The first comparator that is not equal to zero, or 0 if they are
 equal. Null is considered as before everything else; two null pointers are
 considered equal. Must have a comparator defined for this index.
 @implements <<T>List>Comparator
 @order \Theta(min({alist}.n, {blist}.n))
 @allow */
static int T_U_(List, Compare)(const struct T_(List) *const alist,
    const struct T_(List) *const blist) {
    struct PT_(X) *a, *b;
    int diff;
    /* Null counts as {-\infty}; two nulls compare equal. */
    if(!alist) return blist ? -1 : 0;
    if(!blist) return 1;
    /* Lexicographic scan: the first unequal pair decides; if one list is a
     prefix of the other, the shorter comes first. */
    a = alist->head.U_(next);
    b = blist->head.U_(next);
    for( ; ; a = a->U_(next), b = b->U_(next)) {
        if(!a->U_(next)) return b->U_(next) ? -1 : 0;
        if(!b->U_(next)) return 1;
        diff = PT_U_(data, cmp)(&PT_(node_holds_x)(a)->data,
            &PT_(node_holds_x)(b)->data);
        if(diff) return diff;
    }
}
/* @fixme {P_T_(List, Unique)} remove duplicate values. */
/** Private: {list <- a \mask b}. Prefers {a} to {b} when equal. Both lists
must be sorted in {<U>}.
@order O({a}.n + {b}.n) */
static void PT_U_(boolean, seq)(struct T_(List) *const list,
struct T_(List) *const alist, struct T_(List) *const blist,
const enum ListOperation mask) {
struct PT_(X) *a = alist ? alist->head.U_(next) : 0,
*b = blist ? blist->head.U_(next) : 0, *temp;
int comp;
/* NOTE(review): despite the null-tolerant initialisers above, this assert
requires both lists non-null; a null argument trips it (or is UB under
NDEBUG.) Confirm which contract is intended. */
assert(a && b);
/* March the two sorted sequences together, routing each element by {mask}. */
while(a->U_(next) && b->U_(next)) {
comp = PT_U_(data, cmp)(&PT_(node_holds_x)(a)->data,
&PT_(node_holds_x)(b)->data);
if(comp < 0) {
temp = a, a = a->U_(next);
if(mask & LO_SUBTRACTION_AB) {
PT_(remove)(temp);
if(list) PT_(add_before)(&list->tail, temp);
}
} else if(comp > 0) {
temp = b, b = b->U_(next);
if(mask & LO_SUBTRACTION_BA) {
PT_(remove)(temp);
if(list) PT_(add_before)(&list->tail, temp);
}
} else {
temp = a, a = a->U_(next), b = b->U_(next);
if(mask & LO_INTERSECTION) {
PT_(remove)(temp);
if(list) PT_(add_before)(&list->tail, temp);
}
}
}
/* One list exhausted: take the other's leftovers when asked to. */
if(mask & LO_DEFAULT_A) {
while((temp = a, a = a->U_(next))) {
PT_(remove)(temp);
if(list) PT_(add_before)(&list->tail, temp);
}
}
if((mask & LO_DEFAULT_B)) {
while((temp = b, b = b->U_(next))) {
PT_(remove)(temp);
if(list) PT_(add_before)(&list->tail, temp);
}
}
}
/** Appends {list} with {b} subtracted from {a} as a sequence in {<U>}. Must
have a comparator defined.
@param list: If null, then it removes elements.
@order O({a}.n + {b}.n)
@allow */
static void T_U_(List, TakeSubtraction)(struct T_(List) *const list,
struct T_(List) *const a, struct T_(List) *const b) {
/* {a \ b}: elements only in {a}; the unmatched {a} tail via LO_DEFAULT_A. */
PT_U_(boolean, seq)(list, a, b, LO_SUBTRACTION_AB | LO_DEFAULT_A);
}
/** Appends {list} with the union of {a} and {b} as a sequence in {<U>}. Equal
elements are moved from {a}.
@param list: If null, then it removes elements.
@order O({a}.n + {b}.n)
@allow */
static void T_U_(List, TakeUnion)(struct T_(List) *const list,
struct T_(List) *const a, struct T_(List) *const b) {
/* Everything from both lists; equal pairs contribute the {a} element. */
PT_U_(boolean, seq)(list, a, b, LO_SUBTRACTION_AB | LO_SUBTRACTION_BA
| LO_INTERSECTION | LO_DEFAULT_A | LO_DEFAULT_B);
}
/** Appends {list} with the intersection of {a} and {b} as a sequence in {<U>}.
Equal elements are moved from {a}.
@param list: If null, then it removes elements.
@order O({a}.n + {b}.n)
@allow */
static void T_U_(List, TakeIntersection)(struct T_(List) *const list,
struct T_(List) *const a, struct T_(List) *const b) {
/* Only elements present in both; the {a} copy is the one moved. */
PT_U_(boolean, seq)(list, a, b, LO_INTERSECTION);
}
/** Appends {list} with {a} exclusive-or {b} as a sequence in {<U>}. Equal
elements are moved from {a}.
@param list: If null, then it removes elements.
@order O({a}.n + {b}.n)
@allow */
static void T_U_(List, TakeXor)(struct T_(List) *const list,
struct T_(List) *const a, struct T_(List) *const b) {
/* Symmetric difference: everything except elements present in both. */
PT_U_(boolean, seq)(list, a, b, LO_SUBTRACTION_AB | LO_SUBTRACTION_BA
| LO_DEFAULT_A | LO_DEFAULT_B);
}
#endif /* comp --> */
/** Appends {list} with {from} if {predicate} is null or true in the order
 specified by {<U>}.
 @param list: If null, then it removes elements.
 @param from: If null, does nothing.
 @order ~ \Theta({list}.n) \times O({predicate})
 @allow */
static void T_U_(List, TakeIf)(struct T_(List) *const list,
    struct T_(List) *const from, const PT_(Predicate) predicate) {
    struct PT_(X) *cursor, *ahead;
    if(!from || from == list) return;
    cursor = from->head.U_(next);
    /* Save {ahead} first because {cursor} may be unlinked from {from}. */
    while((ahead = cursor->U_(next))) {
        if(!predicate || predicate(&PT_(node_holds_x)(cursor)->data)) {
            PT_(remove)(cursor);
            if(list) PT_(add_before)(&list->tail, cursor);
        }
        cursor = ahead;
    }
}
/** Appends {list} with {from} if {bipredicate} is null or true in the order
 specified by {<U>}.
 @param list: If null, then it removes elements.
 @param from: If null, does nothing.
 @param param: Passed as the second argument of {bipredicate}.
 @order ~ \Theta({list}.n) \times O({predicate})
 @fixme Void. No.
 @allow */
static void T_U_(List, BiTakeIf)(struct T_(List) *const list,
    struct T_(List) *const from, const PT_(BiPredicate) bipredicate,
    void *const param) {
    struct PT_(X) *cursor, *ahead;
    if(!from || from == list) return;
    cursor = from->head.U_(next);
    /* Save {ahead} first because {cursor} may be unlinked from {from}. */
    while((ahead = cursor->U_(next))) {
        if(!bipredicate
            || bipredicate(&PT_(node_holds_x)(cursor)->data, param)) {
            PT_(remove)(cursor);
            if(list) PT_(add_before)(&list->tail, cursor);
        }
        cursor = ahead;
    }
}
/** Performs {action} for each element in {list} in the order specified by
 {<U>}.
 @param list, action: If null, does nothing.
 @order ~ \Theta({list}.n) \times O({action})
 @allow */
static void T_U_(List, ForEach)(struct T_(List) *const list,
    const PT_(Action) action) {
    struct PT_(X) *cursor, *ahead;
    if(!list || !action) return;
    cursor = list->head.U_(next);
    /* {ahead} is fetched before the call, as in the original. */
    while((ahead = cursor->U_(next))) {
        action(&PT_(node_holds_x)(cursor)->data);
        cursor = ahead;
    }
}
/** Performs {biaction} for each element in the list in the order specified by
 {<U>}. Do not modify the memory addresses of the elements while the list is
 iterating.
 @param list, biaction: If null, does nothing.
 @param param: Used as the second parameter of {biaction}.
 @order ~ \Theta({list}.n) \times O({biaction})
 @fixme Untested.
 @fixme Void. No.
 @allow */
static void T_U_(List, BiForEach)(struct T_(List) *const list,
    const PT_(BiAction) biaction, void *const param) {
    struct PT_(X) *cursor, *ahead;
    if(!list || !biaction) return;
    cursor = list->head.U_(next);
    /* {ahead} is fetched before the call, as in the original. */
    while((ahead = cursor->U_(next))) {
        biaction(&PT_(node_holds_x)(cursor)->data, param);
        cursor = ahead;
    }
}
/** @param list, predicate: If null, returns null.
 @return The first {<T>} in the linked-list, ordered by {<U>}, that causes the
 {predicate} with {<T>} as argument to return false, or null if the {predicate}
 is true for every case.
 @order ~ O({list}.n) \times O({predicate})
 @allow */
static T *T_U_(List, ShortCircuit)(struct T_(List) *const list,
    const PT_(Predicate) predicate) {
    struct PT_(X) *cursor, *ahead;
    T *datum;
    if(!list || !predicate) return 0;
    for(cursor = list->head.U_(next); (ahead = cursor->U_(next));
        cursor = ahead) {
        datum = &PT_(node_holds_x)(cursor)->data;
        /* First falsifying element wins. */
        if(!predicate(datum)) return datum;
    }
    return 0;
}
/** @param list, bipredicate: If null, returns null.
 @param param: Used as the second parameter of {bipredicate}.
 @return The first {<T>} in the linked-list, ordered by {<U>}, that
 causes the {bipredicate} with {<T>} and {param} as arguments to return false,
 or null if the {bipredicate} is true for every case.
 @order ~ O({list}.n) \times O({predicate})
 @fixme Void. No.
 @allow */
static T *T_U_(List, BiShortCircuit)(struct T_(List) *const list,
    const PT_(BiPredicate) bipredicate, void *const param) {
    struct PT_(X) *cursor, *ahead;
    T *datum;
    if(!list || !bipredicate) return 0;
    for(cursor = list->head.U_(next); (ahead = cursor->U_(next));
        cursor = ahead) {
        datum = &PT_(node_holds_x)(cursor)->data;
        /* First falsifying element wins. */
        if(!bipredicate(datum, param)) return datum;
    }
    return 0;
}
#ifdef LIST_TO_STRING /* <-- print */
#ifndef LIST_PRINT_THINGS /* <-- once inside translation unit */
#define LIST_PRINT_THINGS
/* Tokens shared by every list type's printer; declared once per translation
 unit. */
static const char *const list_cat_start = "{ ";
static const char *const list_cat_end = " }";
static const char *const list_cat_alter_end = "...}";
static const char *const list_cat_sep = ", ";
static const char *const list_cat_star = "*";
static const char *const list_cat_null = "null";
/* A bounded string accumulator: appends through {cursor} into {print}, never
 advancing past {left} payload characters; {is_truncated} latches once the
 space runs out. The caller must reserve slack after {print_size} for any
 closing token it appends directly. */
struct List_SuperCat {
	char *print, *cursor;
	size_t left;
	int is_truncated;
};
/* Begins accumulation into {print} with {print_size} characters of payload. */
static void list_super_cat_init(struct List_SuperCat *const cat,
	char *const print, const size_t print_size) {
	cat->cursor = cat->print = print;
	cat->left = print_size;
	cat->is_truncated = 0;
	*print = '\0';
}
/* Appends {append}, clipped to the space remaining; after clipping, further
 appends are ignored. */
static void list_super_cat(struct List_SuperCat *const cat,
	const char *const append) {
	int printed;
	size_t advance;
	if(cat->is_truncated) return;
	printed = sprintf(cat->cursor, "%.*s", (int)cat->left, append);
	if(printed < 0) { cat->is_truncated = -1; return; } /*implementation defined*/
	if(printed == 0) return;
	advance = (size_t)printed;
	if(advance >= cat->left)
		cat->is_truncated = -1, advance = cat->left - 1;
	cat->cursor += advance, cat->left -= advance;
}
#endif /* once --> */
/** Can print 4 things at once before it overwrites. One must set
{LIST_TO_STRING} to a function implementing {<T>ToString} to get this
functionality.
@return Prints the {list} in a static buffer.
@order \Theta(1); it has a 255 character limit; every element takes some of it.
@allow */
static char *T_U_(List, ToString)(const struct T_(List) *const list) {
static char buffer[4][256];
static int buffer_i;
struct List_SuperCat cat;
char scratch[12];
struct PT_(X) *x;
assert(strlen(list_cat_alter_end) >= strlen(list_cat_end));
assert(sizeof buffer > strlen(list_cat_alter_end));
/* Reserve room at the end of the buffer for the closing token, so the
final {sprintf} below cannot overflow. */
list_super_cat_init(&cat, buffer[buffer_i],
sizeof *buffer / sizeof **buffer - strlen(list_cat_alter_end));
/* Rotate through the 4 static buffers. */
buffer_i++, buffer_i &= 3;
if(!list) {
list_super_cat(&cat, list_cat_null);
return cat.print;
}
list_super_cat(&cat, list_cat_start);
for(x = list->head.U_(next); x->U_(next); x = x->U_(next)) {
if(x != list->head.U_(next)) list_super_cat(&cat, list_cat_sep);
/* {to_string} fills {scratch}; force termination in case it did not. */
PT_(to_string)(&PT_(node_holds_x)(x)->data, &scratch),
scratch[sizeof scratch - 1] = '\0';
list_super_cat(&cat, scratch);
if(cat.is_truncated) break;
}
/* Safe: the reserved slack guarantees room for the closing token. */
sprintf(cat.cursor, "%s",
cat.is_truncated ? list_cat_alter_end : list_cat_end);
return cat.print;
}
#endif /* print --> */
static void PT_U_(sub_unused, coda)(void);
/** This silences unused function warnings from the pre-processor, but allows
optimisation, (hopefully.)
\url{ http://stackoverflow.com/questions/43841780/silencing-unused-static-function-warnings-for-a-section-of-code } */
static void PT_U_(sub_unused, list)(void) {
/* All calls pass null; per the functions' null contracts, these create
references for the compiler but do no work at run time. */
T_U_(List, Next)(0);
T_U_(List, Previous)(0);
T_U_(List, First)(0);
T_U_(List, Last)(0);
T_U_(List, Shift)(0);
T_U_(List, Pop)(0);
#ifdef LIST_U_COMPARATOR /* <-- comp */
T_U_(List, Sort)(0);
T_U_(List, Compare)(0, 0);
T_U_(List, TakeSubtraction)(0, 0, 0);
T_U_(List, TakeUnion)(0, 0, 0);
T_U_(List, TakeIntersection)(0, 0, 0);
T_U_(List, TakeXor)(0, 0, 0);
#endif /* comp --> */
T_U_(List, TakeIf)(0, 0, 0);
T_U_(List, BiTakeIf)(0, 0, 0, 0);
T_U_(List, ForEach)(0, 0);
T_U_(List, BiForEach)(0, 0, 0);
T_U_(List, ShortCircuit)(0, 0);
T_U_(List, BiShortCircuit)(0, 0, 0);
#ifdef LIST_TO_STRING /* <-- string */
T_U_(List, ToString)(0);
#endif /* string --> */
PT_U_(cycle, crash)(0);
PT_U_(sub_unused, coda)();
}
/** {clang}'s pre-processor is not fooled. */
static void PT_U_(sub_unused, coda)(void) {
PT_U_(sub_unused, list)();
}
/* Un-define stuff for the next. */
#undef LIST_U_NAME
#ifdef LIST_U_COMPARATOR /* <-- comp */
#undef LIST_U_COMPARATOR
#endif /* comp --> */
#endif /* LIST_U_NAME --> */
|
density.h | // Copyright (c) 2013-2016 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file density.h
*
* \brief Contains definition and partial implementation of sirius::Density class.
*/
#ifndef __DENSITY_H__
#define __DENSITY_H__
#include "periodic_function.h"
#include "k_point_set.h"
#include "simulation_context.h"
#ifdef __GPU
extern "C" void generate_dm_pw_gpu(int num_atoms__,
int num_gvec_loc__,
int num_beta__,
double const* atom_pos__,
int const* gvec__,
double* phase_factors__,
double const* dm__,
double* dm_pw__,
int stream_id__);
extern "C" void sum_q_pw_dm_pw_gpu(int num_gvec_loc__,
int nbf__,
double const* q_pw__,
double const* dm_pw__,
double const* sym_weight__,
double_complex* rho_pw__,
int stream_id__);
extern "C" void update_density_rg_1_gpu(int size__,
cuDoubleComplex const* psi_rg__,
double wt__,
double* density_rg__);
#endif
namespace sirius
{
/// Generate charge density and magnetization from occupied spinor wave-functions.
/** Let's start from the definition of the complex density matrix:
* \f[
* \rho_{\sigma' \sigma}({\bf r}) =
* \sum_{j{\bf k}} n_{j{\bf k}} \Psi_{j{\bf k}}^{\sigma*}({\bf r}) \Psi_{j{\bf k}}^{\sigma'}({\bf r}) =
* \frac{1}{2} \left( \begin{array}{cc} \rho({\bf r})+m_z({\bf r}) &
* m_x({\bf r})-im_y({\bf r}) \\ m_x({\bf r})+im_y({\bf r}) & \rho({\bf r})-m_z({\bf r}) \end{array} \right)
* \f]
* We notice that the diagonal components of the density matrix are actually real and the off-diagonal components are
* expressed trough two independent functions \f$ m_x({\bf r}) \f$ and \f$ m_y({\bf r}) \f$. Having this in mind we
* will work with a slightly different object, namely a real density matrix, defined as a 1-, 2- or 4-dimensional
* (depending on the number of magnetic components) vector with the following elements:
* - \f$ [ \rho({\bf r}) ] \f$ in case of non-magnetic configuration
* - \f$ [ \rho_{\uparrow \uparrow}({\bf r}), \rho_{\downarrow \downarrow}({\bf r}) ] =
* [ \frac{\rho({\bf r})+m_z({\bf r})}{2}, \frac{\rho({\bf r})-m_z({\bf r})}{2} ] \f$ in case of collinear
* magnetic configuration
* - \f$ [ \rho_{\uparrow \uparrow}({\bf r}), \rho_{\downarrow \downarrow}({\bf r}),
* 2 \Re \rho_{\uparrow \downarrow}({\bf r}), -2 \Im \rho_{\uparrow \downarrow}({\bf r}) ] =
* [ \frac{\rho({\bf r})+m_z({\bf r})}{2}, \frac{\rho({\bf r})-m_z({\bf r})}{2},
* m_x({\bf r}), m_y({\bf r}) ] \f$ in the general case of non-collinear magnetic configuration
*
* At this point it is straightforward to compute the density and magnetization in the interstitial (see add_k_point_contribution_rg()).
* The muffin-tin part of the density and magnetization is obtained in a slighlty more complicated way. Recall the
* expansion of spinor wave-functions inside the muffin-tin \f$ \alpha \f$
* \f[
* \Psi_{j{\bf k}}^{\sigma}({\bf r}) = \sum_{\xi}^{N_{\xi}^{\alpha}} {S_{\xi}^{\sigma j {\bf k},\alpha}}
* f_{\ell_{\xi} \lambda_{\xi}}^{\alpha}(r)Y_{\ell_{\xi}m_{\xi}}(\hat {\bf r})
* \f]
* which we insert into expression for the complex density matrix:
* \f[
* \rho_{\sigma' \sigma}({\bf r}) = \sum_{j{\bf k}} n_{j{\bf k}} \sum_{\xi}^{N_{\xi}^{\alpha}}
* S_{\xi}^{\sigma j {\bf k},\alpha*} f_{\ell_{\xi} \lambda_{\xi}}^{\alpha}(r)
* Y_{\ell_{\xi}m_{\xi}}^{*}(\hat {\bf r}) \sum_{\xi'}^{N_{\xi'}^{\alpha}} S_{\xi'}^{\sigma' j{\bf k},\alpha}
* f_{\ell_{\xi'} \lambda_{\xi'}}^{\alpha}(r)Y_{\ell_{\xi'}m_{\xi'}}(\hat {\bf r})
* \f]
* First, we eliminate a sum over bands and k-points by forming an auxiliary density tensor:
* \f[
* D_{\xi \sigma, \xi' \sigma'}^{\alpha} = \sum_{j{\bf k}} n_{j{\bf k}} S_{\xi}^{\sigma j {\bf k},\alpha*}
* S_{\xi'}^{\sigma' j {\bf k},\alpha}
* \f]
* The expression for complex density matrix simplifies to:
* \f[
* \rho_{\sigma' \sigma}({\bf r}) = \sum_{\xi \xi'} D_{\xi \sigma, \xi' \sigma'}^{\alpha}
* f_{\ell_{\xi} \lambda_{\xi}}^{\alpha}(r)Y_{\ell_{\xi}m_{\xi}}^{*}(\hat {\bf r})
* f_{\ell_{\xi'} \lambda_{\xi'}}^{\alpha}(r)Y_{\ell_{\xi'}m_{\xi'}}(\hat {\bf r})
* \f]
* Now we can switch to the real density matrix and write its' expansion in real spherical harmonics. Let's take
* non-magnetic case as an example:
* \f[
* \rho({\bf r}) = \sum_{\xi \xi'} D_{\xi \xi'}^{\alpha}
* f_{\ell_{\xi} \lambda_{\xi}}^{\alpha}(r)Y_{\ell_{\xi}m_{\xi}}^{*}(\hat {\bf r})
* f_{\ell_{\xi'} \lambda_{\xi'}}^{\alpha}(r)Y_{\ell_{\xi'}m_{\xi'}}(\hat {\bf r}) =
* \sum_{\ell_3 m_3} \rho_{\ell_3 m_3}^{\alpha}(r) R_{\ell_3 m_3}(\hat {\bf r})
* \f]
* where
* \f[
* \rho_{\ell_3 m_3}^{\alpha}(r) = \sum_{\xi \xi'} D_{\xi \xi'}^{\alpha} f_{\ell_{\xi} \lambda_{\xi}}^{\alpha}(r)
* f_{\ell_{\xi'} \lambda_{\xi'}}^{\alpha}(r) \langle Y_{\ell_{\xi}m_{\xi}} | R_{\ell_3 m_3} | Y_{\ell_{\xi'}m_{\xi'}} \rangle
* \f]
* We are almost done. Now it is time to switch to the full index notation \f$ \xi \rightarrow \{ \ell \lambda m \} \f$
* and sum over \a m and \a m' indices:
* \f[
* \rho_{\ell_3 m_3}^{\alpha}(r) = \sum_{\ell \lambda, \ell' \lambda'} f_{\ell \lambda}^{\alpha}(r)
* f_{\ell' \lambda'}^{\alpha}(r) d_{\ell \lambda, \ell' \lambda', \ell_3 m_3}^{\alpha}
* \f]
* where
* \f[
* d_{\ell \lambda, \ell' \lambda', \ell_3 m_3}^{\alpha} =
* \sum_{mm'} D_{\ell \lambda m, \ell' \lambda' m'}^{\alpha}
* \langle Y_{\ell m} | R_{\ell_3 m_3} | Y_{\ell' m'} \rangle
* \f]
* This is our final answer: radial components of density and magnetization are expressed as a linear combination of
* quadratic forms in radial functions.
*
* \note density and potential are allocated as global function because it's easier to load and save them. */
class Density
{
private:
Simulation_context& ctx_;
Unit_cell& unit_cell_;
/// Density matrix of the system.
/** In case of full-potential, matrix is stored for local fraction of atoms.
* In case of pseudo-potential, full matrix for all atoms is stored. */
mdarray<double_complex, 4> density_matrix_;
struct paw_density_data_t
{
Atom *atom_{nullptr};
int ia{-1};
/// ae and ps local densities
mdarray<double, 2> ae_density_; // TODO: -> Spheric_function
mdarray<double, 2> ps_density_;
mdarray<double, 3> ae_magnetization_;
mdarray<double, 3> ps_magnetization_;
};
std::vector<paw_density_data_t> paw_density_data_;
/// Pointer to charge density.
/** In the case of full-potential calculation this is the full (valence + core) electron charge density.
* In the case of pseudopotential this is the valence charge density. */
Periodic_function<double>* rho_;
/// Density and magnetization on the coarse FFT mesh.
/** Coarse FFT grid is enough to generate density and magnetization from the wave-functions. The components
* of the <tt>rho_mag_coarse</tt> vector have the following order:
* \f$ \{\rho({\bf r}), m_z({\bf r}), m_x({\bf r}), m_y({\bf r}) \} \f$. */
std::array<std::unique_ptr<experimental::Smooth_periodic_function<double>>, 4> rho_mag_coarse_;
/// Pointer to pseudo core charge density
/** In the case of pseudopotential we need to know the non-linear core correction to the
* exchange-correlation energy which is introduced trough the pseudo core density:
* \f$ E_{xc}[\rho_{val} + \rho_{core}] \f$. The 'pseudo' reflects the fact that
* this density integrated does not reproduce the total number of core elctrons. */
Periodic_function<double>* rho_pseudo_core_{nullptr};
Periodic_function<double>* magnetization_[3];
/// Non-zero Gaunt coefficients.
std::unique_ptr<Gaunt_coefficients<double_complex>> gaunt_coefs_{nullptr};
/// Fast mapping between composite lm index and corresponding orbital quantum number.
std::vector<int> l_by_lm_;
std::unique_ptr<Mixer<double_complex>> high_freq_mixer_{nullptr};
std::unique_ptr<Mixer<double_complex>> low_freq_mixer_{nullptr};
std::unique_ptr<Mixer<double>> mixer_{nullptr};
std::vector<int> lf_gvec_;
std::vector<int> hf_gvec_;
/// Allocate PAW data.
void init_paw();
void generate_paw_atom_density(paw_density_data_t &pdd);
/// Initialize \rho_{ij} - density matrix, occupation on basis of beta-projectors (used for PAW).
void init_density_matrix_for_paw();
/// Symmetrize density matrix.
/** Initially, density matrix is obtained with summation over irreducible BZ:
* \f[
* \tilde n_{\ell \lambda m \sigma, \ell' \lambda' m' \sigma'}^{\alpha} =
* \sum_{j} \sum_{{\bf k}}^{IBZ} \langle Y_{\ell m} u_{\ell \lambda}^{\alpha}| \Psi_{j{\bf k}}^{\sigma} \rangle w_{\bf k} n_{j{\bf k}}
* \langle \Psi_{j{\bf k}}^{\sigma'} | u_{\ell' \lambda'}^{\alpha} Y_{\ell' m'} \rangle
* \f]
* In order to symmetrize it, the following operation is performed:
* \f[
* n_{\ell \lambda m \sigma, \ell' \lambda' m' \sigma'}^{\alpha} = \sum_{{\bf P}}
* \sum_{j} \sum_{\bf k}^{IBZ} \langle Y_{\ell m} u_{\ell \lambda}^{\alpha}| \Psi_{j{\bf P}{\bf k}}^{\sigma} \rangle w_{\bf k} n_{j{\bf k}}
* \langle \Psi_{j{\bf P}{\bf k}}^{\sigma'} | u_{\ell' \lambda'}^{\alpha} Y_{\ell' m'} \rangle
* \f]
* where \f$ {\bf P} \f$ is the space-group symmetry operation. The inner product between wave-function and
* local orbital is transformed as:
* \f[
* \langle \Psi_{j{\bf P}{\bf k}}^{\sigma} | u_{\ell \lambda}^{\alpha} Y_{\ell m} \rangle =
* \int \Psi_{j{\bf P}{\bf k}}^{\sigma *}({\bf r}) u_{\ell \lambda}^{\alpha}(r) Y_{\ell m}(\hat {\bf r}) dr =
* \int \Psi_{j{\bf k}}^{\sigma *}({\bf P}^{-1}{\bf r}) u_{\ell \lambda}^{\alpha}(r) Y_{\ell m}(\hat {\bf r}) dr =
* \int \Psi_{j{\bf k}}^{\sigma *}({\bf r}) u_{\ell \lambda}^{{\bf P}\alpha}(r) Y_{\ell m}({\bf P} \hat{\bf r}) dr
* \f]
* Under rotation the spherical harmonic is transformed as:
* \f[
* Y_{\ell m}({\bf P} \hat{\bf r}) = {\bf P}^{-1}Y_{\ell m}(\hat {\bf r}) = \sum_{m'} D_{m'm}^{\ell}({\bf P}^{-1}) Y_{\ell m'}(\hat {\bf r}) =
* \sum_{m'} D_{mm'}^{\ell}({\bf P}) Y_{\ell m'}(\hat {\bf r})
* \f]
* The inner-product integral is then rewritten as:
* \f[
* \langle \Psi_{j{\bf P}{\bf k}}^{\sigma} | u_{\ell \lambda}^{\alpha} Y_{\ell m} \rangle =
* \sum_{m'} D_{mm'}^{\ell}({\bf P}) \langle \Psi_{j{\bf k}}^{\sigma} | u_{\ell \lambda}^{{\bf P}\alpha} Y_{\ell m} \rangle
* \f]
* and the final expression for density matrix gets the following form:
* \f[
* n_{\ell \lambda m \sigma, \ell' \lambda' m' \sigma'}^{\alpha} = \sum_{{\bf P}}
* \sum_{j} \sum_{\bf k}^{IBZ} \sum_{m_1 m_2} D_{mm_1}^{\ell *}({\bf P}) D_{m'm_2}^{\ell'}({\bf P})
* \langle Y_{\ell m_1} u_{\ell \lambda}^{{\bf P} \alpha}|
* \Psi_{j{\bf k}}^{\sigma} \rangle w_{\bf k} n_{j{\bf k}} \langle \Psi_{j{\bf k}}^{\sigma'} |
* u_{\ell' \lambda'}^{{\bf P}\alpha} Y_{\ell' m_2} \rangle = \sum_{{\bf P}}
* \sum_{m_1 m_2} D_{mm_1}^{\ell *}({\bf P}) D_{m'm_2}^{\ell'}({\bf P})
* \tilde n_{\ell \lambda m_1 \sigma, \ell' \lambda' m_2 \sigma'}^{{\bf P}\alpha}
* \f]
*/
void symmetrize_density_matrix();
/// Reduce complex density matrix over magnetic quantum numbers
/** The following operation is performed:
* \f[
* d_{\ell \lambda, \ell' \lambda', \ell_3 m_3}^{\alpha} =
* \sum_{mm'} D_{\ell \lambda m, \ell' \lambda' m'}^{\alpha}
* \langle Y_{\ell m} | R_{\ell_3 m_3} | Y_{\ell' m'} \rangle
* \f]
*/
template <int num_mag_dims, typename T>
void reduce_density_matrix(Atom_type const& atom_type__,
int ia__,
mdarray<double_complex, 4> const& zdens__,
Gaunt_coefficients<T> const& gaunt_coeffs__,
mdarray<double, 3>& mt_density_matrix__)
{
mt_density_matrix__.zero();
#pragma omp parallel for default(shared)
for (int idxrf2 = 0; idxrf2 < atom_type__.mt_radial_basis_size(); idxrf2++) {
int l2 = atom_type__.indexr(idxrf2).l;
/* only the lower triangle idxrf1 <= idxrf2 of radial-function pairs is stored */
for (int idxrf1 = 0; idxrf1 <= idxrf2; idxrf1++) {
/* linear index of the (idxrf1, idxrf2) pair in triangular packing */
int offs = idxrf2 * (idxrf2 + 1) / 2 + idxrf1;
int l1 = atom_type__.indexr(idxrf1).l;
/* first composite (lm, radial) basis index belonging to radial function idxrf2 */
int xi2 = atom_type__.indexb().index_by_idxrf(idxrf2);
for (int lm2 = Utils::lm_by_l_m(l2, -l2); lm2 <= Utils::lm_by_l_m(l2, l2); lm2++, xi2++) {
int xi1 = atom_type__.indexb().index_by_idxrf(idxrf1);
for (int lm1 = Utils::lm_by_l_m(l1, -l1); lm1 <= Utils::lm_by_l_m(l1, l1); lm1++, xi1++) {
/* sum over the sparse list of non-zero Gaunt coefficients for (lm1, lm2) */
for (int k = 0; k < gaunt_coeffs__.num_gaunt(lm1, lm2); k++) {
int lm3 = gaunt_coeffs__.gaunt(lm1, lm2, k).lm3;
T gc = gaunt_coeffs__.gaunt(lm1, lm2, k).coef;
/* num_mag_dims is a compile-time template parameter; the switch has
 * NO break statements on purpose: case 3 falls through to 1, which
 * falls through to 0, so higher magnetic configurations accumulate
 * all lower components as well */
switch (num_mag_dims) {
case 3: {
mt_density_matrix__(lm3, offs, 2) += 2.0 * std::real(zdens__(xi1, xi2, 2, ia__) * gc);
mt_density_matrix__(lm3, offs, 3) -= 2.0 * std::imag(zdens__(xi1, xi2, 2, ia__) * gc);
}
case 1: {
mt_density_matrix__(lm3, offs, 1) += std::real(zdens__(xi1, xi2, 1, ia__) * gc);
}
case 0: {
mt_density_matrix__(lm3, offs, 0) += std::real(zdens__(xi1, xi2, 0, ia__) * gc);
}
}
}
}
}
}
}
}
/// Add k-point contribution to the auxiliary density matrix.
/** In case of full-potential LAPW complex density matrix has the following expression:
* \f[
* d_{\xi \sigma, \xi' \sigma'}^{\alpha} = \sum_{j{\bf k}} n_{j{\bf k}}
* S_{\xi}^{\sigma j {\bf k},\alpha*} S_{\xi'}^{\sigma' j {\bf k},\alpha}
* \f]
*
* where \f$ S_{\xi}^{\sigma j {\bf k},\alpha} \f$ are the expansion coefficients of
* spinor wave functions inside muffin-tin spheres.
*
* In case of LDA+U the occupation matrix is also computed. It has the following expression:
* \f[
* n_{\ell,mm'}^{\sigma \sigma'} = \sum_{i {\bf k}}^{occ} \int_{0}^{R_{MT}} r^2 dr
* \Psi_{\ell m}^{i{\bf k}\sigma *}({\bf r}) \Psi_{\ell m'}^{i{\bf k}\sigma'}({\bf r})
* \f]
*
* In case of ultrasoft pseudopotential the following density matrix has to be computed for each atom:
* \f[
* d_{\xi \xi'}^{\alpha} = \langle \beta_{\xi}^{\alpha} | \hat N | \beta_{\xi'}^{\alpha} \rangle =
* \sum_{j {\bf k}} \langle \beta_{\xi}^{\alpha} | \Psi_{j{\bf k}} \rangle n_{j{\bf k}}
* \langle \Psi_{j{\bf k}} | \beta_{\xi'}^{\alpha} \rangle
* \f]
* Here \f$ \hat N = \sum_{j{\bf k}} | \Psi_{j{\bf k}} \rangle n_{j{\bf k}} \langle \Psi_{j{\bf k}} | \f$ is
* the occupancy operator written in spectral representation. */
template <typename T>
inline void add_k_point_contribution_dm(K_point* kp__,
mdarray<double_complex, 4>& density_matrix__);
/// Add k-point contribution to the density and magnetization defined on the regular FFT grid.
inline void add_k_point_contribution_rg(K_point* kp__);
/// Generate valence density in the muffin-tins
void generate_valence_mt(K_point_set& ks);
/// Generate charge density of core states
/** Each rank generates the core density for its local slice of atom symmetry
 *  classes; afterwards every class is synchronized from the rank that owns it. */
void generate_core_charge_density()
{
    PROFILE("sirius::Density::generate_core_charge_density");
    /* generate core density for the locally-stored symmetry classes */
    for (int loc = 0; loc < unit_cell_.spl_num_atom_symmetry_classes().local_size(); loc++) {
        int cls = unit_cell_.spl_num_atom_symmetry_classes(loc);
        unit_cell_.atom_symmetry_class(cls).generate_core_charge_density(ctx_.core_relativity());
    }
    /* broadcast each class's core density from its owning rank */
    for (int cls = 0; cls < unit_cell_.num_atom_symmetry_classes(); cls++) {
        int owner = unit_cell_.spl_num_atom_symmetry_classes().local_rank(cls);
        unit_cell_.atom_symmetry_class(cls).sync_core_charge_density(ctx_.comm(), owner);
    }
}
/// Generate the pseudo core charge density on the real-space FFT grid.
void generate_pseudo_core_charge_density()
{
PROFILE("sirius::Density::generate_pseudo_core_charge_density");
/* build plane-wave coefficients of the core density from the per-atom-type
 * radial integrals precomputed in the simulation context */
auto v = unit_cell_.make_periodic_function([this](int iat, double g)
{
return ctx_.radial_integrals().pseudo_core_radial_integral(iat, g);
},
ctx_.gvec());
/* transform the PW coefficients to the real-space grid and store the result
 * directly in rho_pseudo_core_ */
ctx_.fft().transform<1>(ctx_.gvec().partition(), &v[ctx_.gvec().partition().gvec_offset_fft()]);
ctx_.fft().output(&rho_pseudo_core_->f_rg(0));
}
public:
/// Constructor
/** Allocates the charge density, magnetization and coarse-grid functions and,
 *  in the pseudopotential case, sets up the low-/high-frequency G-vector index
 *  maps and the corresponding mixers. */
Density(Simulation_context& ctx__)
: ctx_(ctx__)
, unit_cell_(ctx_.unit_cell())
{
/* full charge density (lm-resolved muffin-tin part + regular-grid part) */
rho_ = new Periodic_function<double>(ctx_, ctx_.lmmax_rho(), 1);
/* one coarse-grid function per component: rho plus num_mag_dims() magnetizations */
for (int i = 0; i < ctx_.num_mag_dims() + 1; i++) {
rho_mag_coarse_[i] = std::unique_ptr<experimental::Smooth_periodic_function<double>>(new experimental::Smooth_periodic_function<double>(ctx_.fft_coarse(), ctx_.gvec_coarse()));
}
/* core density of the pseudopotential method */
if (!ctx_.full_potential()) {
rho_pseudo_core_ = new Periodic_function<double>(ctx_, 0, false);
rho_pseudo_core_->zero();
generate_pseudo_core_charge_density();
}
for (int i = 0; i < ctx_.num_mag_dims(); i++) {
magnetization_[i] = new Periodic_function<double>(ctx_, ctx_.lmmax_rho(), 1);
}
using gc_z = Gaunt_coefficients<double_complex>;
/* Gaunt coefficients are only needed by the full-potential branches */
switch (ctx_.esm_type()) {
case electronic_structure_method_t::full_potential_lapwlo: {
gaunt_coefs_ = std::unique_ptr<gc_z>(new gc_z(ctx_.lmax_apw(), ctx_.lmax_rho(), ctx_.lmax_apw(), SHT::gaunt_hybrid));
break;
}
case electronic_structure_method_t::full_potential_pwlo: {
gaunt_coefs_ = std::unique_ptr<gc_z>(new gc_z(ctx_.lmax_pw(), ctx_.lmax_rho(), ctx_.lmax_pw(), SHT::gaunt_hybrid));
break;
}
default : {
break;
}
}
l_by_lm_ = Utils::l_by_lm(ctx_.lmax_rho());
density_matrix_ = mdarray<double_complex, 4>(unit_cell_.max_mt_basis_size(), unit_cell_.max_mt_basis_size(),
ctx_.num_mag_comp(), unit_cell_.num_atoms());
if (!ctx_.full_potential()) {
lf_gvec_ = std::vector<int>(ctx_.gvec_coarse().num_gvec());
/* mixing weights: one per low-frequency PW coefficient of each component ... */
std::vector<double> weights(ctx_.gvec_coarse().num_gvec() * (1 + ctx_.num_mag_dims()), 1.0);
/* ... plus zero-weight entries for the density-matrix elements mixed alongside */
for (size_t i = 0; i < density_matrix_.size(); i++) {
weights.push_back(0);
}
/* G=0 gets zero weight: the 1/|G|^2 factor below is singular there */
weights[0] = 0;
lf_gvec_[0] = 0;
for (int ig = 1; ig < ctx_.gvec_coarse().num_gvec(); ig++) {
auto G = ctx_.gvec_coarse().gvec(ig);
/* save index of low-frequency G-vector */
lf_gvec_[ig] = ctx_.gvec().index_by_gvec(G);
/* 1/|G|^2 weighting -- presumably Kerker-style preconditioning; confirm */
weights[ig] = fourpi * unit_cell_.omega() / std::pow(ctx_.gvec_coarse().gvec_len(ig), 2);
}
/* find high-frequency G-vectors */
for (int ig = 0; ig < ctx_.gvec().num_gvec(); ig++) {
if (ctx_.gvec().gvec_len(ig) > 2 * ctx_.gk_cutoff()) {
hf_gvec_.push_back(ig);
}
}
/* the low- and high-frequency sets must exactly partition the fine G-vector set */
if (static_cast<int>(hf_gvec_.size()) != ctx_.gvec().num_gvec() - ctx_.gvec_coarse().num_gvec()) {
std::stringstream s;
s << "Wrong count of high-frequency G-vectors" << std::endl
<< "number of found high-frequency G-vectors: " << hf_gvec_.size() << std::endl
<< "expected number of high-frequency G-vectors: " << ctx_.gvec().num_gvec() - ctx_.gvec_coarse().num_gvec() << std::endl
<< "G-vector cutoff: " << ctx_.gk_cutoff();
TERMINATE(s);
}
if (hf_gvec_.size()) {
/* high-frequency components are always mixed linearly */
high_freq_mixer_ = Mixer_factory<double_complex>("linear",
hf_gvec_.size() * (1 + ctx_.num_mag_dims()),
ctx_.mixer_input_section(),
ctx_.comm());
}
low_freq_mixer_ = Mixer_factory<double_complex>(ctx_.mixer_input_section().type_,
lf_gvec_.size() * (1 + ctx_.num_mag_dims()) + density_matrix_.size(),
ctx_.mixer_input_section(),
ctx_.comm(),
weights);
}
if (ctx_.full_potential()) {
/* full-potential case: a single real-space mixer over rho and magnetization */
mixer_ = Mixer_factory<double>(ctx_.mixer_input_section().type_, size(), ctx_.mixer_input_section(), ctx_.comm());
}
}
/// Destructor
/** Releases the raw-owned Periodic_function objects (rho_, magnetization_,
 *  rho_pseudo_core_); the unique_ptr members clean up automatically. */
~Density()
{
    delete rho_;
    for (int j = 0; j < ctx_.num_mag_dims(); j++) {
        delete magnetization_[j];
    }
    /* deleting a null pointer is a well-defined no-op, so no guard is needed */
    delete rho_pseudo_core_;
}
/// Set pointers to muffin-tin and interstitial charge density arrays
/** Either pointer may be null, in which case the corresponding part is left untouched. */
void set_charge_density_ptr(double* rhomt, double* rhorg)
{
    /* the muffin-tin part only exists in the full-potential case */
    bool const use_mt = ctx_.full_potential() && rhomt != nullptr;
    if (use_mt) {
        rho_->set_mt_ptr(rhomt);
    }
    if (rhorg != nullptr) {
        rho_->set_rg_ptr(rhorg);
    }
}
/// Set pointers to muffin-tin and interstitial magnetization arrays
/** The external arrays store the components in (x, y, z) order while the
 *  internal magnetization_ array stores them as (z, x, y); the index shuffles
 *  below perform that reordering. No-op for non-magnetic calculations. */
void set_magnetization_ptr(double* magmt, double* magir)
{
if (ctx_.num_mag_dims() == 0) {
return;
}
assert(ctx_.num_spins() == 2);
// set temporary array wrapper
mdarray<double, 4> magmt_tmp(magmt, ctx_.lmmax_rho(), unit_cell_.max_num_mt_points(),
unit_cell_.num_atoms(), ctx_.num_mag_dims());
mdarray<double, 2> magir_tmp(magir, ctx_.fft().size(), ctx_.num_mag_dims());
if (ctx_.num_mag_dims() == 1) {
/* z component is the first and only one */
if (magmt) {
magnetization_[0]->set_mt_ptr(&magmt_tmp(0, 0, 0, 0));
}
if (magir) {
magnetization_[0]->set_rg_ptr(&magir_tmp(0, 0));
}
}
if (ctx_.num_mag_dims() == 3) {
if (magmt) {
/* z component is the first */
magnetization_[0]->set_mt_ptr(&magmt_tmp(0, 0, 0, 2));
/* x component is the second */
magnetization_[1]->set_mt_ptr(&magmt_tmp(0, 0, 0, 0));
/* y component is the third */
magnetization_[2]->set_mt_ptr(&magmt_tmp(0, 0, 0, 1));
}
if (magir) {
/* z component is the first */
magnetization_[0]->set_rg_ptr(&magir_tmp(0, 2));
/* x component is the second */
magnetization_[1]->set_rg_ptr(&magir_tmp(0, 0));
/* y component is the third */
magnetization_[2]->set_rg_ptr(&magir_tmp(0, 1));
}
}
}
/// Zero density and magnetization
void zero()
{
    rho_->zero();
    for (int j = 0; j < ctx_.num_mag_dims(); j++) {
        magnetization_[j]->zero();
    }
}
/// Find the total leakage of the core states out of the muffin-tins
/** Sums the per-class leakage weighted by the number of atoms in each class. */
double core_leakage()
{
    double total{0};
    for (int ic = 0; ic < unit_cell_.num_atom_symmetry_classes(); ic++) {
        total += unit_cell_.atom_symmetry_class(ic).num_atoms() * core_leakage(ic);
    }
    return total;
}
/// Return core leakage for a specific atom symmetry class
/** Delegates to the symmetry class; used by core_leakage() and diagnostics. */
double core_leakage(int ic)
{
return unit_cell_.atom_symmetry_class(ic).core_leakage();
}
/// Generate initial charge density and magnetization
void initial_density();
void initial_density_pseudo();
void initial_density_full_pot();
/// Check total density for the correct number of electrons.
/** Emits a non-fatal WARNING (including per-class core leakage in the
 *  full-potential case) when the integrated density deviates from the nominal
 *  electron count by more than 1e-5. */
inline void check_num_electrons()
{
double nel{0};
if (ctx_.full_potential()) {
std::vector<double> nel_mt;
double nel_it;
nel = rho_->integrate(nel_mt, nel_it);
} else {
/* PP case: the G=0 plane-wave coefficient times the cell volume is the total charge */
nel = rho_->f_pw(0).real() * unit_cell_.omega();
}
/* check the number of electrons */
if (std::abs(nel - unit_cell_.num_electrons()) > 1e-5) {
std::stringstream s;
s << "wrong number of electrons" << std::endl
<< "  obtained value : " << nel << std::endl
<< "  target value : " << unit_cell_.num_electrons() << std::endl
<< "  difference : " << std::abs(nel - unit_cell_.num_electrons()) << std::endl;
if (ctx_.full_potential()) {
s << "  total core leakage : " << core_leakage();
for (int ic = 0; ic < unit_cell_.num_atom_symmetry_classes(); ic++) {
s << std::endl << "  atom class : " << ic << ", core leakage : " << core_leakage(ic);
}
}
WARNING(s);
}
}
/// Generate full charge density (valence + core) and magnetization from the wave functions.
/** This function calls generate_valence() and then in case of full-potential LAPW method adds a core density
 * to get the full charge density of the system. */
inline void generate(K_point_set& ks__)
{
PROFILE("sirius::Density::generate");
generate_valence(ks__);
if (ctx_.full_potential()) {
/* find the core states */
generate_core_charge_density();
/* add core contribution */
for (int ialoc = 0; ialoc < (int)unit_cell_.spl_num_atoms().local_size(); ialoc++) {
int ia = unit_cell_.spl_num_atoms(ialoc);
for (int ir = 0; ir < unit_cell_.atom(ia).num_mt_points(); ir++) {
/* core density is added to the lm = 0 component only, scaled by 1/Y00 */
rho_->f_mt<index_domain_t::local>(0, ir, ialoc) += unit_cell_.atom(ia).symmetry_class().core_charge_density(ir) / y00;
}
}
/* synchronize muffin-tin part */
rho_->sync_mt();
for (int j = 0; j < ctx_.num_mag_dims(); j++) {
magnetization_[j]->sync_mt();
}
}
}
/// Generate valence charge density and magnetization from the wave functions.
/** The interstitial density is generated on the coarse FFT grid and then transformed to the PW domain.
* After symmetrization and mixing and before the generation of the XC potential density is transformted to the
* real-space domain and checked for the number of electrons. */
inline void generate_valence(K_point_set& ks__);
/// Add augmentation charge Q(r).
/** Restore valence density by adding the Q-operator constribution.
* The following term is added to the valence density, generated by the pseudo wave-functions:
* \f[
* \tilde \rho({\bf G}) = \sum_{\alpha} \sum_{\xi \xi'} d_{\xi \xi'}^{\alpha} Q_{\xi' \xi}^{\alpha}({\bf G})
* \f]
* Plane-wave coefficients of the Q-operator for a given atom \f$ \alpha \f$ can be obtained from the
* corresponding coefficients of the Q-operator for a given atom \a type A:
* \f[
* Q_{\xi' \xi}^{\alpha(A)}({\bf G}) = e^{-i{\bf G}\tau_{\alpha(A)}} Q_{\xi' \xi}^{A}({\bf G})
* \f]
* We use this property to split the sum over atoms into sum over atom types and inner sum over atoms of the
* same type:
* \f[
* \tilde \rho({\bf G}) = \sum_{A} \sum_{\xi \xi'} Q_{\xi' \xi}^{A}({\bf G}) \sum_{\alpha(A)}
* d_{\xi \xi'}^{\alpha(A)} e^{-i{\bf G}\tau_{\alpha(A)}} =
* \sum_{A} \sum_{\xi \xi'} Q_{\xi' \xi}^{A}({\bf G}) d_{\xi \xi'}^{A}({\bf G})
* \f]
* where
* \f[
* d_{\xi \xi'}^{A}({\bf G}) = \sum_{\alpha(A)} d_{\xi \xi'}^{\alpha(A)} e^{-i{\bf G}\tau_{\alpha(A)}}
* \f]
*/
/* NOTE(review): ks__ is not referenced anywhere in this body -- confirm whether
 * the parameter is still needed by callers or kept for interface symmetry. */
void augment(K_point_set& ks__)
{
PROFILE("sirius::Density::augment");
/*check if we need to augment charge density and magnetization */
bool need_to_augment{false};
for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
need_to_augment |= unit_cell_.atom_type(iat).pp_desc().augment;
}
if (!need_to_augment) {
return;
}
/* collect density and magnetization into single array */
std::vector<Periodic_function<double>*> rho_vec(ctx_.num_mag_dims() + 1);
rho_vec[0] = rho_;
for (int j = 0; j < ctx_.num_mag_dims(); j++) {
rho_vec[1 + j] = magnetization_[j];
}
if (ctx_.control().print_checksum_) {
for (auto e: rho_vec) {
auto cs = e->checksum_pw();
DUMP("checksum(rho_vec_pw): %20.14f %20.14f", cs.real(), cs.imag());
}
}
/* per-component augmentation charge, distributed over the local G-vector slice */
mdarray<double_complex, 2> rho_aug(ctx_.gvec_count(), ctx_.num_mag_dims() + 1, ctx_.dual_memory_t());
switch (ctx_.processing_unit()) {
case CPU: {
generate_rho_aug<CPU>(rho_vec, rho_aug);
break;
}
case GPU: {
generate_rho_aug<GPU>(rho_vec, rho_aug);
break;
}
}
/* add the augmentation charge to the locally-stored PW coefficients */
for (int iv = 0; iv < ctx_.num_mag_dims() + 1; iv++) {
#pragma omp parallel for
for (int igloc = 0; igloc < ctx_.gvec_count(); igloc++) {
int ig = ctx_.gvec_offset() + igloc;
rho_vec[iv]->f_pw(ig) += rho_aug(igloc, iv);
}
}
/* gather the full set of PW coefficients on every rank */
sddk::timer t5("sirius::Density::augment|mpi");
for (auto e: rho_vec) {
ctx_.comm().allgather(&e->f_pw(0), ctx_.gvec_offset(), ctx_.gvec_count());
if (ctx_.control().print_checksum_) {
auto cs = e->checksum_pw();
DUMP("checksum(rho_vec_pw): %20.14f %20.14f", cs.real(), cs.imag());
}
}
t5.stop();
}
template <device_t pu>
inline void generate_rho_aug(std::vector<Periodic_function<double>*> rho__,
mdarray<double_complex, 2>& rho_aug__);
/// Check density at MT boundary
void check_density_continuity_at_mt();
mdarray<double, 2> generate_rho_radial_integrals(int type__);
/// Transform the charge density to plane-wave coefficients.
void generate_pw_coefs()
{
/* rho(r) -> rho(G); the -1 direction convention matches the transform used in mix() */
rho_->fft_transform(-1);
}
/// Write density and magnetization to the HDF5 storage file.
/** Only rank 0 writes; all ranks synchronize at the end. */
void save()
{
    if (ctx_.comm().rank() == 0) {
        HDF5_tree fout(storage_file_name, false);
        rho_->hdf5_write(fout["density"]);
        for (int j = 0; j < ctx_.num_mag_dims(); j++) {
            magnetization_[j]->hdf5_write(fout["magnetization"].create_node(j));
        }
    }
    ctx_.comm().barrier();
}
void load()
{
HDF5_tree fout(storage_file_name, false);
rho_->hdf5_read(fout["density"]);
for (int j = 0; j < ctx_.num_mag_dims(); j++)
magnetization_[j]->hdf5_read(fout["magnetization"][j]);
}
/// Write the unit cell and atomic positions in XSF format.
/** Currently a no-op: the previous implementation (writing CRYSTAL / PRIMVEC /
 *  CONVVEC / PRIMCOORD records to "unit_cell.xsf") was fully commented out.
 *  The dead commented-out body has been removed; the stub is kept so the
 *  public interface is unchanged. Recover the old code from version control
 *  if XSF output is re-enabled. */
void save_to_xsf()
{
    /* intentionally empty -- XSF output is disabled */
}
/// Write density and magnetization grids in XDMF/HDF5 format.
/** Currently a no-op: the previous implementation (dumping rho, position and
 *  magnetization grids to "rho.hdf5" and emitting an XDMF descriptor to
 *  "rho.xdmf") was fully commented out. The dead commented-out body has been
 *  removed; the stub is kept so the public interface is unchanged. Recover the
 *  old code from version control if XDMF output is re-enabled. */
void save_to_xdmf()
{
    /* intentionally empty -- XDMF output is disabled */
}
/// Total number of stored values in density plus all magnetization components.
inline size_t size()
{
    size_t total = rho_->size();
    for (int j = 0; j < ctx_.num_mag_dims(); j++) {
        total += magnetization_[j]->size();
    }
    return total;
}
/// Return a pointer to the charge density function (owned by this class).
Periodic_function<double>* rho()
{
return rho_;
}
/// Return a pointer to the pseudo core charge density (null in the full-potential case).
Periodic_function<double>* rho_pseudo_core()
{
return rho_pseudo_core_;
}
/// Return the array of magnetization components (internal order: z, x, y).
Periodic_function<double>** magnetization()
{
return magnetization_;
}
/// Return a single magnetization component (no bounds checking on i).
Periodic_function<double>* magnetization(int i)
{
return magnetization_[i];
}
/// Return the muffin-tin part of the density for a local atom index.
Spheric_function<spectral, double> const& density_mt(int ialoc) const
{
return rho_->f_mt(ialoc);
}
/// generate n_1 and \tilda{n}_1 in lm components
void generate_paw_loc_density();
/// All-electron PAW density for a (split) PAW atom index.
mdarray<double, 2> const& ae_paw_atom_density(int spl_paw_ind) const
{
return paw_density_data_[spl_paw_ind].ae_density_;
}
/// Pseudo PAW density for a (split) PAW atom index.
mdarray<double, 2> const& ps_paw_atom_density(int spl_paw_ind) const
{
return paw_density_data_[spl_paw_ind].ps_density_;
}
/// All-electron PAW magnetization for a (split) PAW atom index.
mdarray<double, 3> const& ae_paw_atom_magn(int spl_paw_ind) const
{
return paw_density_data_[spl_paw_ind].ae_magnetization_;
}
/// Pseudo PAW magnetization for a (split) PAW atom index.
mdarray<double, 3> const& ps_paw_atom_magn(int spl_paw_ind) const
{
return paw_density_data_[spl_paw_ind].ps_magnetization_;
}
/// Allocate muffin-tin storage for the density and every magnetization component.
void allocate()
{
    rho_->allocate_mt(true);
    for (int i = 0; i < ctx_.num_mag_dims(); i++) {
        magnetization_[i]->allocate_mt(true);
    }
}
/// Pack the current density into the mixer input buffers.
/** Full-potential case: rho and magnetization are packed into the single
 *  real-space mixer. Pseudopotential case: low-frequency PW coefficients
 *  (plus the density matrix) go to the low-frequency mixer and, if present,
 *  high-frequency coefficients to the high-frequency mixer. The packing order
 *  here must match the unpacking order in mixer_output(). */
void mixer_input()
{
if (mixer_ != nullptr) {
/* n is the running offset in the mixer buffer */
size_t n = rho_->pack(0, *mixer_);
for (int i = 0; i < ctx_.num_mag_dims(); i++) {
n += magnetization_[i]->pack(n, *mixer_);
}
} else {
int k = 0;
for (int ig: lf_gvec_) {
low_freq_mixer_->input(k++, rho_->f_pw(ig));
}
for (int j = 0; j < ctx_.num_mag_dims(); j++) {
for (int ig: lf_gvec_) {
low_freq_mixer_->input(k++, magnetization_[j]->f_pw(ig));
}
}
/* density-matrix elements are mixed together with the low-frequency set */
for (size_t i = 0; i < density_matrix_.size(); i++) {
low_freq_mixer_->input(k++, density_matrix_[i]);
}
if (high_freq_mixer_) {
k = 0;
for (int ig: hf_gvec_) {
high_freq_mixer_->input(k++, rho_->f_pw(ig));
}
for (int j = 0; j < ctx_.num_mag_dims(); j++) {
for (int ig: hf_gvec_) {
high_freq_mixer_->input(k++, magnetization_[j]->f_pw(ig));
}
}
}
}
}
        /// Read the mixed density back out of the active mixer.
        /** Exact inverse of mixer_input(): the unpack offsets and counters k must
         *  traverse the mixer buffers in the same order mixer_input() filled them. */
        void mixer_output()
        {
            if (mixer_ != nullptr) {
                /* unpack rho first; n is the running offset for the magnetization components */
                size_t n = rho_->unpack(mixer_->output_buffer());
                for (int i = 0; i < ctx_.num_mag_dims(); i++) {
                    n += magnetization_[i]->unpack(&mixer_->output_buffer()[n]);
                }
            } else {
                /* low-frequency mixer: rho(G), then magnetization(G), then density matrix */
                int k = 0;
                for (int ig: lf_gvec_) {
                    rho_->f_pw(ig) = low_freq_mixer_->output_buffer(k++);
                }
                for (int j = 0; j < ctx_.num_mag_dims(); j++) {
                    for (int ig: lf_gvec_) {
                        magnetization_[j]->f_pw(ig) = low_freq_mixer_->output_buffer(k++);
                    }
                }
                for (size_t i = 0; i < density_matrix_.size(); i++) {
                    density_matrix_[i] = low_freq_mixer_->output_buffer(k++);
                }
                if (high_freq_mixer_) {
                    /* independent counter: the high-frequency mixer has its own buffer */
                    k = 0;
                    for (int ig: hf_gvec_) {
                        rho_->f_pw(ig) = high_freq_mixer_->output_buffer(k++);
                    }
                    for (int j = 0; j < ctx_.num_mag_dims(); j++) {
                        for (int ig: hf_gvec_) {
                            magnetization_[j]->f_pw(ig) = high_freq_mixer_->output_buffer(k++);
                        }
                    }
                }
            }
        }
void mixer_init()
{
mixer_input();
if (mixer_ != nullptr) {
mixer_->initialize();
} else {
low_freq_mixer_->initialize();
if (high_freq_mixer_) {
high_freq_mixer_->initialize();
}
}
}
        /// Mix new and previous densities; returns the mixer's RMS measure.
        double mix()
        {
            double rms;
            if (mixer_ != nullptr) {
                STOP(); // TODO: redesign mixer
                /* mix in real-space in case of FP-LAPW */
                /* NOTE(review): this branch is currently disabled by STOP() above */
                mixer_input();
                rms = mixer_->mix();
                mixer_output();
                /* get rho(G) after mixing */
                rho_->fft_transform(-1);
            } else {
                /* mix in G-space in case of PP */
                mixer_input();
                rms = low_freq_mixer_->mix();
                if (high_freq_mixer_) {
                    /* total RMS is the sum over both frequency ranges */
                    rms += high_freq_mixer_->mix();
                }
                mixer_output();
            }
            return rms;
        }
        /// Residual sum of squares reported by the low-frequency mixer.
        /** NOTE(review): dereferences low_freq_mixer_ unconditionally, whereas
         *  mix()/mixer_init() branch on mixer_ != nullptr — presumably this is
         *  only called in the G-space (PP) setup; confirm against callers. */
        inline double dr2()
        {
            return low_freq_mixer_->rss();
        }
mdarray<double_complex, 4> const& density_matrix() const
{
return density_matrix_;
}
inline void fft_transform(int direction__)
{
rho_->fft_transform(direction__);
for (int j = 0; j < ctx_.num_mag_dims(); j++) {
magnetization_[j]->fft_transform(direction__);
}
}
};
#include "Density/initial_density.hpp"
#include "Density/add_k_point_contribution_rg.hpp"
#include "Density/add_k_point_contribution_dm.hpp"
#include "Density/generate_valence.hpp"
#include "Density/generate_rho_aug.hpp"
#include "Density/symmetrize_density_matrix.hpp"
#include "Density/generate_valence_mt.hpp"
#include "Density/generate_rho_radial_integrals.hpp"
#include "Density/check_density_continuity_at_mt.hpp"
#include "Density/paw_density.hpp"
}
#endif // __DENSITY_H__
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.