source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
DECADES_decoupled.h | #pragma once
#include "DECADES.h"
__attribute__((noinline))
extern "C"
// Plain store used as the "compute exclusive" store primitive; kept
// noinline so the call survives as a named marker for the DECADES
// decoupling pass.
void compute_exclusive_store(int *addr, int value) {
    addr[0] = value;
}
__attribute__((noinline))
extern "C"
// Atomically adds `value` to *addr and returns the value that was stored
// there before the add (fetch-and-add semantics).  The OpenMP atomic
// capture makes the read-modify-write safe under OpenMP-parallel callers.
int compute_exclusive_fetch_add(int *addr, int value) {
    int ret;
#pragma omp atomic capture
    {
        ret = addr[0];
        addr[0] += value;
    }
    return ret;
}
__attribute__((noinline))
extern "C"
// Plain store for the "compute side"; noinline keeps the call site
// recognizable by name to the decoupling tooling.
void compute_side_store(int *addr, int value) {
    addr[0] = value;
}
__attribute__((noinline))
extern "C"
// Fetch-min primitive for the compute side.
// NOTE(review): DECADES_FETCH_MIN comes from "DECADES.h"; presumably it
// atomically stores min(*addr, value) and returns the prior *addr --
// confirm against the DECADES header.
int compute_side_fetch_min(int *addr, int value) {
    return DECADES_FETCH_MIN(addr, value);
}
|
nontemporal-2.c | /* { dg-do compile } */
#define N 1024
extern int a[N], b[N], c[N], d[N];
void
foo (void)
{
  int i;
  /* Valid: a and b may appear in both nontemporal and aligned clauses.  */
  #pragma omp simd nontemporal (a, b) aligned (a, b, c)
  for (i = 0; i < N; ++i)
    a[i] = b[i] + c[i];
  /* Invalid: a list item may appear in at most one nontemporal clause.  */
  #pragma omp simd nontemporal (d) nontemporal (d) /* { dg-error "'d' appears more than once in 'nontemporal' clauses" } */
  for (i = 0; i < N; ++i)
    d[i] = 2 * c[i];
  /* Invalid: duplicate list item within a single nontemporal clause.  */
  #pragma omp simd nontemporal (a, b, b) /* { dg-error "'b' appears more than once in 'nontemporal' clauses" } */
  for (i = 0; i < N; ++i)
    a[i] += b[i] + c[i];
}
|
Normalize.c | #include "Python.h"
#include "numpy/arrayobject.h"
#include <fcntl.h>
#include <it/math.h>
#include <it/types.h>
#include <omp.h>
#define VERSION "0.1"
/* qsort comparator for doubles: negative, zero, or positive as
 * (*a) is less than, equal to, or greater than (*b). */
int
compare_doubles(const void *a, const void *b) {
    const double lhs = *(const double *)a;
    const double rhs = *(const double *)b;
    if (lhs < rhs)
        return -1;
    if (lhs > rhs)
        return 1;
    return 0;
}
/* Mean-rank percentile of `score` within sorted array `a` of length `len`:
 * i = #elements < score, k = #elements <= score, result = (i+k)/2/len,
 * a value in [0, 1].  ! expects double *a to be sorted ! */
double
perc(const double *a, const double score, const unsigned int len) {
    unsigned int i, k;
    i = 0;
    /* Bounds test must come first: the original evaluated a[i] before
     * checking i < len, reading one element past the end whenever score
     * exceeds every element of the array. */
    while(i < len && a[i] < score)
        i++;
    k = i;
    while(k < len && a[k] <= score)
        k++;
    return (double)(i + k) / 2 / len;
}
/* Return a freshly malloc'd ascending copy of data[start .. start+len-1].
 * Caller owns (and must free) the result.  Returns NULL with a Python
 * exception set on allocation failure. */
double *
sort(const double *data, const unsigned int start, const unsigned int len) {
    double *sorted;
    sorted = malloc(len * sizeof(double));
    if(!sorted) {
        PyErr_SetString(PyExc_MemoryError, "cannot allocate memory for sorted time series.");
        return NULL;
    }
    /* memcpy returns its destination and cannot fail; the guard is kept
     * purely defensively, but now frees the buffer so the error path no
     * longer leaks `sorted`. */
    if(!memcpy(sorted, data + start, len * sizeof(double))) {
        free(sorted);
        PyErr_SetString(PyExc_MemoryError, "cannot copy memory.");
        return NULL;
    }
    qsort(sorted, len, sizeof(double), compare_doubles);
    return sorted;
}
/* Quantile-normalize each of the `num` rows (each of length `len`) of
 * `data` into `trans`: every value is mapped through its within-row
 * percentile and the inverse normal CDF, sqrt(2) * erfinv(2p - 1). */
void
normalize(const double *data, double *trans, const unsigned int num, const unsigned int len) {
    unsigned int i, j, k;
    double *prior;
    //#pragma omp parallel for private(i,j,k,prior)
    for(i = 0; i < num; i++) {
        j = i * len;
        prior = sort(data, j, len);
        /* sort() returns NULL on allocation failure (Python error already
         * set); the original dereferenced it unconditionally.  Bail out,
         * leaving rows processed so far normalized. */
        if(!prior)
            return;
        for(k = 0; k < len; k++)
            trans[j + k] = 1.4142135623730951 * erfinv(2 * perc(prior, data[j + k], len) - 1);
        free(prior);
    }
}
/* Python entry point: trans = Quantile(data [, num_threads]).
 * Converts `data` to a contiguous 2-D double array, allocates a
 * zero-filled result of the same shape, and quantile-normalizes row-wise.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *
Normalize_Quantile(PyObject *self, PyObject* args) {
    PyObject *arg;
    PyArrayObject *data, *trans;
    int nthreads;
    nthreads = 0;
    if(!PyArg_ParseTuple(args, "O|I", &arg, &nthreads))
        return NULL;
    data = (PyArrayObject *) PyArray_ContiguousFromObject(arg,
                                                          PyArray_DOUBLE, 2, 2);
    /* Check `data` before touching data->dimensions: the original passed
     * a NULL pointer into PyArray_ZEROS when the conversion failed. */
    if(!data)
        return NULL;
    trans = (PyArrayObject *) PyArray_ZEROS(2, data->dimensions, PyArray_DOUBLE, 0);
    if(!trans) {
        Py_DECREF(data);    /* drop the converted input on failure */
        return NULL;
    }
    if(nthreads)
        omp_set_num_threads(nthreads);
    normalize((double *)data->data,
              (double *)trans->data,
              data->dimensions[0],
              data->dimensions[1]);
    Py_DECREF(data);
    return PyArray_Return(trans);
}
/* Module method table: exposes Quantile() as Normalize.Quantile. */
static PyMethodDef Normalize_methods[] = {
{"Quantile", Normalize_Quantile, METH_VARARGS,
"trans = Quantile(data, num_threads)\n\nNormalize time series by quantile normalization to the normal distribution.\n\nParameters\n----------\ndata : array_like\nA 2-D array containing multiple variables and observations. Each row of `data` represents a variable, and each column a single observation of all those variables.\n\nnum_threads : int, optional\nThe maximum number of OpenMP threads used.\n\nReturns\n-------\ntrans : ndarray\nThe transformed input data array.\n"},
{NULL, NULL, 0, NULL}
};
/* Python 2 module init hook: registers the Normalize module, its method
 * table, a __version__ attribute, and initializes the NumPy C API. */
void
initNormalize(void) {
    PyObject *m;
    PyObject *v;
    v = Py_BuildValue("s", VERSION);
    PyImport_AddModule("Normalize");
    m = Py_InitModule3("Normalize", Normalize_methods,
                       "Normalize time series by quantile normalization to the normal distribution.");
    PyModule_AddObject(m, "__version__", v);
    /* required before any NumPy array API call in this module */
    import_array();
}
/* Stand-alone driver: embeds a Python interpreter, registers the module,
 * and exits.  Py_Exit(0) terminates the process, so the trailing return
 * is never reached (kept to satisfy the compiler). */
int
main(int argc, char **argv) {
    Py_SetProgramName(argv[0]);
    Py_Initialize();
    initNormalize();
    Py_Exit(0);
    return 0;
}
|
GB_unop__minv_bool_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_bool_bool)
// op(A') function: GB (_unop_tran__minv_bool_bool)
// C type: bool
// A type: bool
// cast: ;
// unaryop: cij = true
#define GB_ATYPE \
bool
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = true ;
// casting
#define GB_CAST(z, aij) \
; ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
; ; \
/* Cx [pC] = op (cast (aij)) */ \
; ; \
Cx [pC] = true ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary op elementwise: Cx [p] = op (cast (Ax [p])).  For MINV
// on bool the operator is the constant `true`, so no values are read from
// Ax; the bare `; ;` statements are the expanded (empty) GETA/CAST macros.
GrB_Info GB (_unop_apply__minv_bool_bool)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // sparse, hypersparse, or full: every one of the anz entries exists
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (bool), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            ; ;
            ; ;
            Cx [p] = true ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            ; ;
            ; ;
            Cx [p] = true ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the shared transpose template GB_unop_transpose.c
// is textually included and expands using the GB_* macros defined above
// (here the op simply writes `true` for every entry of A).
GrB_Info GB (_unop_tran__minv_bool_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_convert_full_to_sparse.c | //------------------------------------------------------------------------------
// GB_convert_full_to_sparse: convert a matrix from full to sparse
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
// Convert a full matrix to sparse (CSC/CSR) form in place: allocate A->p
// and A->i and fill them with the dense pattern.  The numeric values A->x
// are already dense and are reused unchanged.
GrB_Info GB_convert_full_to_sparse // convert matrix from full to sparse
(
    GrB_Matrix A,               // matrix to convert from full to sparse
    GB_Context Context
)
{
    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------
    ASSERT_MATRIX_OK (A, "A converting full to sparse", GB0) ;
    ASSERT (GB_IS_FULL (A) || A->nzmax == 0) ;
    ASSERT (!GB_IS_BITMAP (A)) ;
    ASSERT (!GB_IS_SPARSE (A)) ;
    ASSERT (!GB_IS_HYPERSPARSE (A)) ;
    ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (!GB_JUMBLED (A)) ;
    ASSERT (!GB_PENDING (A)) ;
    GBURBLE ("(full to sparse) ") ;
    //--------------------------------------------------------------------------
    // allocate A->p and A->i
    //--------------------------------------------------------------------------
    int64_t avdim = A->vdim ;
    int64_t avlen = A->vlen ;
    int64_t anz = avdim * avlen ;
    // NOTE(review): overflow of avdim*avlen is only verified in debug
    // builds via this ASSERT; callers are expected to have checked it.
    ASSERT (GB_Index_multiply (&anz, avdim, avlen) == true) ;
    int64_t *restrict Ap = NULL ; size_t Ap_size = 0 ;
    int64_t *restrict Ai = NULL ; size_t Ai_size = 0 ;
    Ap = GB_MALLOC (avdim+1, int64_t, &Ap_size) ;
    Ai = GB_MALLOC (anz, int64_t, &Ai_size) ;
    if (Ap == NULL || Ai == NULL)
    {
        // out of memory
        GB_FREE (&Ap, Ap_size) ;
        GB_FREE (&Ai, Ai_size) ;
        return (GrB_OUT_OF_MEMORY) ;
    }
    A->p = Ap ; A->p_size = Ap_size ;
    A->i = Ai ; A->i_size = Ai_size ;
    A->plen = avdim ;
    A->nvec = avdim ;
    A->nvec_nonempty = (avlen == 0) ? 0 : avdim ;
    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (anz, chunk, nthreads_max) ;
    //--------------------------------------------------------------------------
    // fill the A->p and A->i pattern
    //--------------------------------------------------------------------------
    // every vector k holds exactly avlen entries: Ap [k] = k*avlen
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k <= avdim ; k++)
    {
        Ap [k] = k * avlen ;
    }
    // row indices cycle 0..avlen-1 within each vector
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Ai [p] = p % avlen ;
    }
    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------
    ASSERT_MATRIX_OK (A, "A converted from full to sparse", GB0) ;
    ASSERT (GB_IS_SPARSE (A)) ;
    ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (!GB_JUMBLED (A)) ;
    ASSERT (!GB_PENDING (A)) ;
    return (GrB_SUCCESS) ;
}
|
modifier_view.h | // ==========================================================================
// SeqAn - The Library for Sequence Analysis
// ==========================================================================
// Copyright (c) 2006-2013, Knut Reinert, FU Berlin
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Knut Reinert or the FU Berlin nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//
// ==========================================================================
// Author: David Weese <david.weese@fu-berlin.de>
// Author: Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de>
// ==========================================================================
// TODO(holtgrew): Split into modified_string_mod_view.h and modified_iterator_mod_view.h.
// TODO(holtgrew): Move out convert()
#ifndef SEQAN_MODIFIER_MODIFIER_VIEW_H_
#define SEQAN_MODIFIER_MODIFIER_VIEW_H_
namespace seqan
{
// ==========================================================================
// Forwards
// ==========================================================================
// ==========================================================================
// Classes
// ==========================================================================
// --------------------------------------------------------------------------
// Class ModView
// --------------------------------------------------------------------------
/*!
* @class ModViewModifiedIterator
* @extends ModifiedIterator
* @headerfile <seqan/modifier.h>
*
* @brief Transforms the character of a host using a custom functor.
*
* @signature template <typename THost, typename TFunctor>
* class ModifiedIterator<THost, ModView<TFunctor> >;
*
* @tparam THost The host iterator.
* @tparam TFunctor A unary functor type.
*/
/*!
* @class ModViewModifiedString
* @extends ModifiedString
* @headerfile <seqan/modifier.h>
*
* @brief Transforms the character of a host using a custom functor.
*
* @signature template <typename THost, typename TFunctor>
* class ModifiedString<THost, ModView<TFunctor> >;
*
* @tparam THost The host iterator.
* @tparam TFunctor A unary functor type.
*/
/**
.Spec.ModView:
..summary:Transforms the characters of the $THost$ string/iterator using a custom function.
..cat:Modifier
..general:Class.ModifiedIterator
..general:Class.ModifiedString
..signature:ModifiedIterator<THost, ModView<TFunctor> >
..signature:ModifiedString<THost, ModView<TFunctor> >
..param.THost:Original string/iterator.
...type:Concept.RandomAccessIteratorConcept
..param.TFunctor:A unary function (see STL's $unary_function$).
...remarks:The argument type of $TFunctor$ must be $VALUE<THost>::Type$.
..remarks:The @Metafunction.Value@ type of this modifier is the result type of $TFunctor$.
..include:seqan/modifier.h
*/
// Tag type selecting the ModView specialization of ModifiedString /
// ModifiedIterator; carries the functor type only.
template <typename TFunctor>
struct ModView {};
// Cargo stored inside a ModView modifier: the functor applied per value.
template <typename TFunctor>
struct ModViewCargo
{
    TFunctor func;
};
// ModView specialization of ModifiedIterator: wraps a host iterator and
// applies the cargo functor on access.  tmp_value buffers the transformed
// value so value() can hand out a reference; it is mutable because reads
// through a const iterator still need to write the buffer.
template <typename THost, typename TFunctor>
class ModifiedIterator<THost, ModView<TFunctor> >
{
public:
    typedef typename Cargo<ModifiedIterator>::Type TCargo_;
    THost _host;
    TCargo_ _cargo;
    mutable typename Value<ModifiedIterator>::Type tmp_value;
    ModifiedIterator():
        tmp_value()
    {}
    // Copy-convert from a modified iterator over another host type.
    template <typename TOtherHost>
    ModifiedIterator(ModifiedIterator<TOtherHost, ModView<TFunctor> > & origin):
        _host(origin._host), _cargo(origin._cargo), tmp_value()
    {}
    explicit
    ModifiedIterator(THost const & host):
        _host(host), tmp_value()
    {}
    // Construct with host and an explicit functor instance.
    ModifiedIterator(THost const & host, TFunctor const & functor):
        _host(host), tmp_value()
    {
        cargo(*this).func = functor;
    }
};
// --------------------------------------------------------------------------
// Class ModifiedString
// --------------------------------------------------------------------------
// ModView specialization of ModifiedString: presents the host sequence
// with every value transformed by the cargo functor.  _host is stored as
// a pointer-like handle; tmp_value buffers transformed values so value()
// can return a reference (mutable: const access still writes the buffer).
// The SEQAN_CTOR_ENABLE_IF constructors introduce a hidden `dummy`
// parameter, hence the ignoreUnusedVariableWarning(dummy) calls.
template <typename THost, typename TFunctor>
class ModifiedString<THost, ModView<TFunctor> >
{
public:
    typedef typename Pointer_<THost>::Type THostPointer_;
    typedef typename Cargo<ModifiedString>::Type TCargo_;
    mutable THostPointer_ _host;
    TCargo_ _cargo;
    mutable typename Value<ModifiedString>::Type tmp_value;
    // Default constructor.
    ModifiedString():
        tmp_value()
    {}
    // Construct with the actual host.
    explicit
    ModifiedString(typename Parameter_<THost>::Type host):
        _host(_toPointer(host)), tmp_value()
    {}
    // Construct with the functor.
    explicit
    ModifiedString(TFunctor const & functor):
        tmp_value()
    {
        cargo(*this).func = functor;
    }
    // Constructor for creating a ModifiedString with const host from a non-const host.
    template <typename THost_>
    explicit
    ModifiedString(THost_ & host,
                   SEQAN_CTOR_ENABLE_IF(IsConstructible<THost, THost_>)) :
        _host(_toPointer(host)), tmp_value()
    {
        ignoreUnusedVariableWarning(dummy);
    }
    // Construct with the actual host; variant with functor.
    ModifiedString(typename Parameter_<THost>::Type host, TFunctor const & functor) :
        _host(_toPointer(host)), tmp_value()
    {
        cargo(*this).func = functor;
    }
    // Constructor for creating a ModifiedString with const host with a non-const host; variant with functor.
    template <typename THost_>
    explicit
    ModifiedString(THost_ & host,
                   TFunctor const & functor,
                   SEQAN_CTOR_ENABLE_IF(IsConstructible<THost, THost_>)) :
        _host(_toPointer(host)), tmp_value()
    {
        ignoreUnusedVariableWarning(dummy);
        cargo(*this).func = functor;
    }
#ifdef SEQAN_CXX11_STANDARD
    // Constructor for innermost type; hand down to _host which is a ModifiedString itself.
    template <typename THost_>
    explicit
    ModifiedString(THost_ && host,
                   SEQAN_CTOR_ENABLE_IF(IsAnInnerHost<
                       typename RemoveReference<THost>::Type,
                       typename RemoveReference<THost_>::Type >)) :
        _host(std::forward<THost_>(host)), tmp_value()
    {
        ignoreUnusedVariableWarning(dummy);
    }
    // Constructor for innermost type; hand down to _host which is a ModifiedString itself. Variant with functor.
    template <typename THost_>
    explicit
    ModifiedString(THost_ && host,
                   TFunctor const & functor,
                   SEQAN_CTOR_ENABLE_IF(IsAnInnerHost<
                       typename RemoveReference<THost>::Type,
                       typename RemoveReference<THost_>::Type >)) :
        _host(std::forward<THost_>(host)), tmp_value()
    {
        ignoreUnusedVariableWarning(dummy);
        cargo(*this).func = functor;
    }
#else
    // Constructor for innermost type; hand down to _host which is a ModifiedString itself. Non-const variant.
    template <typename THost_>
    explicit
    ModifiedString(THost_ & host,
                   SEQAN_CTOR_ENABLE_IF(IsAnInnerHost<THost, THost_>)) :
        _host(host), tmp_value()
    {
        ignoreUnusedVariableWarning(dummy);
    }
    // Constructor for innermost type; hand down to _host which is a ModifiedString itself. Non-const variant.
    template <typename THost_>
    explicit
    ModifiedString(THost_ const & host,
                   SEQAN_CTOR_ENABLE_IF(IsAnInnerHost<THost, THost_ const>)) :
        _host(host), tmp_value()
    {
        ignoreUnusedVariableWarning(dummy);
    }
    // Constructor for innermost type; hand down to _host which is a ModifiedString itself. Non-const variant with
    // functor.
    template <typename THost_>
    explicit
    ModifiedString(THost_ & host,
                   TFunctor const & functor,
                   SEQAN_CTOR_ENABLE_IF(IsAnInnerHost<THost, THost_>)) :
        _host(host), tmp_value()
    {
        ignoreUnusedVariableWarning(dummy);
        cargo(*this).func = functor;
    }
    // Constructor for innermost type; hand down to _host which is a ModifiedString itself. Non-const variant with
    // functor.
    template <typename THost_>
    explicit
    ModifiedString(THost_ const & host,
                   TFunctor const & functor,
                   SEQAN_CTOR_ENABLE_IF(IsAnInnerHost<THost, THost_ const>)) :
        _host(host), tmp_value()
    {
        ignoreUnusedVariableWarning(dummy);
        cargo(*this).func = functor;
    }
#endif
    // Rebind to a new host; the cargo (functor) is kept.
    ModifiedString & operator= (THost & other)
    {
        _host = _toPointer(other);
        return *this;
    }
    template <typename TPos>
    inline typename Reference<ModifiedString>::Type
    operator[](TPos pos)
    {
        return value(*this, pos);
    }
    template <typename TPos>
    inline typename Reference<ModifiedString const>::Type
    operator[](TPos pos) const
    {
        return value(*this, pos);
    }
};
// ==========================================================================
// Metafunctions
// ==========================================================================
// --------------------------------------------------------------------------
// Metafunction Cargo [ModifiedIterator]
// --------------------------------------------------------------------------
// --------------------------------------------------------------------------
// Metafunction Cargo [ModifiedIterator]
// --------------------------------------------------------------------------
// The iterator's cargo is the ModView functor wrapper.
template <typename THost, typename TFunctor>
struct Cargo<ModifiedIterator<THost, ModView<TFunctor> > >
{
    typedef ModViewCargo<TFunctor> Type;
};
// --------------------------------------------------------------------------
// Metafunction Value [ModifiedIterator]
// --------------------------------------------------------------------------
// The value type is the functor's result type, stripped of const so it
// can be stored in the mutable tmp_value buffer.
template <typename THost, typename TFunctor>
struct Value<ModifiedIterator<THost, ModView<TFunctor> > >
{
    typedef typename TFunctor::result_type TResult_;
    typedef typename RemoveConst_<TResult_>::Type Type;
};
// --------------------------------------------------------------------------
// Metafunction GetValue [ModifiedIterator]
// --------------------------------------------------------------------------
// Values are computed on the fly, so GetValue is by value, same as Value.
template <typename THost, typename TFunctor>
struct GetValue<ModifiedIterator<THost, ModView<TFunctor> > > : Value<ModifiedIterator<THost, ModView<TFunctor> > >
{};
// --------------------------------------------------------------------------
// Metafunction Reference [ModifiedIterator]
// --------------------------------------------------------------------------
// References resolve to the value type (a reference into tmp_value is
// returned by value(), not a reference into the host).
template <typename THost, typename TFunctor>
struct Reference<ModifiedIterator<THost, ModView<TFunctor> > >
{
    typedef typename Value<ModifiedIterator<THost, ModView<TFunctor> > >::Type Type;
};
// --------------------------------------------------------------------------
// Metafunction Cargo [ModifiedString]
// --------------------------------------------------------------------------
// The string's cargo is likewise the ModView functor wrapper.
template <typename THost, typename TFunctor>
struct Cargo< ModifiedString<THost, ModView<TFunctor> > >
{
    typedef ModViewCargo<TFunctor> Type;
};
// ==========================================================================
// Functions
// ==========================================================================
// --------------------------------------------------------------------------
// Function value() [ModifiedIterator]
// --------------------------------------------------------------------------
// value(): apply the functor to the current host value, cache the result
// in me.tmp_value, and return it.  NOTE: the returned reference is only
// valid until the next value() call on the same iterator.
template <typename THost, typename TFunctor>
inline typename Reference<ModifiedIterator<THost, ModView<TFunctor> > >::Type
value(ModifiedIterator<THost, ModView<TFunctor> > & me)
{
    me.tmp_value = cargo(me).func(getValue(host(me)));
    return me.tmp_value;
}
// const overload; tmp_value is mutable, so const access still caches.
template <typename THost, typename TFunctor>
inline typename Reference<ModifiedIterator<THost, ModView<TFunctor> > const>::Type
value(ModifiedIterator<THost, ModView<TFunctor> > const & me)
{
    me.tmp_value = cargo(me).func(getValue(host(me)));
    return me.tmp_value;
}
// --------------------------------------------------------------------------
// Function getValue() [ModifiedIterator]
// --------------------------------------------------------------------------
// getValue(): compute and return the transformed value by value, without
// touching the tmp_value cache.
template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > >::Type
getValue(ModifiedIterator<THost, ModView<TFunctor> > & me)
{
    return cargo(me).func(getValue(host(me)));
}
template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > const>::Type
getValue(ModifiedIterator<THost, ModView<TFunctor> > const & me)
{
    return cargo(me).func(getValue(host(me)));
}
// --------------------------------------------------------------------------
// Function value() [ModifiedString]
// --------------------------------------------------------------------------
// value(): transform the host value at `pos`, cache it in me.tmp_value,
// and return a reference to the cache (invalidated by the next access).
template <typename THost, typename TFunctor, typename TPos>
inline typename Reference<ModifiedString<THost, ModView<TFunctor> > >::Type
value(ModifiedString<THost, ModView<TFunctor> > & me, TPos pos)
{
    me.tmp_value = cargo(me).func(getValue(host(me), pos));
    return me.tmp_value;
}
// const overload; the mutable tmp_value buffer makes this legal.
template <typename THost, typename TFunctor, typename TPos>
inline typename Reference<ModifiedString<THost, ModView<TFunctor> > const>::Type
value(ModifiedString<THost, ModView<TFunctor> > const & me, TPos pos)
{
    me.tmp_value = cargo(me).func(getValue(host(me), pos));
    return me.tmp_value;
}
// --------------------------------------------------------------------------
// Function value() [ModifiedString]
// --------------------------------------------------------------------------
// template <typename THost, typename TFunctor, typename TSpec, typename TPos>
// inline typename Reference<Segment<ModifiedString<THost, ModView<TFunctor> >, TSpec> >::Type
// value(Segment<ModifiedString<THost, ModView<TFunctor> >, TSpec> & me, TPos pos)
// {
// host(me).tmp_value = cargo(host(me)).func(*(begin(me, Standard()) + pos));
// return host(me).tmp_value;
// }
//
// template <typename THost, typename TFunctor, typename TSpec, typename TPos>
// inline typename Reference<Segment<ModifiedString<THost, ModView<TFunctor> >, TSpec> const>::Type
// value(Segment<ModifiedString<THost, ModView<TFunctor> >, TSpec> const & me, TPos pos)
// {
// host(me).tmp_value = cargo(host(me)).func(*(begin(me, Standard()) + pos));
// return host(me).tmp_value;
// }
// --------------------------------------------------------------------------
// Function getValue() [ModifiedString]
// --------------------------------------------------------------------------
// getValue(): transformed value at `pos`, returned by value (no caching).
template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > >::Type
getValue(ModifiedString<THost, ModView<TFunctor> > & me, TPos pos)
{
    return cargo(me).func(getValue(host(me), pos));
}
template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > const>::Type
getValue(ModifiedString<THost, ModView<TFunctor> > const & me, TPos pos)
{
    return cargo(me).func(getValue(host(me), pos));
}
// --------------------------------------------------------------------------
// Function getValue() [ModifiedString]
// --------------------------------------------------------------------------
// Segment overloads: the functor is applied to the raw host value reached
// through the segment's iterator (begin + pos).
template <typename THost, typename TFunctor, typename TSpec, typename TPos>
inline typename GetValue<Segment<ModifiedString<THost, ModView<TFunctor> >, TSpec> >::Type
getValue(Segment<ModifiedString<THost, ModView<TFunctor> >, TSpec> & me, TPos pos)
{
    return cargo(me).func(*(begin(me, Standard()) + pos));
}
template <typename THost, typename TFunctor, typename TSpec, typename TPos>
inline typename GetValue<Segment<ModifiedString<THost, ModView<TFunctor> >, TSpec> const>::Type
getValue(Segment<ModifiedString<THost, ModView<TFunctor> >, TSpec> const & me, TPos pos)
{
    return cargo(me).func(*(begin(me, Standard()) + pos));
}
// --------------------------------------------------------------------------
// Function assignModViewFunctor()
// --------------------------------------------------------------------------
// Replace the functor stored in a ModView-modified string's cargo.
template <typename THost, typename TFunctor>
inline void
assignModViewFunctor(ModifiedString<THost, ModView<TFunctor> > & me, TFunctor const & functor)
{
    cargo(me).func = functor;
}
// --------------------------------------------------------------------------
// Function convert()
// --------------------------------------------------------------------------
// In-place map: overwrite every element of `sequence` with F(element).
// With OpenMP + SEQAN_PARALLEL the loop is parallelized (only worthwhile
// for > 1M elements, per the `if` clause); otherwise a plain iterator
// loop is used.
template < typename TSequence, typename TFunctor >
inline void
convert(TSequence & sequence, TFunctor const &F)
{
#if defined (_OPENMP) && defined (SEQAN_PARALLEL)
    // OpenMP does not support for loop with iterators. Therefore use index variables.
    typedef typename Position<TSequence>::Type TPos;
    typedef typename MakeSigned_<TPos>::Type TSignedPos;
    #pragma omp parallel for if(length(sequence) > 1000000)
    for(TSignedPos p = 0; p < (TSignedPos)length(sequence); ++p)
        sequence[p] = F(sequence[p]);
#else
    typedef typename Iterator<TSequence, Standard>::Type TIter;
    TIter it = begin(sequence, Standard());
    TIter itEnd = end(sequence, Standard());
    for(; it != itEnd; ++it)
        *it = F(*it);
#endif
}
// Const-reference overload of convert().  NOTE(review): this still writes
// through `sequence` -- it only compiles for sequence types whose const
// iterators/proxies permit assignment (e.g. SeqAn modified views); it is
// not a read-only operation despite the signature.
template < typename TSequence, typename TFunctor >
inline void
convert(TSequence const & sequence, TFunctor const &F)
{
#if defined (_OPENMP) && defined (SEQAN_PARALLEL)
    // OpenMP does not support for loop with iterators. Therefore use index variables.
    typedef typename Position<TSequence>::Type TPos;
    typedef typename MakeSigned_<TPos>::Type TSignedPos;
    #pragma omp parallel for if(length(sequence) > 1000000)
    for(TSignedPos p = 0; p < (TSignedPos)length(sequence); ++p)
        sequence[p] = F(sequence[p]);
#else
    typedef typename Iterator<TSequence const, Standard>::Type TIter;
    TIter it = begin(sequence, Standard());
    TIter itEnd = end(sequence, Standard());
    for(; it != itEnd; ++it)
        *it = F(*it);
#endif
}
} // namespace seqan
#endif // SEQAN_MODIFIER_MODIFIER_VIEW_H_
|
mansoo_mat.h | #pragma once
#include <iostream>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
// Square matrix of element type T backed by a single heap array of
// size*size elements, with plain and OpenMP-parallel multiplication.
template <typename T>
class MSMat {
public:
    MSMat(int size);
    ~MSMat();
    // Bounds-checked element write; false on failure.
    bool set(std::uint16_t row, std::uint16_t col, T value);
    // this += a * b (caller should fill_zeros() first); false on mismatch.
    bool mul(MSMat<T> &a, MSMat<T> &b);
    // OpenMP variant of mul(); false when built without USE_OPENMP.
    bool mul_using_mp(MSMat<T> &a, MSMat<T> &b);
    bool fill_zeros();
    bool fill_ones();
    bool fill_random_number();
    void print();
private:
    int size_ = 0;        // rows == cols
    int total_size_ = 0;  // size_ * size_
    T* matrix_ = NULL;    // row-major element buffer, owned
};
// Construct a size x size matrix; sizes below 2 are clamped to 2 with a
// diagnostic.  Elements are left uninitialized -- call fill_*() first.
template<typename T>
MSMat<T>::MSMat(int size) {
    size_ = size;
    if (size_ < 2) {
        size_ = 2;
        std::cout << "Mat<T>::Mat, The matrix size must be 2 or greater." << std::endl;
    }
    total_size_ = size_ * size_;
    // Use the nothrow form: plain new[] throws std::bad_alloc on failure,
    // so the NULL check below could never fire in the original.
    matrix_ = new (std::nothrow) T[total_size_];
    if (matrix_ == NULL)
        std::cout << "Mat<T>::Mat, Not enough memory." << std::endl;
}
// Release the element buffer.  matrix_ was allocated with new[], so it
// must be freed with delete[] -- the original's plain `delete` was
// undefined behavior for array allocations.
template<typename T>
MSMat<T>::~MSMat() {
    if (matrix_ != NULL) {
        delete[] matrix_;
        matrix_ = NULL;   // defensive: guards against double-destroy misuse
    }
}
// Write `value` at (row, col); returns false on a missing buffer or an
// out-of-range position.
template<typename T>
bool MSMat<T>::set(std::uint16_t row, std::uint16_t col, T value) {
    if (matrix_ == NULL)
        return false;
    std::uint32_t index = row * size_ + col;
    if (index >= total_size_) {
        std::cout << "Mat<T>::set, Out of range of matrix." << std::endl;
        return false;
    }
    matrix_[index] = value;
    return true;
}
// this += a * b, iterating so the innermost loop walks b and c rows
// contiguously (cache-friendly).  NOTE: accumulates with +=, so the
// caller must zero *this (fill_zeros()) before the first call.
template<typename T>
bool MSMat<T>::mul(MSMat<T> &a, MSMat<T> &b) {
    if (a.size_ != b.size_ || a.size_ != this->size_) {
        std::cout << "Mat<T>::mul, Out of range of matrix." << std::endl;
        return false;
    }
    if (a.matrix_ == NULL || b.matrix_ == NULL || this->matrix_ == NULL) {
        std::cout << "Mat<T>::mul, Not enough memory." << std::endl;
        return false;
    }
    int idx_1, idx_2, idx_3;
    T* relative_mat_a;  // &a(idx_2, idx_1): the scalar reused across the row
    T* relative_mat_b;  // start of row idx_1 of b
    T* relative_mat_c;  // start of row idx_2 of the result
    {
        for (idx_1 = 0; idx_1 < size_; idx_1++) {
            for (idx_2 = 0; idx_2 < size_; idx_2++) {
                relative_mat_a = a.matrix_ + idx_2 * size_ + idx_1;
                relative_mat_b = b.matrix_ + idx_1 * size_;
                relative_mat_c = this->matrix_ + idx_2 * size_;
                // c(idx_2, :) += a(idx_2, idx_1) * b(idx_1, :)
                for (idx_3 = 0; idx_3 < size_; idx_3++) {
                    *relative_mat_c += *relative_mat_a * *relative_mat_b;
                    relative_mat_c++;
                    relative_mat_b++;
                }
            }
        }
    }
    return true;
}
// OpenMP-parallel version of mul(): this += a * b with the outer loop
// split across threads.  Returns false immediately (with a message) when
// the build lacks USE_OPENMP.  Same contract as mul(): destination must
// be pre-zeroed by the caller.
template<typename T>
bool MSMat<T>::mul_using_mp(MSMat<T> &a, MSMat<T> &b) {
#ifndef USE_OPENMP
    std::cout << "MSMat<T>::mul_using_mp(), This system does not support the OpenMP. Please check it!" << std::endl;
    return false;
#endif
    if (a.size_ != b.size_ || a.size_ != this->size_) {
        std::cout << "Mat<T>::mul, Out of range of matrix." << std::endl;
        return false;
    }
    if (a.matrix_ == NULL || b.matrix_ == NULL || this->matrix_ == NULL) {
        std::cout << "Mat<T>::mul, Not enough memory." << std::endl;
        return false;
    }
    int idx_1, idx_2, idx_3;
    T* relative_mat_a;
    T* relative_mat_b;
    T* relative_mat_c;
    int tid, nthreads;
    int chunk = 10;
    T* mat_a = a.matrix_;
    T* mat_b = b.matrix_;
    T* mat_c = matrix_;
#ifdef USE_OPENMP
    // relative_mat_* are shared but each thread overwrites them before use
    // inside its own iterations of the omp-for below.
    #pragma omp parallel shared(mat_a, mat_b, mat_c, relative_mat_a, relative_mat_b, relative_mat_c, nthreads) private( idx_1, idx_2, idx_3, tid)
#endif
    {
#ifdef USE_OPENMP
        tid = omp_get_thread_num();
        if (tid == 0) {
            nthreads = omp_get_num_threads();
            // printf("Starting matrix multiple example with %d threads\n", nthreads);
        }
#endif
#ifdef USE_OPENMP
        #pragma omp for schedule(static)
#endif
        for (idx_1 = 0; idx_1 < size_; idx_1++) {
            for (idx_2 = 0; idx_2 < size_; idx_2++) {
                relative_mat_a = mat_a + idx_2 * size_ + idx_1;
                relative_mat_b = mat_b + idx_1 * size_;
                relative_mat_c = mat_c + idx_2 * size_;
                // c(idx_2, :) += a(idx_2, idx_1) * b(idx_1, :)
                for (idx_3 = 0; idx_3 < size_; idx_3++) {
                    *relative_mat_c += *relative_mat_a * *relative_mat_b;
                    relative_mat_c++;
                    relative_mat_b++;
                }
            }
        }
    }
    return true;
}
// Set every element to zero; always reports success.
template<typename T>
bool MSMat<T>::fill_zeros() {
    for (int i = total_size_; i-- > 0; )
        matrix_[i] = 0;
    return true;
}
// Set every element to one; always reports success.
template<typename T>
bool MSMat<T>::fill_ones() {
    T* p = matrix_;
    for (T* const last = matrix_ + total_size_; p != last; ++p)
        *p = 1;
    return true;
}
// Fill with pseudo-random values in [0, 99].  Note: reseeds the global
// RNG with the current second on every call, so calls within the same
// second produce identical matrices.
template<typename T>
bool MSMat<T>::fill_random_number() {
    srand(time(0));
    for (int index = 0; index < total_size_; index++)
        matrix_[index] = rand() % 100;
    return true;
}
template<typename T>
void MSMat<T>::print() {
  // Dump the matrix row by row, tab-separated; no-op when unallocated.
  if (matrix_ == NULL)
    return;
  for (auto row = 0; row < size_; ++row) {
    for (auto col = 0; col < size_; ++col) {
      std::cout << matrix_[row * size_ + col] << "\t";
    }
    std::cout << "\n";
  }
  std::cout << "\n";
}
|
TimeCluster.h | /******************************************************************************
** Copyright (c) 2015, Intel Corporation **
** All rights reserved. **
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. Neither the name of the copyright holder nor the names of its **
** contributors may be used to endorse or promote products derived **
** from this software without specific prior written permission. **
** **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
/**
* @file
* This file is part of SeisSol.
*
* @author Alex Breuer (breuer AT mytum.de, http://www5.in.tum.de/wiki/index.php/Dipl.-Math._Alexander_Breuer)
*
* @section LICENSE
* Copyright (c) 2013-2015, SeisSol Group
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* @section DESCRIPTION
* LTS cluster in SeisSol.
**/
#ifndef TIMECLUSTER_H_
#define TIMECLUSTER_H_
#ifdef USE_MPI
#include <mpi.h>
#include <list>
#endif
#include <Initializer/typedefs.hpp>
#include <SourceTerm/typedefs.hpp>
#include <utils/logger.h>
#include <Initializer/LTS.h>
#include <Initializer/tree/LTSTree.hpp>
#include <Kernels/Time.h>
#include <Kernels/Local.h>
#include <Kernels/Neighbor.h>
#include <Kernels/DynamicRupture.h>
#include <Kernels/Plasticity.h>
#include <Solver/FreeSurfaceIntegrator.h>
#include <Monitoring/LoopStatistics.h>
#include <Kernels/TimeCommon.h>
#ifdef ACL_DEVICE
#include <device.h>
#include <Solver/Pipeline/DrPipeline.h>
#endif
// Forward declarations to keep this header's include footprint small.
namespace seissol {
  namespace time_stepping {
    class TimeCluster;
  }
  namespace kernels {
    class ReceiverCluster;
  }
}
/**
* Time cluster, which represents a collection of elements having the same time step width.
**/
class seissol::time_stepping::TimeCluster
{
public:
    //! cluster id on this rank
    const unsigned int m_clusterId;

    //! global cluster id
    const unsigned int m_globalClusterId;

private:
    //! true if plasticity is enabled for this cluster
    bool usePlasticity;

    //! number of time steps
    unsigned long m_numberOfTimeSteps;

    /*
     * integrators
     */
    //! time kernel
    kernels::Time m_timeKernel;

    //! local kernel
    kernels::Local m_localKernel;

    //! neighbor kernel
    kernels::Neighbor m_neighborKernel;

    //! dynamic rupture kernel
    kernels::DynamicRupture m_dynamicRuptureKernel;

    /*
     * mesh structure
     */
    struct MeshStructure *m_meshStructure;

    /*
     * global data
     */
    //! global data structures
    GlobalData *m_globalDataOnHost{nullptr};
    GlobalData *m_globalDataOnDevice{nullptr};
#ifdef ACL_DEVICE
    device::DeviceInstance& device = device::DeviceInstance::getInstance();
    dr::pipeline::DrPipeline drPipeline;
#endif

    /*
     * element data and mpi queues
     */
#ifdef USE_MPI
    //! pending copy region sends
    std::list< MPI_Request* > m_sendQueue;

    //! pending ghost region receives
    std::list< MPI_Request* > m_receiveQueue;
#endif
    seissol::initializers::TimeCluster* m_clusterData;
    seissol::initializers::TimeCluster* m_dynRupClusterData;
    seissol::initializers::LTS* m_lts;
    seissol::initializers::DynamicRupture* m_dynRup;

    //! time step width of the performed time step.
    double m_timeStepWidth;

    //! Mapping of cells to point sources
    sourceterm::CellToPointSourcesMapping const* m_cellToPointSources;

    //! Number of mapping of cells to point sources
    unsigned m_numberOfCellToPointSourcesMappings;

    //! Point sources
    sourceterm::PointSources const* m_pointSources;

    //! true if dynamic rupture faces are present
    bool m_dynamicRuptureFaces;

    //! compute-part identifiers used to index the flop counters below
    enum ComputePart {
      LocalInterior = 0,
      NeighborInterior,
      DRNeighborInterior,
#ifdef USE_MPI
      LocalCopy,
      NeighborCopy,
      DRNeighborCopy,
#endif
      DRFrictionLawCopy,
      DRFrictionLawInterior,
      PlasticityCheck,
      PlasticityYield,
      NUM_COMPUTE_PARTS
    };

    long long m_flops_nonZero[NUM_COMPUTE_PARTS];
    long long m_flops_hardware[NUM_COMPUTE_PARTS];

    //! Tv parameter for plasticity
    double m_tv;

    //! Relax time for plasticity
    double m_oneMinusIntegratingFactor;

    //! Stopwatch of TimeManager
    LoopStatistics* m_loopStatistics;
    unsigned m_regionComputeLocalIntegration;
    unsigned m_regionComputeNeighboringIntegration;
    unsigned m_regionComputeDynamicRupture;

    //! receiver cluster for this time cluster (may be nullptr)
    kernels::ReceiverCluster* m_receiverCluster;

#ifdef USE_MPI
    /**
     * Receives the copy layer data from relevant neighboring MPI clusters.
     **/
    void receiveGhostLayer();

    /**
     * Sends the associated regions of the copy layer to relevant neighboring MPI clusters
     **/
    void sendCopyLayer();

#if defined(_OPENMP) && defined(USE_COMM_THREAD)
    /**
     * Inits Receives the copy layer data from relevant neighboring MPI clusters, active when using communication thread
     **/
    void initReceiveGhostLayer();

    /**
     * Inits Sends the associated regions of the copy layer to relevant neighboring MPI clusters, active when using communication thread
     **/
    void initSendCopyLayer();

    /**
     * Waits until the initialization of the communication is finished.
     **/
    void waitForInits();
#endif

    /**
     * Tests for pending ghost layer communication.
     **/
    bool testForGhostLayerReceives();

    /**
     * Tests for pending copy layer communication.
     **/
    bool testForCopyLayerSends();
#endif

    /**
     * Writes the receiver output if applicable (receivers present, receivers have to be written).
     **/
    void writeReceivers();

    /**
     * Computes the source terms if applicable.
     **/
    void computeSources();

    /**
     * Computes dynamic rupture.
     *
     * @param layerData dynamic rupture layer to work on.
     **/
    void computeDynamicRupture( seissol::initializers::Layer& layerData );

    /**
     * Computes all cell local integration.
     *
     * This are:
     *  * time integration
     *  * volume integration
     *  * local boundary integration
     *
     * Remark: After this step the DOFs are only updated half with the boundary contribution
     *         of the neighborings cells missing.
     *
     * @param i_layerData layer (copy or interior) whose cells are integrated.
     **/
    void computeLocalIntegration( seissol::initializers::Layer& i_layerData );

    /**
     * Computes the contribution of the neighboring cells to the boundary integral.
     *
     * Remark: After this step (in combination with the local integration) the DOFs are at the next time step.
     * TODO: This excludes dynamic rupture contribution.
     *
     * @param i_layerData layer (copy or interior) whose cells are integrated.
     **/
    void computeNeighboringIntegration( seissol::initializers::Layer& i_layerData );

#ifndef ACL_DEVICE
    /**
     * Shared implementation of the neighboring integration for the plastic and
     * non-plastic case (the template parameter intentionally shadows the member
     * of the same name; callers select the variant at compile time).
     *
     * @return pair of (non-zero, hardware) plasticity flops performed.
     **/
    template<bool usePlasticity>
    std::pair<long, long> computeNeighboringIntegrationImplementation(seissol::initializers::Layer& i_layerData) {
      SCOREP_USER_REGION( "computeNeighboringIntegration", SCOREP_USER_REGION_TYPE_FUNCTION )

      m_loopStatistics->begin(m_regionComputeNeighboringIntegration);

      real* (*faceNeighbors)[4] = i_layerData.var(m_lts->faceNeighbors);
      CellDRMapping (*drMapping)[4] = i_layerData.var(m_lts->drMapping);
      CellLocalInformation* cellInformation = i_layerData.var(m_lts->cellInformation);
      PlasticityData* plasticity = i_layerData.var(m_lts->plasticity);
      real (*pstrain)[7] = i_layerData.var(m_lts->pstrain);
      unsigned numberOfTetsWithPlasticYielding = 0;

      kernels::NeighborData::Loader loader;
      loader.load(*m_lts, i_layerData);

      real *l_timeIntegrated[4];
      real *l_faceNeighbors_prefetch[4];

#ifdef _OPENMP
#pragma omp parallel for schedule(static) default(none) private(l_timeIntegrated, l_faceNeighbors_prefetch) shared(cellInformation, loader, faceNeighbors, pstrain, i_layerData, plasticity, drMapping) reduction(+:numberOfTetsWithPlasticYielding)
#endif
      for( unsigned int l_cell = 0; l_cell < i_layerData.getNumberOfCells(); l_cell++ ) {
        auto data = loader.entry(l_cell);
        seissol::kernels::TimeCommon::computeIntegrals(m_timeKernel,
                                                       data.cellInformation.ltsSetup,
                                                       data.cellInformation.faceTypes,
                                                       m_subTimeStart,
                                                       m_timeStepWidth,
                                                       faceNeighbors[l_cell],
#ifdef _OPENMP
                                                       *reinterpret_cast<real (*)[4][tensor::I::size()]>(&(m_globalDataOnHost->integrationBufferLTS[omp_get_thread_num()*4*tensor::I::size()])),
#else
                                                       // fixed: the member is m_globalDataOnHost; the previous
                                                       // "m_globalData" did not exist and broke non-OpenMP builds.
                                                       *reinterpret_cast<real (*)[4][tensor::I::size()]>(m_globalDataOnHost->integrationBufferLTS),
#endif
                                                       l_timeIntegrated);

#ifdef ENABLE_MATRIX_PREFETCH
#pragma message("the current prefetch structure (flux matrices and tDOFs is tuned for higher order and shouldn't be harmful for lower orders")
        l_faceNeighbors_prefetch[0] = (cellInformation[l_cell].faceTypes[1] != FaceType::dynamicRupture) ?
          faceNeighbors[l_cell][1] :
          drMapping[l_cell][1].godunov;
        l_faceNeighbors_prefetch[1] = (cellInformation[l_cell].faceTypes[2] != FaceType::dynamicRupture) ?
          faceNeighbors[l_cell][2] :
          drMapping[l_cell][2].godunov;
        l_faceNeighbors_prefetch[2] = (cellInformation[l_cell].faceTypes[3] != FaceType::dynamicRupture) ?
          faceNeighbors[l_cell][3] :
          drMapping[l_cell][3].godunov;

        // fourth face's prefetches
        if (l_cell < (i_layerData.getNumberOfCells()-1) ) {
          l_faceNeighbors_prefetch[3] = (cellInformation[l_cell+1].faceTypes[0] != FaceType::dynamicRupture) ?
            faceNeighbors[l_cell+1][0] :
            drMapping[l_cell+1][0].godunov;
        } else {
          l_faceNeighbors_prefetch[3] = faceNeighbors[l_cell][3];
        }
#endif

        m_neighborKernel.computeNeighborsIntegral( data,
                                                   drMapping[l_cell],
#ifdef ENABLE_MATRIX_PREFETCH
                                                   l_timeIntegrated, l_faceNeighbors_prefetch
#else
                                                   l_timeIntegrated
#endif
                                                   );

        if constexpr (usePlasticity) {
          numberOfTetsWithPlasticYielding += seissol::kernels::Plasticity::computePlasticity( m_oneMinusIntegratingFactor,
                                                                                             m_timeStepWidth,
                                                                                             m_tv,
                                                                                             m_globalDataOnHost,
                                                                                             &plasticity[l_cell],
                                                                                             data.dofs,
                                                                                             pstrain[l_cell] );
        }
#ifdef INTEGRATE_QUANTITIES
        // NOTE(review): 'dofs' is not declared in this scope; this path only
        // compiles with INTEGRATE_QUANTITIES disabled -- verify before enabling.
        seissol::SeisSol::main.postProcessor().integrateQuantities( m_timeStepWidth,
                                                                    i_layerData,
                                                                    l_cell,
                                                                    dofs[l_cell] );
#endif // INTEGRATE_QUANTITIES
      }

      const long long nonZeroFlopsPlasticity = i_layerData.getNumberOfCells() * m_flops_nonZero[PlasticityCheck] + numberOfTetsWithPlasticYielding * m_flops_nonZero[PlasticityYield];
      const long long hardwareFlopsPlasticity = i_layerData.getNumberOfCells() * m_flops_hardware[PlasticityCheck] + numberOfTetsWithPlasticYielding * m_flops_hardware[PlasticityYield];

      m_loopStatistics->end(m_regionComputeNeighboringIntegration, i_layerData.getNumberOfCells());
      return {nonZeroFlopsPlasticity, hardwareFlopsPlasticity};
    }
#endif // ACL_DEVICE

    void computeLocalIntegrationFlops( unsigned numberOfCells,
                                       CellLocalInformation const* cellInformation,
                                       long long& nonZeroFlops,
                                       long long& hardwareFlops );

    void computeNeighborIntegrationFlops( unsigned numberOfCells,
                                          CellLocalInformation const* cellInformation,
                                          CellDRMapping const (*drMapping)[4],
                                          long long& nonZeroFlops,
                                          long long& hardwareFlops,
                                          long long& drNonZeroFlops,
                                          long long& drHardwareFlops );

    void computeDynamicRuptureFlops( seissol::initializers::Layer& layerData,
                                     long long& nonZeroFlops,
                                     long long& hardwareFlops );

    void computeFlops();

    //! Update relax time for plasticity
    void updateRelaxTime() {
      m_oneMinusIntegratingFactor = (m_tv > 0.0) ? 1.0 - exp(-m_timeStepWidth / m_tv) : 1.0;
    }

public:
    //! flags identifiying if the respective part is allowed to be updated
    struct {
      bool localCopy;
      bool neighboringCopy;
      bool localInterior;
      bool neighboringInterior;
    } m_updatable;

#ifdef USE_MPI
    //! send true LTS buffers
    volatile bool m_sendLtsBuffers;
#endif

    //! reset lts buffers before performing time predictions
    volatile bool m_resetLtsBuffers;

    /* Sub start time of width respect to the next cluster; use 0 if not relevant, for example in GTS.
     * LTS requires to evaluate a partial time integration of the derivatives. The point zero in time refers to the derivation of the surrounding time derivatives, which
     * coincides with the last completed time step of the next cluster. The start/end of the time step is the start/end of this clusters time step relative to the zero point.
     *   Example:
     *   <verb>
     *                                              5 dt
     *   |-----------------------------------------------------------------------------------------| <<< Time stepping of the next cluster (Cn) (5x larger than the current).
     *   |                 |                 |                 |                 |                 |
     *   |*****************|*****************|+++++++++++++++++|                 |                 | <<< Status of the current cluster.
     *   |                 |                 |                 |                 |                 |
     *   |-----------------|-----------------|-----------------|-----------------|-----------------| <<< Time stepping of the current cluster (Cc).
     *   0                 dt               2dt               3dt               4dt               5dt
     *   </verb>
     *
     *   In the example above two clusters are illustrated: Cc and Cn. Cc is the current cluster under consideration and Cn the next cluster with respect to LTS terminology.
     *   Cn is currently at time 0 and provided Cc with derivatives valid until 5dt. Cc updated already twice and did its last full update to reach 2dt (== subTimeStart). Next
     *   computeNeighboringCopy is called to accomplish the next full update to reach 3dt (+++). Besides working on the buffers of own buffers and those of previous clusters,
     *   Cc needs to evaluate the time prediction of Cn in the interval [2dt, 3dt].
     */
    double m_subTimeStart;

    //! number of full updates the cluster has performed since the last synchronization
    unsigned int m_numberOfFullUpdates;

    //! simulation time of the last full update (this is a complete volume and boundary integration)
    double m_fullUpdateTime;

    //! final time of the prediction (derivatives and time integrated DOFs).
    double m_predictionTime;

    //! time of the next receiver output
    double m_receiverTime;

    /**
     * Constructs a new LTS cluster.
     *
     * @param i_clusterId id of this cluster with respect to the current rank.
     * @param i_globalClusterId global id of this cluster.
     * @param usePlasticity true if using plasticity.
     * @param i_meshStructure mesh structure of this cluster.
     * @param i_globalData global data (host and device variants).
     * @param i_clusterData LTS data of this cluster.
     * @param i_dynRupClusterData dynamic rupture data of this cluster.
     * @param i_lts LTS lookup structure.
     * @param i_dynRup dynamic rupture lookup structure.
     * @param i_loopStatistics loop statistics of the TimeManager.
     **/
    TimeCluster(unsigned int i_clusterId,
                unsigned int i_globalClusterId,
                bool usePlasticity,
                MeshStructure *i_meshStructure,
                CompoundGlobalData i_globalData,
                seissol::initializers::TimeCluster* i_clusterData,
                seissol::initializers::TimeCluster* i_dynRupClusterData,
                seissol::initializers::LTS* i_lts,
                seissol::initializers::DynamicRupture* i_dynRup,
                LoopStatistics* i_loopStatistics);

    /**
     * Destructor of a LTS cluster.
     * TODO: Currently prints only statistics in debug mode.
     **/
    ~TimeCluster();

    double timeStepWidth() const {
      return m_timeStepWidth;
    }

    //! Sets the time step width and propagates it to the plasticity relax time and the DR kernel.
    void setTimeStepWidth(double timestep) {
      m_timeStepWidth = timestep;
      updateRelaxTime();
      m_dynamicRuptureKernel.setTimeStepWidth(timestep);
    }

    /**
     * Adds a source to the cluster.
     *
     * @param i_meshId mesh id of the point of interest.
     **/
    void addSource( unsigned int i_meshId );

    /**
     * Sets the pointer to the cluster's point sources
     *
     * @param i_cellToPointSources Contains mappings of 1 cell offset to m point sources
     * @param i_numberOfCellToPointSourcesMappings Size of i_cellToPointSources
     * @param i_pointSources pointer to all point sources used on this cluster
     */
    void setPointSources( sourceterm::CellToPointSourcesMapping const* i_cellToPointSources,
                          unsigned i_numberOfCellToPointSourcesMappings,
                          sourceterm::PointSources const* i_pointSources );

    void setReceiverCluster( kernels::ReceiverCluster* receiverCluster) {
      m_receiverCluster = receiverCluster;
    }

    /**
     * Set Tv constant for plasticity.
     */
    void setTv(double tv) {
      m_tv = tv;
      updateRelaxTime();
    }

#ifdef USE_MPI
    /**
     * Computes cell local integration of all cells in the copy layer and initiates the corresponding communication.
     * LTS buffers (updated more than once in general) are reset to zero up on request; GTS-Buffers are reset independently of the request.
     *
     * Cell local integration is:
     *  * time integration
     *  * volume integration
     *  * local boundary integration
     *
     * @return true if the update (incl. communication requests), false if the update failed due to unfinshed sends of copy data to MPI neighbors.
     **/
    bool computeLocalCopy();
#endif

    /**
     * Computes cell local integration of all cells in the interior.
     * LTS buffers (updated more than once in general) are reset to zero up on request; GTS-Buffers are reset independently of the request.
     *
     * Cell local integration is:
     *  * time integration
     *  * volume integration
     *  * local boundary integration
     **/
    void computeLocalInterior();

#ifdef USE_MPI
    /**
     * Computes the neighboring contribution to the boundary integral for all cells in the copy layer.
     *
     * @return true if the update (incl. communication requests), false if the update failed due to missing data from neighboring ranks.
     **/
    bool computeNeighboringCopy();
#endif

    /**
     * Computes the neighboring contribution to the boundary integral for all cells in the interior.
     **/
    void computeNeighboringInterior();

#if defined(_OPENMP) && defined(USE_MPI) && defined(USE_COMM_THREAD)
    /**
     * Tests for pending ghost layer communication, active when using communication thread
     **/
    void pollForGhostLayerReceives();

    /**
     * Polls for pending copy layer communication, active when using communication thread
     **/
    void pollForCopyLayerSends();

    /**
     * Start Receives the copy layer data from relevant neighboring MPI clusters, active when using communication thread
     **/
    void startReceiveGhostLayer();

    /**
     * start Sends the associated regions of the copy layer to relevant neighboring MPI clusters, active when using communication thread
     **/
    void startSendCopyLayer();
#endif
};
#endif
|
VerletClusterLists.h | /**
* @file VerletClusterLists.h
* @author nguyen
* @date 14.10.18
*/
#pragma once
#include <cmath>
#include "autopas/cells/FullParticleCell.h"
#include "autopas/containers/CellBasedParticleContainer.h"
#include "autopas/containers/CompatibleTraversals.h"
#include "autopas/containers/ParticleDeletedObserver.h"
#include "autopas/containers/UnknowingCellBorderAndFlagManager.h"
#include "autopas/containers/cellPairTraversals/BalancedTraversal.h"
#include "autopas/containers/verletClusterLists/ClusterTower.h"
#include "autopas/containers/verletClusterLists/VerletClusterListsRebuilder.h"
#include "autopas/containers/verletClusterLists/traversals/VCLTraversalInterface.h"
#include "autopas/iterators/ParticleIterator.h"
#include "autopas/iterators/RegionParticleIterator.h"
#include "autopas/options/LoadEstimatorOption.h"
#include "autopas/particles/OwnershipState.h"
#include "autopas/utils/ArrayMath.h"
#include "autopas/utils/Timer.h"
namespace autopas {
/**
* Particles are divided into clusters.
* The VerletClusterLists class uses neighborhood lists for each cluster
* to calculate pairwise interactions of particles.
* It is optimized for a constant, i.e. particle independent, cutoff radius of
* the interaction.
* @tparam Particle
*/
template <class Particle>
class VerletClusterLists : public ParticleContainerInterface<Particle>, public internal::ParticleDeletedObserver {
public:
/**
 * Defines a cluster range used in the static cluster-thread-partition.
 *
 * A range addresses clusters by the tower that holds the first cluster, the
 * offset inside that tower, and a running count across subsequent towers.
 */
struct ClusterRange {
  /**
   * The index of the tower that contains the first cluster.
   */
  size_t startTowerIndex{};
  /**
   * The index of the first cluster in its tower.
   */
  size_t startIndexInTower{};
  /**
   * The number of clusters in the range.
   */
  size_t numClusters{};
};
/**
* Constructor of the VerletClusterLists class.
* The neighbor lists are build using a estimated density.
* The box is divided into cuboids with roughly the
* same side length.
* @param boxMin The lower corner of the domain.
* @param boxMax The upper corner of the domain.
* @param cutoff The cutoff radius of the interaction.
* @param skin The skin radius.
* @param clusterSize Number of particles per cluster.
* @param loadEstimator load estimation algorithm for balanced traversals.
*/
VerletClusterLists(const std::array<double, 3> boxMin, const std::array<double, 3> boxMax, double cutoff, double skin,
                   size_t clusterSize, LoadEstimatorOption loadEstimator = LoadEstimatorOption::none)
    : ParticleContainerInterface<Particle>(),
      _clusterSize{clusterSize},
      _numClusters{0},
      _numTowersPerInteractionLength{0},
      // one insertion buffer per thread so addParticleImpl needs no locking
      _particlesToAdd(autopas_get_max_threads()),
      _boxMin{boxMin},
      _boxMax{boxMax},
      // halo box extends the domain by the interaction length (cutoff + skin) on each side
      _haloBoxMin{utils::ArrayMath::subScalar(boxMin, cutoff + skin)},
      _haloBoxMax{utils::ArrayMath::addScalar(boxMax, cutoff + skin)},
      _cutoff{cutoff},
      _skin{skin},
      _loadEstimator(loadEstimator) {
  // always have at least one tower.
  _towers.push_back(internal::ClusterTower<Particle>(_clusterSize));
}
/**
* @copydoc ParticleContainerInterface::getParticleCellTypeEnum()
*/
CellType getParticleCellTypeEnum() override { return CellType::ClusterTower; };
/**
* @copydoc ParticleContainerInterface::getContainerType()
*/
[[nodiscard]] ContainerOption getContainerType() const override { return ContainerOption::verletClusterLists; }
/**
* Generates the load estimation function depending on _loadEstimator.
* @return load estimator function object.
*/
BalancedTraversal::EstimatorFunction getLoadEstimatorFunction() {
  switch (this->_loadEstimator) {
    case LoadEstimatorOption::neighborListLength: {
      return [&](const std::array<unsigned long, 3> &cellsPerDimension,
                 const std::array<unsigned long, 3> &lowerCorner, const std::array<unsigned long, 3> &upperCorner) {
        // the neighborListLength function defined for verletListsCells is not compatible with this container.
        // Instead, estimate the load as the summed neighbor-list length of all clusters
        // in the x/y tower range (the z extent of the corners is ignored: towers span the full z).
        unsigned long sum = 0;
        for (unsigned long x = lowerCorner[0]; x <= upperCorner[0]; x++) {
          for (unsigned long y = lowerCorner[1]; y <= upperCorner[1]; y++) {
            unsigned long cellLoad = 0;
            auto &tower = getTowerAtCoordinates(x, y);
            for (auto &cluster : tower.getClusters()) {
              cellLoad += cluster.getNeighbors().size();
            }
            sum += cellLoad;
          }
        }
        return sum;
      };
    }
    case LoadEstimatorOption::none:
      [[fallthrough]];
    default: {
      // Uniform estimate: every cell counts as 1.
      return
          [&](const std::array<unsigned long, 3> &cellsPerDimension, const std::array<unsigned long, 3> &lowerCorner,
              const std::array<unsigned long, 3> &upperCorner) { return 1; };
    }
  }
}
/**
* @copydoc ParticleContainerInterface::iteratePairwise()
*/
void iteratePairwise(TraversalInterface *traversal) override {
  // Pairwise iteration requires the neighbor lists to have been rebuilt.
  if (_isValid == ValidityState::cellsAndListsValid) {
    autopas::utils::ExceptionHandler::exception(
        "VerletClusterLists::iteratePairwise(): Trying to do a pairwise iteration, even though verlet lists are not "
        "valid.");
  }
  // Hand this container's towers to the traversal; reject traversals of the wrong type.
  auto *vclTraversal = dynamic_cast<VCLTraversalInterface<Particle> *>(traversal);
  if (vclTraversal == nullptr) {
    autopas::utils::ExceptionHandler::exception(
        "Trying to use a traversal of wrong type in VerletClusterLists::iteratePairwise. TraversalID: {}",
        traversal->getTraversalType());
  } else {
    vclTraversal->setClusterLists(*this);
    vclTraversal->setTowers(_towers);
  }
  // Balanced traversals additionally get a load estimator.
  auto *balancedTraversal = dynamic_cast<BalancedTraversal *>(traversal);
  if (balancedTraversal != nullptr) {
    balancedTraversal->setLoadEstimator(getLoadEstimatorFunction());
  }
  traversal->initTraversal();
  traversal->traverseParticlePairs();
  traversal->endTraversal();
}
/**
* Adds the given particle to the container. rebuildVerletLists() has to be called to have it actually sorted in.
* @param p The particle to add.
*/
void addParticleImpl(const Particle &p) override {
  // Adding a particle invalidates the tower structure until the next rebuild.
  _isValid = ValidityState::invalid;
  auto &threadBuffer = _particlesToAdd[autopas_get_thread_num()];
  threadBuffer.push_back(p);
}
/**
* @copydoc VerletLists::addHaloParticle()
*/
void addHaloParticleImpl(const Particle &haloParticle) override {
_isValid = ValidityState::invalid;
Particle copy = haloParticle;
copy.setOwnershipState(OwnershipState::halo);
_particlesToAdd[autopas_get_thread_num()].push_back(copy);
}
/**
* @copydoc autopas::ParticleContainerInterface::updateHaloParticle()
*/
bool updateHaloParticle(const Particle &haloParticle) override {
Particle pCopy = haloParticle;
pCopy.setOwnershipState(OwnershipState::halo);
// this might be called from a parallel region so force this iterator to be sequential
for (auto it = getRegionIterator(utils::ArrayMath::subScalar(pCopy.getR(), this->getSkin() / 2),
utils::ArrayMath::addScalar(pCopy.getR(), this->getSkin() / 2),
IteratorBehavior::halo | IteratorBehavior::forceSequential);
it.isValid(); ++it) {
if (pCopy.getID() == it->getID()) {
*it = pCopy;
return true;
}
}
return false;
}
/**
* @copydoc VerletLists::deleteHaloParticles
*/
void deleteHaloParticles() override {
  // Delete every halo particle; remember whether anything was removed so the
  // container can be flagged for a rebuild.
  bool deletedSth = false;
#ifdef AUTOPAS_OPENMP
#pragma omp parallel reduction(|| : deletedSth)
#endif
  {
    auto iter = this->begin(IteratorBehavior::halo);
    while (iter.isValid()) {
      internal::deleteParticle(iter);
      deletedSth = true;
      ++iter;
    }
  }
  if (deletedSth) {
    _isValid = ValidityState::invalid;
  }
}
/**
* @copydoc VerletLists::updateContainer()
*/
[[nodiscard]] std::vector<Particle> updateContainer() override {
// First delete all halo particles.
this->deleteHaloParticles();
// Delete dummy particles.
#ifdef AUTOPAS_OPENMP
#pragma omp parallel for
#endif
for (size_t i = 0ul; i < _towers.size(); ++i) {
_towers[i].deleteDummyParticles();
}
// next find invalid particles
std::vector<Particle> invalidParticles;
#ifdef AUTOPAS_OPENMP
#pragma omp parallel
#endif
{
std::vector<Particle> myInvalidParticles;
for (auto iter = this->begin(IteratorBehavior::owned); iter.isValid(); ++iter) {
if (not utils::inBox(iter->getR(), this->getBoxMin(), this->getBoxMax())) {
myInvalidParticles.push_back(*iter);
internal::deleteParticle(iter);
}
}
#ifdef AUTOPAS_OPENMP
#pragma omp critical
#endif
invalidParticles.insert(invalidParticles.end(), myInvalidParticles.begin(), myInvalidParticles.end());
}
if (not invalidParticles.empty()) {
_isValid = ValidityState::invalid;
}
return invalidParticles;
}
/**
* @copydoc ParticleContainerInterface::getTraversalSelectorInfo()
*/
[[nodiscard]] TraversalSelectorInfo getTraversalSelectorInfo() const override {
  const auto boxSizeWithHalo = utils::ArrayMath::sub(this->getHaloBoxMax(), this->getHaloBoxMin());
  const auto towerSideLength = internal::VerletClusterListsRebuilder<Particle>::estimateOptimalGridSideLength(
      this->getNumParticles(), boxSizeWithHalo, _clusterSize);
  const auto towersPerDim =
      internal::VerletClusterListsRebuilder<Particle>::calculateTowersPerDim(boxSizeWithHalo, 1.0 / towerSideLength);
  // Towers tile the xy-plane only; in z a tower spans the whole halo box.
  const std::array<double, 3> towerSize = {towerSideLength, towerSideLength,
                                           this->getHaloBoxMax()[2] - this->getHaloBoxMin()[2]};
  const std::array<unsigned long, 3> towerDimensions = {towersPerDim[0], towersPerDim[1], 1};
  return TraversalSelectorInfo(towerDimensions, this->getInteractionLength(), towerSize, _clusterSize);
}
/**
* @copydoc ParticleContainerInterface::begin()
* @note This function additionally rebuilds the towers if the tower-structure isn't valid.
*/
[[nodiscard]] ParticleIteratorWrapper<Particle, true> begin(
    IteratorBehavior behavior = autopas::IteratorBehavior::ownedOrHalo) override {
  // Bring the tower structure up to date before handing out a mutable iterator.
  prepareContainerForIterator(behavior);
  auto *towerIterator = new internal::ParticleIterator<Particle, internal::ClusterTower<Particle>, true>(
      &(this->_towers), 0, &unknowingCellBorderAndFlagManager, behavior, nullptr);
  return ParticleIteratorWrapper<Particle, true>(towerIterator);
}
/**
* @copydoc ParticleContainerInterface::begin()
* @note const version.
* @note This function additionally iterates over the _particlesToAdd vector if the tower-structure isn't valid.
*/
[[nodiscard]] ParticleIteratorWrapper<Particle, false> begin(
    IteratorBehavior behavior = autopas::IteratorBehavior::ownedOrHalo) const override {
  /// @todo use proper cellBorderAndFlagManager instead of the unknowing.
  if (_isValid == ValidityState::invalid) {
    // Particles are not (all) sorted into the towers yet, so additionally expose _particlesToAdd.
    return ParticleIteratorWrapper<Particle, false>{
        new internal::ParticleIterator<Particle, internal::ClusterTower<Particle>, false>(
            &(this->_towers), 0, &unknowingCellBorderAndFlagManager, behavior, &_particlesToAdd)};
  }
  if (not particlesToAddEmpty()) {
    autopas::utils::ExceptionHandler::exception(
        "VerletClusterLists::begin() const: Error: particle container is valid, but _particlesToAdd isn't empty!");
  }
  // All particles live in the towers, so iterating the towers suffices.
  return ParticleIteratorWrapper<Particle, false>{
      new internal::ParticleIterator<Particle, internal::ClusterTower<Particle>, false>(
          &(this->_towers), 0, &unknowingCellBorderAndFlagManager, behavior, nullptr)};
}
/**
* @copydoc ParticleContainerInterface::getRegionIterator()
* @note This function additionally rebuilds the towers if the tower-structure isn't valid.
*/
[[nodiscard]] ParticleIteratorWrapper<Particle, true> getRegionIterator(const std::array<double, 3> &lowerCorner,
                                                                        const std::array<double, 3> &higherCorner,
                                                                        IteratorBehavior behavior) override {
  // Bring the tower structure up to date before handing out a mutable iterator.
  prepareContainerForIterator(behavior);
  auto [lowerCornerInBounds, upperCornerInBounds, cellsOfInterest] =
      getRegionIteratorHelper(lowerCorner, higherCorner, behavior);
  auto *regionIterator = new internal::RegionParticleIterator<Particle, internal::ClusterTower<Particle>, true>(
      &this->_towers, lowerCornerInBounds, upperCornerInBounds, cellsOfInterest,
      &internal::UnknowingCellBorderAndFlagManager::get(), behavior, nullptr);
  return ParticleIteratorWrapper<Particle, true>(regionIterator);
}
/**
* @copydoc ParticleContainerInterface::getRegionIterator()
* @note const version.
* @note This function additionally iterates over _particlesToAdd if the container structure isn't valid.
*/
[[nodiscard]] ParticleIteratorWrapper<Particle, false> getRegionIterator(const std::array<double, 3> &lowerCorner,
const std::array<double, 3> &higherCorner,
IteratorBehavior behavior) const override {
if (_isValid != ValidityState::invalid && not particlesToAddEmpty()) {
autopas::utils::ExceptionHandler::exception(
"VerletClusterLists::begin() const: Error: particle container is valid, but _particlesToAdd isn't empty!");
}
auto [lowerCornerInBounds, upperCornerInBounds, cellsOfInterest] =
getRegionIteratorHelper(lowerCorner, higherCorner, behavior);
return ParticleIteratorWrapper<Particle, false>(
new internal::RegionParticleIterator<Particle, internal::ClusterTower<Particle>, false>(
&this->_towers, lowerCornerInBounds, upperCornerInBounds, cellsOfInterest,
&internal::UnknowingCellBorderAndFlagManager::get(), behavior,
_isValid != ValidityState::invalid ? nullptr : &_particlesToAdd));
}
  /**
   * @copydoc ParticleContainerInterface::rebuildNeighborLists()
   * Rebuilds the towers/clusters first if the structure is invalid, then rebuilds the
   * neighbor lists and, if the traversal needs one, the static cluster-thread partition.
   */
  void rebuildNeighborLists(TraversalInterface *traversal) override {
    if (_isValid == ValidityState::invalid) {
      rebuildTowersAndClusters();
    }
    // Newton3 choice of the traversal determines how the neighbor lists are built.
    _builder->rebuildNeighborListsAndFillClusters(traversal->getUseNewton3());
    auto *clusterTraversalInterface = dynamic_cast<VCLTraversalInterface<Particle> *>(traversal);
    if (clusterTraversalInterface) {
      if (clusterTraversalInterface->needsStaticClusterThreadPartition()) {
        calculateClusterThreadPartition();
      }
    } else {
      // Only VCL traversals can work on this container.
      autopas::utils::ExceptionHandler::exception(
          "Trying to use a traversal of wrong type in VerletClusterLists::rebuildNeighborLists. TraversalID: {}",
          traversal->getTraversalType());
    }
  }
/**
* Helper method to iterate over all clusters.
* @tparam LoopBody The type of the lambda to execute for all clusters.
* @tparam inParallel If the iteration should be executed in parallel or sequential. See traverseClustersParallel()
* for thread safety.
* @param loopBody The lambda to execute for all clusters. Parameters given is internal::Cluster& cluster.
*/
template <bool inParallel, class LoopBody>
void traverseClusters(LoopBody &&loopBody) {
if (inParallel) {
traverseClustersParallel<LoopBody>(std::forward<LoopBody>(loopBody));
} else {
traverseClustersSequential<LoopBody>(std::forward<LoopBody>(loopBody));
}
}
/**
* @copydoc ParticleContainerInterface::getNumParticles()
*/
[[nodiscard]] unsigned long getNumParticles() const override {
size_t sum = std::accumulate(_towers.begin(), _towers.end(), 0,
[](size_t acc, const auto &tower) { return acc + tower.getNumActualParticles(); });
sum = std::accumulate(_particlesToAdd.begin(), _particlesToAdd.end(), sum,
[](size_t acc, const auto &buffer) { return acc + buffer.size(); });
return sum;
}
  /**
   * Returns the cluster-thread-partition.
   * @return The cluster-thread-partition (one ClusterRange per thread).
   */
  const auto &getClusterThreadPartition() const { return _clusterThreadPartition; }
  /**
   * Returns the number of clusters in this container.
   * @return The number of clusters in this container.
   */
  auto getNumClusters() const { return _numClusters; }
  /**
   * Returns the grid side length of the grids in the container.
   * @return the grid side length of the grids in the container.
   */
  auto getTowerSideLength() const { return _towerSideLength; }
  /**
   * Returns 1 / towerSideLength (cached to avoid repeated divisions).
   * @return the reciprocal of the tower side length.
   */
  auto getTowerSideLengthReciprocal() const { return _towerSideLengthReciprocal; }
  /**
   * Returns the number of grids per dimension on the container.
   * @return the number of grids per dimension on the container.
   */
  auto getTowersPerDimension() const { return _towersPerDim; }
  /**
   * Returns the number of particles in each cluster.
   * @return the number of particles in each (full) cluster.
   */
  auto getClusterSize() const { return _clusterSize; }
  /**
   * Returns the towers per interaction length. That is how many towers fit into one interaction length rounded up.
   * @return the number of towers per interaction length.
   */
  auto getNumTowersPerInteractionLength() const { return _numTowersPerInteractionLength; }
/**
* Loads all particles of the container in their correct SoA and generates the SoAViews for the clusters.
* @tparam Functor The type of the functor to use.
* @param functor The functor to use for loading the particles into the SoA.
*/
template <class Functor>
void loadParticlesIntoSoAs(Functor *functor) {
const auto numTowers = _towers.size();
#if defined(AUTOPAS_OPENMP)
/// @todo: find sensible chunksize
#pragma omp parallel for schedule(dynamic)
#endif
for (size_t index = 0; index < numTowers; index++) {
_towers[index].loadSoA(functor);
}
}
/**
* Extracts all SoAs of the container into the particles.
* @tparam Functor The type of the functor to use.
* @param functor The functor to use for extracting the SoAs into the particles..
*/
template <class Functor>
void extractParticlesFromSoAs(Functor *functor) {
const auto numTowers = _towers.size();
#if defined(AUTOPAS_OPENMP)
/// @todo: find sensible chunksize
#pragma omp parallel for schedule(dynamic)
#endif
for (size_t index = 0; index < numTowers; index++) {
_towers[index].extractSoA(functor);
}
}
  /**
   * Returns a reference to the tower for the given tower grid coordinates.
   * @param x The x-th tower in x direction.
   * @param y The y-th tower in y direction.
   * @return a reference to the tower for the given tower grid coordinates.
   */
  auto &getTowerAtCoordinates(const size_t x, const size_t y) { return _towers[towerIndex2DTo1D(x, y)]; }
/**
* Returns the 1D index for the given tower grid coordinates of a tower.
*
* @param x The x-coordinate of the tower.
* @param y The y-coordinate of the tower.
* @param towersPerDim The number of towers in each dimension.
* @return the 1D index for the given tower grid coordinates of a tower.
*/
static auto towerIndex2DTo1D(const size_t x, const size_t y, const std::array<size_t, 2> towersPerDim) {
return x + y * towersPerDim[0];
}
  /**
   * Returns the 1D index for the given 2D-coordinates of a tower.
   * Convenience overload using this container's own tower grid dimensions.
   *
   * @param x The x-coordinate of the tower.
   * @param y The y-coordinate of the tower.
   * @return the 1D index for the given 2D-coordinates of a tower.
   */
  [[nodiscard]] size_t towerIndex2DTo1D(const size_t x, const size_t y) const {
    return towerIndex2DTo1D(x, y, _towersPerDim);
  }
  /**
   * @copydoc ParticleContainerInterface::getBoxMax()
   */
  [[nodiscard]] const std::array<double, 3> &getBoxMax() const override { return _boxMax; }
  /**
   * @copydoc ParticleContainerInterface::setBoxMax()
   */
  void setBoxMax(const std::array<double, 3> &boxMax) override { _boxMax = boxMax; }
  /**
   * Get the upper corner of the halo box.
   * @return the upper corner of the halo box.
   */
  [[nodiscard]] const std::array<double, 3> &getHaloBoxMax() const { return _haloBoxMax; }
  /**
   * @copydoc ParticleContainerInterface::getBoxMin()
   */
  [[nodiscard]] const std::array<double, 3> &getBoxMin() const override { return _boxMin; }
  /**
   * @copydoc ParticleContainerInterface::setBoxMin()
   */
  void setBoxMin(const std::array<double, 3> &boxMin) override { _boxMin = boxMin; }
  /**
   * Get the lower corner of the halo box.
   * @return the lower corner of the halo box.
   */
  [[nodiscard]] const std::array<double, 3> &getHaloBoxMin() const { return _haloBoxMin; }
  /**
   * @copydoc ParticleContainerInterface::getCutoff()
   */
  [[nodiscard]] double getCutoff() const override { return _cutoff; }
  /**
   * @copydoc ParticleContainerInterface::setCutoff()
   */
  void setCutoff(double cutoff) override { _cutoff = cutoff; }
  /**
   * @copydoc ParticleContainerInterface::getSkin()
   */
  [[nodiscard]] double getSkin() const override { return _skin; }
  /**
   * @copydoc ParticleContainerInterface::setSkin()
   */
  void setSkin(double skin) override { _skin = skin; }
  /**
   * @copydoc ParticleContainerInterface::getInteractionLength()
   * @note interaction length = cutoff + skin.
   */
  [[nodiscard]] double getInteractionLength() const override { return _cutoff + _skin; }
/**
* @copydoc ParticleContainerInterface::deleteAllParticles()
*/
void deleteAllParticles() override {
_isValid = ValidityState::invalid;
std::for_each(_particlesToAdd.begin(), _particlesToAdd.end(), [](auto &buffer) { buffer.clear(); });
std::for_each(_towers.begin(), _towers.end(), [](auto &tower) { tower.clear(); });
}
protected:
/**
* Rebuild the towers and the clusters.
* This function sets the container structure to valid.
*/
void rebuildTowersAndClusters() {
// collect all particles to add from accross the thread buffers
typename decltype(_particlesToAdd)::value_type particlesToAdd;
size_t numParticlesToAdd = std::accumulate(_particlesToAdd.begin(), _particlesToAdd.end(), 0,
[](size_t acc, const auto &buffer) { return acc + buffer.size(); });
particlesToAdd.reserve(numParticlesToAdd);
std::for_each(_particlesToAdd.begin(), _particlesToAdd.end(), [&](auto &particlesBuffer) {
particlesToAdd.insert(particlesToAdd.end(), particlesBuffer.begin(), particlesBuffer.end());
particlesBuffer.clear();
});
_builder =
std::make_unique<internal::VerletClusterListsRebuilder<Particle>>(*this, _towers, particlesToAdd, _clusterSize);
std::tie(_towerSideLength, _numTowersPerInteractionLength, _towersPerDim, _numClusters) =
_builder->rebuildTowersAndClusters();
_towerSideLengthReciprocal = 1 / _towerSideLength;
_isValid = ValidityState::cellsValidListsInvalid;
for (auto &tower : _towers) {
tower.setParticleDeletionObserver(this);
}
}
/**
* Helper method to sequentially iterate over all clusters.
* @tparam LoopBody The type of the lambda to execute for all clusters.
* @param loopBody The lambda to execute for all clusters. Parameters given is internal::Cluster& cluster.
*/
template <class LoopBody>
void traverseClustersSequential(LoopBody &&loopBody) {
for (size_t x = 0; x < _towersPerDim[0]; x++) {
for (size_t y = 0; y < _towersPerDim[1]; y++) {
auto &tower = getTowerAtCoordinates(x, y);
for (auto &cluster : tower.getClusters()) {
loopBody(cluster);
}
}
}
}
  /**
   * Helper method to iterate over all clusters in parallel.
   *
   * It is always safe to modify the particles in the cluster that is passed to the given loop body. However, when
   * modifying particles from other clusters, the caller has to make sure that no data races occur. Particles must not
   * be added or removed during the traversal.
   * @tparam LoopBody The type of the lambda to execute for all clusters.
   * @param loopBody The lambda to execute for all clusters. Parameters given is internal::Cluster& cluster.
   */
  template <class LoopBody>
  void traverseClustersParallel(LoopBody &&loopBody) {
    // Hoist the bounds into locals: collapse(2) requires canonical loop form with
    // loop-invariant bounds.
    const auto towersPerDimX = _towersPerDim[0];
    const auto towersPerDimY = _towersPerDim[1];
#if defined(AUTOPAS_OPENMP)
    /// @todo: find sensible chunksize
#pragma omp parallel for schedule(dynamic) collapse(2)
#endif
    for (size_t x = 0; x < towersPerDimX; x++) {
      for (size_t y = 0; y < towersPerDimY; y++) {
        auto &tower = getTowerAtCoordinates(x, y);
        for (auto &cluster : tower.getClusters()) {
          loopBody(cluster);
        }
      }
    }
  }
  /**
   * Calculates a cluster thread partition that aims to give each thread about the same amount of cluster pair
   * interactions, if each thread handles the neighbors of all clusters it gets assigned.
   */
  void calculateClusterThreadPartition() {
    // Total work = number of (cluster, neighbor) pairs over all clusters.
    size_t numClusterPairs = 0;
    this->template traverseClusters<false>(
        [&numClusterPairs](auto &cluster) { numClusterPairs += cluster.getNeighbors().size(); });
    // Lower bound of work per thread; avoids spawning threads for tiny workloads.
    constexpr int minNumClusterPairsPerThread = 1000;
    auto numThreads =
        std::clamp(static_cast<int>(numClusterPairs / minNumClusterPairsPerThread), 1, autopas_get_max_threads());
    // ceil division, but never less than one pair per thread.
    size_t numClusterPairsPerThread =
        std::max(static_cast<unsigned long>(std::ceil(static_cast<double>(numClusterPairs) / numThreads)), 1ul);
    // Invariant check: the per-thread quota times the thread count must cover all pairs.
    if (numClusterPairsPerThread * numThreads < numClusterPairs) {
      autopas::utils::ExceptionHandler::exception(
          "VerletClusterLists::calculateClusterThreadPartition(): numClusterPairsPerThread ({}) * numThreads ({})={} "
          "should always "
          "be at least the amount of Cluster Pairs ({})!",
          numClusterPairsPerThread, numThreads, numClusterPairsPerThread * numThreads, numClusterPairs);
    }
    fillClusterRanges(numClusterPairsPerThread, numThreads);
  }
  /**
   * Fills in the cluster ranges of the cluster thread partition. It aims to assign each thread appropriately the same
   * number of cluster pairs.
   * @param numClusterPairsPerThread The approximate number of cluster pairs per thread.
   * @param numThreads The number of threads to use.
   */
  void fillClusterRanges(size_t numClusterPairsPerThread, int numThreads) {
    if (numClusterPairsPerThread < 1) {
      autopas::utils::ExceptionHandler::exception(
          "VerletClusterLists::fillClusterRanges(): numClusterPairsPerThread({}) is less than one, this is not "
          "supported "
          "and will lead to errors!",
          numClusterPairsPerThread);
    }
    _clusterThreadPartition.resize(numThreads);
    size_t currentThread = 0;
    size_t currentNumClustersToAdd = 0;  // clusters accumulated for the current thread, not yet written
    size_t numClusterPairsTotal = 0;     // running total of pairs seen so far
    bool threadIsInitialized = false;    // has the current thread's start position been recorded?
    // Iterate over the clusters of all towers
    for (size_t currentTowerIndex = 0; currentTowerIndex < _towers.size(); currentTowerIndex++) {
      auto &currentTower = _towers[currentTowerIndex];
      for (size_t currentClusterInTower = 0; currentClusterInTower < currentTower.getNumClusters();
           currentClusterInTower++) {
        auto &currentCluster = currentTower.getCluster(currentClusterInTower);
        // If on a new thread, start with the clusters for this thread here.
        if (not threadIsInitialized) {
          _clusterThreadPartition[currentThread] = {currentTowerIndex, currentClusterInTower, 0};
          threadIsInitialized = true;
        }
        currentNumClustersToAdd++;
        numClusterPairsTotal += currentCluster.getNeighbors().size();
        // If the thread is finished, write number of clusters and start new thread.
        // The quota is cumulative: thread k is full once (k+1) * quota pairs have been seen.
        if (numClusterPairsTotal >= numClusterPairsPerThread * (currentThread + 1)) {
          // Add the number of clusters for the finished thread.
          _clusterThreadPartition[currentThread].numClusters += currentNumClustersToAdd;
          currentNumClustersToAdd = 0;
          // Go to next thread!
          currentThread++;
          // if we are already at the end of all threads, go back to last thread!
          // this is a safety precaution and should not really matter.
          // NOTE(review): size_t vs. int comparison below; safe because numThreads >= 1,
          // but a cast would silence -Wsign-compare.
          if (currentThread >= numThreads) {
            --currentThread;
            threadIsInitialized = true;
          } else {
            threadIsInitialized = false;
          }
        }
      }
    }
    if (not threadIsInitialized) {
      _clusterThreadPartition[currentThread] = {0, 0, 0};
    }
    // Make sure the last cluster range contains the rest of the clusters, even if there is not the perfect number left.
    if (currentNumClustersToAdd != 0) {
      _clusterThreadPartition[currentThread].numClusters += currentNumClustersToAdd;
    }
    // Theoretically, some threads may still remain. This ensures that their numClusters are set to 0.
    while (++currentThread < numThreads) {
      _clusterThreadPartition[currentThread] = {0, 0, 0};
    }
  }
  /**
   * If a particle is deleted, we want _isValid to be set to invalid, as the tower structure is invalidated.
   *
   * This function is not called, if a particle from the _particlesToAdd vector is deleted!
   */
  void notifyParticleDeleted() override {
    // this is potentially called from a threaded environment, so we have to make this atomic here!
    // Relaxed ordering suffices: only the flag value matters, no other data is published with it.
    _isValid.store(ValidityState::invalid, std::memory_order::memory_order_relaxed);
  }
private:
/**
* load estimation algorithm for balanced traversals.
*/
autopas::LoadEstimatorOption _loadEstimator;
  /**
   * Checks the state of the container and whether it is ready to launch an iterator.
   * Depending on the behavior and current state, this might rebuild towers and clusters.
   *
   * @note This function needs to be called by all functions that create an iterator with modifiable flag == true.
   *
   * @param behavior iterator behavior; forceSequential disallows the collective rebuild below.
   */
  void prepareContainerForIterator(const IteratorBehavior &behavior) {
    // For good openmp scalability we want the particles to be sorted into the clusters, so we do this!
    // If multiple asynchronous iterators are used the container must already be valid.
    // Otherwise it is impossible to decide which thread needs to rebuild the tower structure and which need to wait.
    if (behavior & IteratorBehavior::forceSequential) {
      if (_isValid == ValidityState::invalid) {
        autopas::utils::ExceptionHandler::exception(
            "VerletClusterLists::prepareContainerForIterator(): Parallel iterators with behavior containing "
            "forceSequential encountered, but the container is invalid.");
      }
    } else {
      // Only one thread is allowed to rebuild the towers, so we do an omp single here.
      // This single is only possible when we do not force sequential mode as otherwise not all threads might pass
      // through here and we end up waiting for them forever.
#ifdef AUTOPAS_OPENMP
#pragma omp single
#endif
      if (_isValid == ValidityState::invalid) {
        rebuildTowersAndClusters();
      }
      // there is an implicit barrier at end of single!
    }
  }
/**
* Helper function for the region iterators to determine bounds and towers to iterate over.
* @param lowerCorner
* @param higherCorner
* @param behavior
* @return
*/
[[nodiscard]] auto getRegionIteratorHelper(const std::array<double, 3> &lowerCorner,
const std::array<double, 3> &higherCorner,
IteratorBehavior behavior) const {
// Check all cells, as dummy particles are outside the domain they are only found if the search region is outside
// the domain.
const auto lowerCornerInBounds = utils::ArrayMath::max(lowerCorner, _haloBoxMin);
const auto upperCornerInBounds = utils::ArrayMath::min(higherCorner, _haloBoxMax);
if (not _builder) {
// if no builder exists the clusters have not been built yet and all particles are stored in the first tower.
return std::make_tuple(lowerCornerInBounds, upperCornerInBounds, std::vector<size_t>{0});
}
// Find towers intersecting the search region
auto firstTowerCoords = _builder->getTowerCoordinates(lowerCornerInBounds);
auto firstTowerIndex = _builder->towerIndex2DTo1D(firstTowerCoords[0], firstTowerCoords[1]);
auto lastTowerCoords = _builder->getTowerCoordinates(upperCornerInBounds);
auto lastTowerIndex = _builder->towerIndex2DTo1D(lastTowerCoords[0], lastTowerCoords[1]);
std::array<size_t, 2> towersOfInterstPerDim;
for (size_t dim = 0; dim < towersOfInterstPerDim.size(); ++dim) {
// use ternary operators instead of abs because these are unsigned values
towersOfInterstPerDim[dim] = firstTowerCoords[dim] > lastTowerCoords[dim]
? firstTowerCoords[dim] - lastTowerCoords[dim]
: lastTowerCoords[dim] - firstTowerCoords[dim];
// +1 because we want to include first AND last
towersOfInterstPerDim[dim] += 1;
// sanity check
towersOfInterstPerDim[dim] = std::max(towersOfInterstPerDim[dim], static_cast<size_t>(1));
}
std::vector<size_t> towersOfInterest(towersOfInterstPerDim[0] * towersOfInterstPerDim[1]);
auto towersOfInterestIterator = towersOfInterest.begin();
for (size_t i = 0; i < towersOfInterstPerDim[1]; ++i) {
std::iota(towersOfInterestIterator, towersOfInterestIterator + towersOfInterstPerDim[0],
std::min(firstTowerIndex, lastTowerIndex) + i * _towersPerDim[0]);
towersOfInterestIterator += towersOfInterstPerDim[0];
}
return std::make_tuple(lowerCornerInBounds, upperCornerInBounds, towersOfInterest);
}
  /**
   * The number of particles in a full cluster.
   */
  size_t _clusterSize;
  /**
   * internal storage, particles are split into a grid in xy-dimension
   */
  std::vector<internal::ClusterTower<Particle>> _towers;
  /**
   * Dimensions of the 2D xy-grid.
   */
  std::array<size_t, 2> _towersPerDim{};
  /**
   * Side length of xy-grid.
   */
  double _towerSideLength{0.};
  /**
   * Cached 1 / _towerSideLength to avoid repeated divisions.
   */
  double _towerSideLengthReciprocal{0.};
  /**
   * The number of clusters in the container.
   */
  size_t _numClusters;
  /**
   * The interaction length in number of towers it reaches.
   * static_cast<int>(std::ceil((this->getInteractionLength()) * _towerSideLengthReciprocal))
   */
  int _numTowersPerInteractionLength;
  /**
   * Contains all particles that should be added to the container during the next rebuild.
   * Outer vector is for Thread buffer to allow parallel particle insertion.
   */
  std::vector<std::vector<Particle>> _particlesToAdd;
/**
* Checks if there are particles in at least one thread buffer of _particlesToAdd.
* @return true iff all thread buffer are empty.
*/
[[nodiscard]] bool particlesToAddEmpty() const {
for (auto &threadBuffer : _particlesToAdd) {
if (not threadBuffer.empty()) {
return false;
}
}
return true;
}
  /**
   * Defines a partition of the clusters to a number of threads.
   */
  std::vector<ClusterRange> _clusterThreadPartition;
  /**
   * Minimum of the container.
   */
  std::array<double, 3> _boxMin{};
  /**
   * Maximum of the container.
   */
  std::array<double, 3> _boxMax{};
  /**
   * Minimum of the container including halo.
   */
  std::array<double, 3> _haloBoxMin{};
  /**
   * Maximum of the container including halo.
   */
  std::array<double, 3> _haloBoxMax{};
  /**
   * Cutoff.
   */
  double _cutoff{};
  /**
   * Skin.
   */
  double _skin{};
  /**
   * Enum to specify the validity of this container.
   * The states are ordered: each later state implies the guarantees of the earlier ones.
   */
  enum class ValidityState : unsigned char {
    invalid = 0,                 // nothing is valid.
    cellsValidListsInvalid = 1,  // only the cell structure is valid, but the lists are not.
    cellsAndListsValid = 2       // the cells and lists are valid
  };
  /**
   * Indicates, whether the current container structure (mainly for region iterators) and the verlet lists are valid.
   * Atomic because notifyParticleDeleted() may write it from multiple threads.
   */
  std::atomic<ValidityState> _isValid{ValidityState::invalid};
  /**
   * The builder for the verlet cluster lists.
   */
  std::unique_ptr<internal::VerletClusterListsRebuilder<Particle>> _builder;
  /**
   * The flag manager of this container.
   */
  internal::UnknowingCellBorderAndFlagManager unknowingCellBorderAndFlagManager;
};
} // namespace autopas
|
make_general_basis.h | #ifndef _MAKE_GENERAL_BASIS_H
#define _MAKE_GENERAL_BASIS_H
#include <iostream>
#include "general_basis_core.h"
#include "numpy/ndarraytypes.h"
#include "openmp.h"
#include "misc.h"
#include <cmath>
#include <cfloat>
#include <vector>
#include <utility>
#include <algorithm>
#include <functional>
namespace basis_general {
template<class I,class J,class P=signed char>
npy_intp make_basis_sequential(general_basis_core<I,P> *B,npy_intp MAX,npy_intp mem_MAX,I basis[],J n[]){
	// Scan the states s = 0, 1, ..., MAX-1 and keep every representative state
	// (finite norm, positive integer part) in basis[]/n[].
	// Returns the number of states kept, or -1 if mem_MAX slots did not suffice.
	npy_intp Ns = 0;
	for(I s = 0; MAX != 0; ++s, --MAX){
		if(Ns >= mem_MAX){
			return -1; // insufficient memory: basis[]/n[] are full
		}
		const double norm = B->check_state(s);
		const npy_intp int_norm = norm;
		if(!check_nan(norm) && int_norm > 0){
			basis[Ns] = s;
			n[Ns] = norm;
			++Ns;
		}
	}
	return Ns;
}
template<class I,class J,class P=signed char>
npy_intp make_basis_pcon_sequential(general_basis_core<I,P> *B,npy_intp MAX,npy_intp mem_MAX,I s,I basis[],J n[]){
	// Walk the particle-conserving state chain starting at s, visiting MAX states,
	// and keep every representative state (finite norm, positive integer part).
	// Returns the number of states kept, or -1 if mem_MAX slots did not suffice.
	npy_intp Ns = 0;
	I nns = 0; // number of next_state calls
	for(; MAX != 0; --MAX){
		if(Ns >= mem_MAX){
			return -1; // insufficient memory: basis[]/n[] are full
		}
		const double norm = B->check_state(s);
		const npy_intp int_norm = norm;
		if(!check_nan(norm) && int_norm > 0){
			basis[Ns] = s;
			n[Ns] = norm;
			++Ns;
		}
		s = B->next_state_pcon(s,nns++);
	}
	return Ns;
}
template<class I, class J>
struct compare_pair : std::binary_function<std::pair<I,J>,std::pair<I,J>,bool>
{
bool operator()(std::pair<I,J> a, std::pair<I,J> b){return a.first < b.first;}
};
template<class I,class J,class P=signed char>
npy_intp make_basis_parallel(general_basis_core<I,P> *B,const npy_intp MAX,const npy_intp mem_MAX,I basis[],J n[]){
	// Parallel version of make_basis_sequential: each thread checks the strided
	// subset {threadn, threadn+nthread, ...} of [0, MAX), collects hits in a local
	// block, then the blocks are concatenated, sorted by state, and copied out.
	// Returns the number of states found, or -1 on insufficient memory.
	npy_intp Ns = 0;           // shared counter of accepted states
	bool insuff_mem = false;   // shared flag: mem_MAX was exceeded
	std::vector<std::pair<I,J> > master_block(mem_MAX);
	std::vector<npy_intp> master_pos(omp_get_max_threads()+1);
	std::pair<I,J> * master_block_data = &master_block[0];
	npy_intp * master_pos_data = &master_pos[0];
	#pragma omp parallel firstprivate(MAX) shared(master_block_data,master_pos_data,Ns,insuff_mem)
	{
		const int nthread = omp_get_num_threads();
		const int threadn = omp_get_thread_num();
		// local array to store values found by this thread; avoids contention on master_block.
		std::vector<std::pair<I,J> > thread_block(0);
		// preallocate ~1.1x the fair share so the block rarely reallocates during the search.
		const npy_intp block_size = 1.1*mem_MAX/nthread;
		thread_block.reserve(block_size);
		npy_intp chunk = MAX - threadn;  // states remaining in this thread's stride
		I s = threadn;
		while(chunk>0 && !insuff_mem){
			double norm = B->check_state(s);
			npy_intp int_norm = norm;
			if(!check_nan(norm) && int_norm>0 ){
				thread_block.push_back(std::make_pair(s,int_norm));
				#pragma omp atomic
				Ns++;
			}
			s += nthread;
			chunk-=nthread;
			// NOTE(review): Ns is incremented atomically but read here without an
			// atomic read, and insuff_mem is written in a critical section but read
			// without one — benign for this bail-out logic, but formally a data race.
			if(Ns>=mem_MAX){
				#pragma omp critical
				insuff_mem=true;
			}
		}
		#pragma omp barrier // wait for all threads to finish searching.
		if(!insuff_mem){
			master_pos_data[threadn+1] = thread_block.size(); // get sizes for each thread block into shared memory
			#pragma omp barrier
			#pragma omp single // calculate the cumulative sum to get data paritions of master_block
			{
				for(int i=0;i<nthread;i++){
					master_pos_data[i+1] += master_pos_data[i];
				}
			}
			// load data into master block in parallel; each thread owns the slice [start, end).
			const npy_intp start = master_pos_data[threadn];
			const npy_intp end = master_pos_data[threadn+1];
			npy_intp i = 0;
			for(npy_intp j=start;j<end;j++){
				master_block_data[j] = thread_block[i++];
			}
			#pragma omp barrier
			#pragma omp single // sort the merged block by state so basis[] comes out ordered.
			{
				std::sort(master_block_data, master_block_data + Ns, compare_pair<I,J>());
			}
			#pragma omp for schedule(static)
			for(npy_intp i=0;i<Ns;i++){
				basis[i] = master_block_data[i].first;
				n[i] = master_block_data[i].second;
			}
		}
	}
	if(insuff_mem){
		return -1;
	}
	else{
		// master_block.resize(Ns);
		// std::sort(master_block.begin(),master_block.end(), compare_pair<I,J>());
		// for(npy_intp i=0;i<Ns;i++){
		// 	basis[i] = master_block[i].first;
		// 	n[i] = master_block[i].second;
		// }
		return Ns;
	}
}
template<class I,class J,class P=signed char>
npy_intp make_basis_pcon_parallel(general_basis_core<I,P> *B,const npy_intp MAX,const npy_intp mem_MAX,I s,I basis[],J n[]){
	// Parallel version of make_basis_pcon_sequential: the particle-conserving state
	// chain starting at s is dealt out round-robin; thread t first advances t steps,
	// then takes every nthread-th state. Results are merged, sorted by state, and
	// copied out. Returns the number of states found, or -1 on insufficient memory.
	npy_intp Ns = 0;           // shared counter of accepted states
	bool insuff_mem = false;   // shared flag: mem_MAX was exceeded
	std::vector<std::pair<I,J> > master_block(mem_MAX);
	std::vector<npy_intp> master_pos(omp_get_max_threads()+1);
	std::pair<I,J> * master_block_data = &master_block[0];
	npy_intp * master_pos_data = &master_pos[0];
	#pragma omp parallel firstprivate(MAX,s) shared(master_block_data,master_pos_data,Ns,insuff_mem)
	{
		const int nthread = omp_get_num_threads();
		const int threadn = omp_get_thread_num();
		std::vector<std::pair<I,J> > thread_block(0); // local array to store values found by each thread. this reduces the number of critical sections.
		const npy_intp block_size = 1.1*mem_MAX/nthread;
		thread_block.reserve(block_size); // preallocate memory for each block so that it does not have to expand during search.
		npy_intp chunk = MAX - threadn;  // states remaining in this thread's share
		I nns = 0;// number of next_state calls
		// skip ahead to this thread's first state in the chain.
		for(int i=0;i<threadn;i++){s=B->next_state_pcon(s,nns++);}
		while(chunk>0 && !insuff_mem){
			double norm = B->check_state(s);
			npy_intp int_norm = norm;
			if(!check_nan(norm) && int_norm>0 ){
				thread_block.push_back(std::make_pair(s,int_norm));
				#pragma omp atomic
				Ns++;
			}
			// advance nthread steps to reach this thread's next state.
			for(int i=0;i<nthread;i++){s=B->next_state_pcon(s,nns++);}
			chunk-=nthread;
			// NOTE(review): non-atomic read of atomically-incremented Ns and of
			// insuff_mem — benign for this bail-out logic, but formally a data race.
			if(Ns>=mem_MAX){
				#pragma omp critical
				insuff_mem=true;
			}
		}
		#pragma omp barrier
		if(!insuff_mem){
			// publish each thread's block size, then prefix-sum to partition master_block.
			master_pos_data[threadn+1] = thread_block.size();
			#pragma omp barrier
			#pragma omp single
			{
				for(int i=0;i<nthread;i++){
					master_pos_data[i+1] += master_pos_data[i];
				}
			}
			// each thread copies its block into its own slice [start, end) of master_block.
			const npy_intp start = master_pos_data[threadn];
			const npy_intp end = master_pos_data[threadn+1];
			npy_intp i = 0;
			for(npy_intp j=start;j<end;j++){
				master_block_data[j] = thread_block[i++];
			}
			#pragma omp barrier
			#pragma omp single // sort the merged block by state so basis[] comes out ordered.
			{
				std::sort(master_block_data, master_block_data + Ns, compare_pair<I,J>());
			}
			#pragma omp for schedule(static)
			for(npy_intp i=0;i<Ns;i++){
				basis[i] = master_block_data[i].first;
				n[i] = master_block_data[i].second;
			}
		}
	}
	if(insuff_mem){
		return -1;
	}
	else{
		// sort list based on basis and then fill ndarray values with the sorted list.
		// master_block.resize(Ns);
		// std::sort(master_block.begin(),master_block.end(), compare_pair<I,J>());
		// for(npy_intp i=0;i<Ns;i++){
		// 	basis[i] = master_block[i].first;
		// 	n[i] = master_block[i].second;
		// }
		return Ns;
	}
}
template<class I,class J,class P=signed char>
npy_intp make_basis(general_basis_core<I,P> *B,npy_intp MAX,npy_intp mem_MAX,I basis[],J n[]){
	// Dispatch: use the parallel search only when it can pay off — more than one
	// OpenMP thread available, more states than threads, and at least one symmetry.
	const int nt = B->get_nt();
	const int nthreads = omp_get_max_threads();
	const bool run_parallel = nthreads > 1 && MAX > nthreads && nt > 0;
	return run_parallel ? make_basis_parallel(B,MAX,mem_MAX,basis,n)
	                    : make_basis_sequential(B,MAX,mem_MAX,basis,n);
}
template<class I,class J,class P=signed char>
npy_intp make_basis_pcon(general_basis_core<I,P> *B,npy_intp MAX,npy_intp mem_MAX,I s,I basis[],J n[]){
	// Dispatch: use the parallel search only when it can pay off — more than one
	// OpenMP thread available, more states than threads, and at least one symmetry.
	const int nt = B->get_nt();
	const int nthreads = omp_get_max_threads();
	const bool run_parallel = nthreads > 1 && MAX > nthreads && nt > 0;
	return run_parallel ? make_basis_pcon_parallel(B,MAX,mem_MAX,s,basis,n)
	                    : make_basis_pcon_sequential(B,MAX,mem_MAX,s,basis,n);
}
// template<class I,class J>
// npy_intp inline make_basis_wrapper(void *B,npy_intp MAX,npy_intp mem_MAX,void * basis,J n[]){
// return make_basis(reinterpret_cast<general_basis_core<I> *>(B),MAX,mem_MAX,(I*)basis,n);
// }
// template<class I,class J>
// npy_intp inline make_basis_pcon_wrapper(void *B,npy_intp MAX,npy_intp mem_MAX,npy_uint64 s,void * basis,J n[]){
// return make_basis_pcon(reinterpret_cast<general_basis_core<I> *>(B),MAX,mem_MAX,(I)s,(I*)basis,n);
// }
}
#endif
|
line_search_contact_strategy.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_LINE_SEARCH_CONTACT_STRATEGY)
#define KRATOS_LINE_SEARCH_CONTACT_STRATEGY
/* System Includes */
/* External Includes */
/* Project includes */
#include "includes/kratos_parameters.h"
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/variables.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "solving_strategies/strategies/line_search_strategy.h"
#include "utilities/openmp_utils.h"
#include "utilities/variable_utils.h"
// Convergence criteria
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
// Default builder and solver
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
// TODO: Extend the descriptions
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/** \brief Short class definition.
This class
*/
template<class TSparseSpace,
class TDenseSpace, // = DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class LineSearchContactStrategy :
public LineSearchStrategy< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
    // Convergence criteria type used by this strategy.
    typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType;
    /** Counted pointer of ClassName */
    KRATOS_CLASS_POINTER_DEFINITION( LineSearchContactStrategy );
    // Base strategy hierarchy: SolvingStrategy <- NewtonRaphson <- LineSearch <- this class.
    typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType;
    typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> NRBaseType;
    typedef LineSearchStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
    // Types re-exported from the base strategy for convenience.
    typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType;
    typedef typename BaseType::TDataType TDataType;
    typedef TSparseSpace SparseSpaceType;
    typedef typename BaseType::TSchemeType TSchemeType;
    typedef typename BaseType::DofsArrayType DofsArrayType;
    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
    typedef typename BaseType::TSystemVectorType TSystemVectorType;
    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
    typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
    typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
    // Container and index aliases.
    typedef ModelPart::NodesContainerType NodesArrayType;
    typedef ModelPart::ConditionsContainerType ConditionsArrayType;
    typedef std::size_t IndexType;
    /**
     * Default constructor
     * @param rModelPart: The model part of the problem
     * @param pScheme: The integration scheme
     * @param pNewLinearSolver: The linear solver employed
     * @param pNewConvergenceCriteria: The convergence criteria employed
     * @param MaxIterations: The maximum number of iterations
     * @param CalculateReactions: The flag for the reaction calculation
     * @param ReformDofSetAtEachStep: The flag that allows to compute the modification of the DOF
     * @param MoveMeshFlag: The flag that allows to move the mesh
     * @param ThisParameters: Extra settings; validated against an (empty) default block
     */
    LineSearchContactStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer pScheme,
        typename TLinearSolver::Pointer pNewLinearSolver,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        IndexType MaxIterations = 30,
        bool CalculateReactions = false,
        bool ReformDofSetAtEachStep = false,
        bool MoveMeshFlag = false,
        Parameters ThisParameters = Parameters(R"({})")
    )
        : LineSearchStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag)
    {
        KRATOS_TRY;
        // NOTE(review): ThisParameters is only validated, its values are not stored
        // or used afterwards — confirm whether that is intentional.
        Parameters DefaultParameters = Parameters(R"(
        {
        })" );
        ThisParameters.ValidateAndAssignDefaults(DefaultParameters);
        KRATOS_CATCH("");
    }
/**
 * @brief Constructor taking an explicit builder-and-solver.
 * @param rModelPart The model part of the problem
 * @param pScheme The integration scheme
 * @param pNewLinearSolver The linear solver employed
 * @param pNewConvergenceCriteria The convergence criteria employed
 * @param pNewBuilderAndSolver The builder and solver employed
 * @param MaxIterations The maximum number of iterations
 * @param CalculateReactions The flag for the reaction calculation
 * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
 * @param MoveMeshFlag The flag that allows to move the mesh
 * @param ThisParameters Extra settings, validated against an (empty) default schema
 */
LineSearchContactStrategy(
    ModelPart& rModelPart,
    typename TSchemeType::Pointer pScheme,
    typename TLinearSolver::Pointer pNewLinearSolver,
    typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
    typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
    IndexType MaxIterations = 30,
    bool CalculateReactions = false,
    bool ReformDofSetAtEachStep = false,
    bool MoveMeshFlag = false,
    Parameters ThisParameters = Parameters(R"({})")
    )
    : LineSearchStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag )
{
    KRATOS_TRY;

    // No strategy-specific settings are defined yet; validating against an
    // empty schema still rejects unknown user-supplied parameters.
    Parameters default_parameters(R"({})");
    ThisParameters.ValidateAndAssignDefaults(default_parameters);

    KRATOS_CATCH("");
}
/**
 * Destructor (defaulted: the base strategies release their own resources).
 */
~LineSearchContactStrategy() override
= default;
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
bool mRecalculateFactor; // To check if we recalculate or not the scale factor
///@}
///@name Protected Operators
///@{
/**
 * Performs all the required operations that should be done (for each step)
 * before solving the solution step.
 * A member variable should be used as a flag to make sure this function is called only once per step.
 * Currently this simply delegates to the base line-search strategy.
 */
void InitializeSolutionStep() override
{
    BaseType::InitializeSolutionStep();
    // TODO: Add something if necessary
}
/**
 * Here the database is updated: a line search performed separately for the
 * displacement and Lagrange-multiplier (LM) blocks of the solution. The RHS
 * residual is sampled at 0, 1/2 and 1 of the Newton step and a parabola is
 * fitted per block to choose the step length.
 * @param A The LHS matrix
 * @param Dx The computed increment of unknowns
 * @param b The RHS vector (rebuilt several times here)
 * @param MoveMesh Whether the mesh is moved after each update
 */
void UpdateDatabase(
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b,
    const bool MoveMesh
    ) override
{
    typename TSchemeType::Pointer pScheme = this->GetScheme();
    typename TBuilderAndSolverType::Pointer pBuilderAndSolver = this->GetBuilderAndSolver(); // FIXME: Separate in the parts of LM and displacement

    TSystemVectorType aux(b.size()); //TODO: do it by using the space
    TSparseSpace::Assign(aux, 0.5, Dx); // aux = half of the Newton step

    TSystemVectorType DxDisp(b.size());
    TSystemVectorType DxLM(b.size());
    ComputeSplitDx(Dx, DxDisp, DxLM);

    // Compute residual without update
    TSparseSpace::SetToZero(b);
    pBuilderAndSolver->BuildRHS(pScheme, BaseType::GetModelPart(), b );
    // NOTE(review): ro*/rh*/rf* below are passed uninitialized;
    // ComputeMixedResidual must fully overwrite them before reading.
    double roDisp;
    double roLM;
    ComputeMixedResidual(b, roDisp, roLM);

    // Compute half step residual
    NRBaseType::UpdateDatabase(A,aux,b,MoveMesh);
    TSparseSpace::SetToZero(b);
    pBuilderAndSolver->BuildRHS(pScheme, BaseType::GetModelPart(), b );
    double rhDisp;
    double rhLM;
    ComputeMixedResidual(b, rhDisp, rhLM);

    // Compute full step residual (add another half Dx to the previous half)
    NRBaseType::UpdateDatabase(A,aux,b,MoveMesh);
    TSparseSpace::SetToZero(b);
    pBuilderAndSolver->BuildRHS(pScheme, BaseType::GetModelPart(), b );
    double rfDisp;
    double rfLM;
    ComputeMixedResidual(b, rfDisp, rfLM);

    // We compute the parabola
    double XminDisp = 1e-3;
    double XmaxDisp = 1.0;
    double XminLM = 1e-3;
    double XmaxLM = 1.0;
    // NOTE(review): ComputeParabola's signature is (Xmax, Xmin, ...), but it
    // is called here with (Xmin*, Xmax*, ...); as written the fitted optimum
    // is stored in XminDisp/XminLM while XmaxDisp/XmaxLM stay 1.0, which
    // makes the final update below a no-op. Confirm the intended order.
    ComputeParabola(XminDisp, XmaxDisp, rfDisp, roDisp, rhDisp);
    ComputeParabola(XminLM, XmaxLM, rfLM, roLM, rhLM);

    // Perform final update: back off from the full step by (1 - X) per block
    TSparseSpace::Assign(aux,-(1.0 - XmaxDisp), DxDisp);
    TSparseSpace::UnaliasedAdd(aux,-(1.0 - XmaxLM), DxLM);
    NRBaseType::UpdateDatabase(A,aux,b,MoveMesh);
}
/**
 * This method splits the vector of increments of DoF into displacement and LM parts.
 * Each equation id is routed to exactly one of the two output vectors; the
 * other entry is zeroed, so DxDisp + DxLM reconstructs Dx.
 * @param Dx The increment of displacements and LM
 * @param DxDisp The increment of displacements (output; assumed pre-sized like Dx)
 * @param DxLM The increment of LM (output; assumed pre-sized like Dx)
 */
void ComputeSplitDx(
    TSystemVectorType& Dx,
    TSystemVectorType& DxDisp,
    TSystemVectorType& DxLM
    )
{
    // Now we iterate over all the nodes
    NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes();
    const int num_nodes = static_cast<int>(nodes_array.size());

    // Safe to parallelize: each equation id j is written by exactly one DoF.
    #pragma omp parallel for
    for(int i = 0; i < num_nodes; ++i)
    {
        auto it_node = nodes_array.begin() + i;

        for(auto itDoF = it_node->GetDofs().begin() ; itDoF != it_node->GetDofs().end() ; itDoF++)
        {
            const int j = (itDoF)->EquationId();
            // Variable key identifies the physical quantity of this DoF
            std::size_t CurrVar = (itDoF)->GetVariable().Key();

            if ((CurrVar == DISPLACEMENT_X) || (CurrVar == DISPLACEMENT_Y) || (CurrVar == DISPLACEMENT_Z))
            {
                DxDisp[j] = Dx[j];
                DxLM[j] = 0.0;
            }
            else // Corresponding with contact
            {
                DxDisp[j] = 0.0;
                DxLM[j] = Dx[j];
            }
        }
    }
}
/**
 * This method calculates the norm considering one norm for the displacement and another norm for the LM.
 * @param b The residual vector
 * @param normDisp Output: the L2 norm of the displacement block of b
 * @param normLM Output: the L2 norm of the LM block of b
 */
void ComputeMixedResidual(
    TSystemVectorType& b,
    double& normDisp,
    double& normLM
    )
{
    // The accumulators are pure outputs: zero them explicitly. Callers pass
    // uninitialized doubles, so accumulating with += without this reset
    // reads indeterminate values (undefined behavior).
    normDisp = 0.0;
    normLM = 0.0;

    // Now we iterate over all the nodes
    NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes();
    const int num_nodes = static_cast<int>(nodes_array.size());

    #pragma omp parallel for
    for(int i = 0; i < num_nodes; ++i)
    {
        auto it_node = nodes_array.begin() + i;

        for(auto itDoF = it_node->GetDofs().begin() ; itDoF != it_node->GetDofs().end() ; itDoF++)
        {
            const int j = (itDoF)->EquationId();
            std::size_t CurrVar = (itDoF)->GetVariable().Key();

            if ((CurrVar == DISPLACEMENT_X) || (CurrVar == DISPLACEMENT_Y) || (CurrVar == DISPLACEMENT_Z))
            {
                // Atomic: several threads may accumulate concurrently
                #pragma omp atomic
                normDisp += b[j] * b[j];
            }
            else // Corresponding with contact
            {
                #pragma omp atomic
                normLM += b[j] * b[j];
            }
        }
    }

    normDisp = std::sqrt(normDisp);
    normLM = std::sqrt(normLM);
}
/**
 * This method computes the optimal line-search step length from a parabola
 * fitted through three residual samples.
 * @param Xmax In/out: on exit holds the selected step length
 * @param Xmin Fallback step length used when no improvement is found
 * @param rf The residual norm of the full step
 * @param ro The residual norm without step
 * @param rh The residual norm of the half step
 */
void ComputeParabola(
    double& Xmax,
    double& Xmin,
    const double rf,
    const double ro,
    const double rh
    )
{
    // Fit r(x) = a*x^2 + b*x + c through (0, ro), (1/2, rh), (1, rf):
    //   c = ro,  b = 4*rh - rf - 3*ro,  a = 2*rf - 4*rh + 2*ro
    const double coeff_a = 2 * rf + 2 * ro - 4 * rh;
    const double coeff_b = 4 * rh - rf - 3 * ro;

    if (coeff_a > 0.0)
    {
        // Convex parabola: take the vertex -b/(2a), clipped to [-1, 1]
        const double vertex = -0.5 * coeff_b / coeff_a;
        Xmax = (vertex > 1.0) ? 1.0 : ((vertex < -1.0) ? -1.0 : vertex);
    }
    else
    {
        // Degenerate case (line or concave parabola): take the better extreme
        Xmax = (rf < ro) ? 1.0 : Xmin; // Xmin should be zero, but otherwise it will stagnate
    }
}
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@{
/**
 * Copy constructor (intentionally empty body).
 * NOTE(review): no members — including mRecalculateFactor and the base-class
 * state — are copied here; confirm whether copying is really meant to be
 * supported or should instead be explicitly disallowed.
 */
LineSearchContactStrategy(const LineSearchContactStrategy& Other)
{
};
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class LineSearchContactStrategy */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} // namespace Kratos
#endif /* KRATOS_LINE_SEARCH_CONTACT_STRATEGY */
|
gimple.h | /* Gimple IR definitions.
Copyright (C) 2007-2013 Free Software Foundation, Inc.
Contributed by Aldy Hernandez <aldyh@redhat.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_GIMPLE_H
#define GCC_GIMPLE_H
#include "pointer-set.h"
#include "vec.h"
#include "ggc.h"
#include "basic-block.h"
#include "tree.h"
#include "tree-ssa-operands.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
typedef gimple gimple_seq_node;
/* For each block, the PHI nodes that need to be rewritten are stored into
these vectors. */
typedef vec<gimple> gimple_vec;
enum gimple_code {
#define DEFGSCODE(SYM, STRING, STRUCT) SYM,
#include "gimple.def"
#undef DEFGSCODE
LAST_AND_UNUSED_GIMPLE_CODE
};
extern const char *const gimple_code_name[];
extern const unsigned char gimple_rhs_class_table[];
/* Error out if a gimple tuple is addressed incorrectly. */
#if defined ENABLE_GIMPLE_CHECKING
#define gcc_gimple_checking_assert(EXPR) gcc_assert (EXPR)
extern void gimple_check_failed (const_gimple, const char *, int, \
const char *, enum gimple_code, \
enum tree_code) ATTRIBUTE_NORETURN;
#define GIMPLE_CHECK(GS, CODE) \
do { \
const_gimple __gs = (GS); \
if (gimple_code (__gs) != (CODE)) \
gimple_check_failed (__gs, __FILE__, __LINE__, __FUNCTION__, \
(CODE), ERROR_MARK); \
} while (0)
#else /* not ENABLE_GIMPLE_CHECKING */
#define gcc_gimple_checking_assert(EXPR) ((void)(0 && (EXPR)))
#define GIMPLE_CHECK(GS, CODE) (void)0
#endif
/* Class of GIMPLE expressions suitable for the RHS of assignments.  See
   get_gimple_rhs_class.  */
enum gimple_rhs_class
{
  GIMPLE_INVALID_RHS,	/* The expression cannot be used on the RHS.  */
  GIMPLE_TERNARY_RHS,	/* The expression is a ternary operation.  */
  GIMPLE_BINARY_RHS,	/* The expression is a binary operation.  */
  GIMPLE_UNARY_RHS,	/* The expression is a unary operation.  */
  GIMPLE_SINGLE_RHS	/* The expression is a single object (an SSA
			   name, a _DECL, a _REF, etc.).  */
};
/* Specific flags for individual GIMPLE statements. These flags are
always stored in gimple_statement_base.subcode and they may only be
defined for statement codes that do not use sub-codes.
Values for the masks can overlap as long as the overlapping values
are never used in the same statement class.
The maximum mask value that can be defined is 1 << 15 (i.e., each
statement code can hold up to 16 bitflags).
Keep this list sorted. */
enum gf_mask {
GF_ASM_INPUT = 1 << 0,
GF_ASM_VOLATILE = 1 << 1,
GF_CALL_FROM_THUNK = 1 << 0,
GF_CALL_RETURN_SLOT_OPT = 1 << 1,
GF_CALL_TAILCALL = 1 << 2,
GF_CALL_VA_ARG_PACK = 1 << 3,
GF_CALL_NOTHROW = 1 << 4,
GF_CALL_ALLOCA_FOR_VAR = 1 << 5,
GF_CALL_INTERNAL = 1 << 6,
GF_OMP_PARALLEL_COMBINED = 1 << 0,
/* True on an GIMPLE_OMP_RETURN statement if the return does not require
a thread synchronization via some sort of barrier. The exact barrier
that would otherwise be emitted is dependent on the OMP statement with
which this return is associated. */
GF_OMP_RETURN_NOWAIT = 1 << 0,
GF_OMP_SECTION_LAST = 1 << 0,
GF_OMP_ATOMIC_NEED_VALUE = 1 << 0,
GF_PREDICT_TAKEN = 1 << 15
};
/* Currently, there are only two types of gimple debug stmt. Others are
envisioned, for example, to enable the generation of is_stmt notes
in line number information, to mark sequence points, etc. This
subcode is to be used to tell them apart. */
enum gimple_debug_subcode {
GIMPLE_DEBUG_BIND = 0,
GIMPLE_DEBUG_SOURCE_BIND = 1
};
/* Masks for selecting a pass local flag (PLF) to work on. These
masks are used by gimple_set_plf and gimple_plf. */
enum plf_mask {
GF_PLF_1 = 1 << 0,
GF_PLF_2 = 1 << 1
};
/* Iterator object for GIMPLE statement sequences. */
typedef struct
{
/* Sequence node holding the current statement. */
gimple_seq_node ptr;
/* Sequence and basic block holding the statement. These fields
are necessary to handle edge cases such as when statement is
added to an empty basic block or when the last statement of a
block/sequence is removed. */
gimple_seq *seq;
basic_block bb;
} gimple_stmt_iterator;
/* Data structure definitions for GIMPLE tuples. NOTE: word markers
are for 64 bit hosts. */
struct GTY((chain_next ("%h.next"))) gimple_statement_base {
/* [ WORD 1 ]
Main identifying code for a tuple. */
ENUM_BITFIELD(gimple_code) code : 8;
/* Nonzero if a warning should not be emitted on this tuple. */
unsigned int no_warning : 1;
/* Nonzero if this tuple has been visited. Passes are responsible
for clearing this bit before using it. */
unsigned int visited : 1;
/* Nonzero if this tuple represents a non-temporal move. */
unsigned int nontemporal_move : 1;
/* Pass local flags. These flags are free for any pass to use as
they see fit. Passes should not assume that these flags contain
any useful value when the pass starts. Any initial state that
the pass requires should be set on entry to the pass. See
gimple_set_plf and gimple_plf for usage. */
unsigned int plf : 2;
/* Nonzero if this statement has been modified and needs to have its
operands rescanned. */
unsigned modified : 1;
/* Nonzero if this statement contains volatile operands. */
unsigned has_volatile_ops : 1;
/* The SUBCODE field can be used for tuple-specific flags for tuples
that do not require subcodes. Note that SUBCODE should be at
least as wide as tree codes, as several tuples store tree codes
in there. */
unsigned int subcode : 16;
/* UID of this statement. This is used by passes that want to
assign IDs to statements. It must be assigned and used by each
pass. By default it should be assumed to contain garbage. */
unsigned uid;
/* [ WORD 2 ]
Locus information for debug info. */
location_t location;
/* Number of operands in this tuple. */
unsigned num_ops;
/* [ WORD 3 ]
Basic block holding this statement. */
basic_block bb;
/* [ WORD 4-5 ]
Linked lists of gimple statements. The next pointers form
a NULL terminated list, the prev pointers are a cyclic list.
A gimple statement is hence also a double-ended list of
statements, with the pointer itself being the first element,
and the prev pointer being the last. */
gimple next;
gimple GTY((skip)) prev;
};
/* Base structure for tuples with operands. */
struct GTY(()) gimple_statement_with_ops_base
{
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ]
SSA operand vectors. NOTE: It should be possible to
amalgamate these vectors with the operand vector OP. However,
the SSA operand vectors are organized differently and contain
more information (like immediate use chaining). */
struct use_optype_d GTY((skip (""))) *use_ops;
};
/* Statements that take register operands. */
struct GTY(()) gimple_statement_with_ops
{
/* [ WORD 1-7 ] */
struct gimple_statement_with_ops_base opbase;
/* [ WORD 8 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.opbase.gsbase.num_ops"))) op[1];
};
/* Base for statements that take both memory and register operands. */
struct GTY(()) gimple_statement_with_memory_ops_base
{
/* [ WORD 1-7 ] */
struct gimple_statement_with_ops_base opbase;
/* [ WORD 8-9 ]
Virtual operands for this statement. The GC will pick them
up via the ssa_names array. */
tree GTY((skip (""))) vdef;
tree GTY((skip (""))) vuse;
};
/* Statements that take both memory and register operands. */
struct GTY(()) gimple_statement_with_memory_ops
{
/* [ WORD 1-9 ] */
struct gimple_statement_with_memory_ops_base membase;
/* [ WORD 10 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1];
};
/* Call statements that take both memory and register operands. */
struct GTY(()) gimple_statement_call
{
/* [ WORD 1-9 ] */
struct gimple_statement_with_memory_ops_base membase;
/* [ WORD 10-13 ] */
struct pt_solution call_used;
struct pt_solution call_clobbered;
/* [ WORD 14 ] */
union GTY ((desc ("%1.membase.opbase.gsbase.subcode & GF_CALL_INTERNAL"))) {
tree GTY ((tag ("0"))) fntype;
enum internal_fn GTY ((tag ("GF_CALL_INTERNAL"))) internal_fn;
} u;
/* [ WORD 15 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1];
};
/* OpenMP statements (#pragma omp). */
struct GTY(()) gimple_statement_omp {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ] */
gimple_seq body;
};
/* GIMPLE_BIND */
struct GTY(()) gimple_statement_bind {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ]
Variables declared in this scope. */
tree vars;
/* [ WORD 8 ]
This is different than the BLOCK field in gimple_statement_base,
which is analogous to TREE_BLOCK (i.e., the lexical block holding
this statement). This field is the equivalent of BIND_EXPR_BLOCK
in tree land (i.e., the lexical scope defined by this bind). See
gimple-low.c. */
tree block;
/* [ WORD 9 ] */
gimple_seq body;
};
/* GIMPLE_CATCH */
struct GTY(()) gimple_statement_catch {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ] */
tree types;
/* [ WORD 8 ] */
gimple_seq handler;
};
/* GIMPLE_EH_FILTER */
struct GTY(()) gimple_statement_eh_filter {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ]
Filter types. */
tree types;
/* [ WORD 8 ]
Failure actions. */
gimple_seq failure;
};
/* GIMPLE_EH_ELSE */
struct GTY(()) gimple_statement_eh_else {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7,8 ] */
gimple_seq n_body, e_body;
};
/* GIMPLE_EH_MUST_NOT_THROW */
struct GTY(()) gimple_statement_eh_mnt {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ] Abort function decl. */
tree fndecl;
};
/* GIMPLE_PHI */
struct GTY(()) gimple_statement_phi {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ] */
unsigned capacity;
unsigned nargs;
/* [ WORD 8 ] */
tree result;
/* [ WORD 9 ] */
struct phi_arg_d GTY ((length ("%h.nargs"))) args[1];
};
/* GIMPLE_RESX, GIMPLE_EH_DISPATCH */
struct GTY(()) gimple_statement_eh_ctrl
{
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ]
Exception region number. */
int region;
};
/* GIMPLE_TRY */
struct GTY(()) gimple_statement_try {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ]
Expression to evaluate. */
gimple_seq eval;
/* [ WORD 8 ]
Cleanup expression. */
gimple_seq cleanup;
};
/* Kind of GIMPLE_TRY statements. */
enum gimple_try_flags
{
/* A try/catch. */
GIMPLE_TRY_CATCH = 1 << 0,
/* A try/finally. */
GIMPLE_TRY_FINALLY = 1 << 1,
GIMPLE_TRY_KIND = GIMPLE_TRY_CATCH | GIMPLE_TRY_FINALLY,
/* Analogous to TRY_CATCH_IS_CLEANUP. */
GIMPLE_TRY_CATCH_IS_CLEANUP = 1 << 2
};
/* GIMPLE_WITH_CLEANUP_EXPR */
struct GTY(()) gimple_statement_wce {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* Subcode: CLEANUP_EH_ONLY. True if the cleanup should only be
executed if an exception is thrown, not on normal exit of its
scope. This flag is analogous to the CLEANUP_EH_ONLY flag
in TARGET_EXPRs. */
/* [ WORD 7 ]
Cleanup expression. */
gimple_seq cleanup;
};
/* GIMPLE_ASM */
struct GTY(()) gimple_statement_asm
{
/* [ WORD 1-9 ] */
struct gimple_statement_with_memory_ops_base membase;
/* [ WORD 10 ]
__asm__ statement. */
const char *string;
/* [ WORD 11 ]
Number of inputs, outputs, clobbers, labels. */
unsigned char ni;
unsigned char no;
unsigned char nc;
unsigned char nl;
/* [ WORD 12 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1];
};
/* GIMPLE_OMP_CRITICAL */
struct GTY(()) gimple_statement_omp_critical {
/* [ WORD 1-7 ] */
struct gimple_statement_omp omp;
/* [ WORD 8 ]
Critical section name. */
tree name;
};
struct GTY(()) gimple_omp_for_iter {
/* Condition code. */
enum tree_code cond;
/* Index variable. */
tree index;
/* Initial value. */
tree initial;
/* Final value. */
tree final;
/* Increment. */
tree incr;
};
/* GIMPLE_OMP_FOR */
struct GTY(()) gimple_statement_omp_for {
/* [ WORD 1-7 ] */
struct gimple_statement_omp omp;
/* [ WORD 8 ] */
tree clauses;
/* [ WORD 9 ]
Number of elements in iter array. */
size_t collapse;
/* [ WORD 10 ] */
struct gimple_omp_for_iter * GTY((length ("%h.collapse"))) iter;
/* [ WORD 11 ]
Pre-body evaluated before the loop body begins. */
gimple_seq pre_body;
};
/* GIMPLE_OMP_PARALLEL */
struct GTY(()) gimple_statement_omp_parallel {
/* [ WORD 1-7 ] */
struct gimple_statement_omp omp;
/* [ WORD 8 ]
Clauses. */
tree clauses;
/* [ WORD 9 ]
Child function holding the body of the parallel region. */
tree child_fn;
/* [ WORD 10 ]
Shared data argument. */
tree data_arg;
};
/* GIMPLE_OMP_TASK */
struct GTY(()) gimple_statement_omp_task {
/* [ WORD 1-10 ] */
struct gimple_statement_omp_parallel par;
/* [ WORD 11 ]
Child function holding firstprivate initialization if needed. */
tree copy_fn;
/* [ WORD 12-13 ]
Size and alignment in bytes of the argument data block. */
tree arg_size;
tree arg_align;
};
/* GIMPLE_OMP_SECTION */
/* Uses struct gimple_statement_omp. */
/* GIMPLE_OMP_SECTIONS */
struct GTY(()) gimple_statement_omp_sections {
/* [ WORD 1-7 ] */
struct gimple_statement_omp omp;
/* [ WORD 8 ] */
tree clauses;
/* [ WORD 9 ]
The control variable used for deciding which of the sections to
execute. */
tree control;
};
/* GIMPLE_OMP_CONTINUE.
Note: This does not inherit from gimple_statement_omp, because we
do not need the body field. */
struct GTY(()) gimple_statement_omp_continue {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ] */
tree control_def;
/* [ WORD 8 ] */
tree control_use;
};
/* GIMPLE_OMP_SINGLE */
struct GTY(()) gimple_statement_omp_single {
  /* [ WORD 1-7 ] */
  struct gimple_statement_omp omp;
  /* [ WORD 8 ]  (the base occupies words 1-7, as in the other OMP tuples)  */
  tree clauses;
};
/* GIMPLE_OMP_ATOMIC_LOAD.
Note: This is based on gimple_statement_base, not g_s_omp, because g_s_omp
contains a sequence, which we don't need here. */
struct GTY(()) gimple_statement_omp_atomic_load {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7-8 ] */
tree rhs, lhs;
};
/* GIMPLE_OMP_ATOMIC_STORE.
See note on GIMPLE_OMP_ATOMIC_LOAD. */
struct GTY(()) gimple_statement_omp_atomic_store {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ] */
tree val;
};
/* GIMPLE_TRANSACTION. */
/* Bits to be stored in the GIMPLE_TRANSACTION subcode. */
/* The __transaction_atomic was declared [[outer]] or it is
__transaction_relaxed. */
#define GTMA_IS_OUTER (1u << 0)
#define GTMA_IS_RELAXED (1u << 1)
#define GTMA_DECLARATION_MASK (GTMA_IS_OUTER | GTMA_IS_RELAXED)
/* The transaction is seen to not have an abort. */
#define GTMA_HAVE_ABORT (1u << 2)
/* The transaction is seen to have loads or stores. */
#define GTMA_HAVE_LOAD (1u << 3)
#define GTMA_HAVE_STORE (1u << 4)
/* The transaction MAY enter serial irrevocable mode in its dynamic scope. */
#define GTMA_MAY_ENTER_IRREVOCABLE (1u << 5)
/* The transaction WILL enter serial irrevocable mode.
An irrevocable block post-dominates the entire transaction, such
that all invocations of the transaction will go serial-irrevocable.
In such case, we don't bother instrumenting the transaction, and
tell the runtime that it should begin the transaction in
serial-irrevocable mode. */
#define GTMA_DOES_GO_IRREVOCABLE (1u << 6)
/* The transaction contains no instrumentation code whatsover, most
likely because it is guaranteed to go irrevocable upon entry. */
#define GTMA_HAS_NO_INSTRUMENTATION (1u << 7)
struct GTY(()) gimple_statement_transaction
{
/* [ WORD 1-9 ] */
struct gimple_statement_with_memory_ops_base gsbase;
/* [ WORD 10 ] */
gimple_seq body;
/* [ WORD 11 ] */
tree label;
};
#define DEFGSSTRUCT(SYM, STRUCT, HAS_TREE_OP) SYM,
enum gimple_statement_structure_enum {
#include "gsstruct.def"
LAST_GSS_ENUM
};
#undef DEFGSSTRUCT
/* Define the overall contents of a gimple tuple. It may be any of the
structures declared above for various types of tuples. */
union GTY ((desc ("gimple_statement_structure (&%h)"),
chain_next ("%h.gsbase.next"), variable_size)) gimple_statement_d {
struct gimple_statement_base GTY ((tag ("GSS_BASE"))) gsbase;
struct gimple_statement_with_ops GTY ((tag ("GSS_WITH_OPS"))) gsops;
struct gimple_statement_with_memory_ops_base GTY ((tag ("GSS_WITH_MEM_OPS_BASE"))) gsmembase;
struct gimple_statement_with_memory_ops GTY ((tag ("GSS_WITH_MEM_OPS"))) gsmem;
struct gimple_statement_call GTY ((tag ("GSS_CALL"))) gimple_call;
struct gimple_statement_omp GTY ((tag ("GSS_OMP"))) omp;
struct gimple_statement_bind GTY ((tag ("GSS_BIND"))) gimple_bind;
struct gimple_statement_catch GTY ((tag ("GSS_CATCH"))) gimple_catch;
struct gimple_statement_eh_filter GTY ((tag ("GSS_EH_FILTER"))) gimple_eh_filter;
struct gimple_statement_eh_mnt GTY ((tag ("GSS_EH_MNT"))) gimple_eh_mnt;
struct gimple_statement_eh_else GTY ((tag ("GSS_EH_ELSE"))) gimple_eh_else;
struct gimple_statement_phi GTY ((tag ("GSS_PHI"))) gimple_phi;
struct gimple_statement_eh_ctrl GTY ((tag ("GSS_EH_CTRL"))) gimple_eh_ctrl;
struct gimple_statement_try GTY ((tag ("GSS_TRY"))) gimple_try;
struct gimple_statement_wce GTY ((tag ("GSS_WCE"))) gimple_wce;
struct gimple_statement_asm GTY ((tag ("GSS_ASM"))) gimple_asm;
struct gimple_statement_omp_critical GTY ((tag ("GSS_OMP_CRITICAL"))) gimple_omp_critical;
struct gimple_statement_omp_for GTY ((tag ("GSS_OMP_FOR"))) gimple_omp_for;
struct gimple_statement_omp_parallel GTY ((tag ("GSS_OMP_PARALLEL"))) gimple_omp_parallel;
struct gimple_statement_omp_task GTY ((tag ("GSS_OMP_TASK"))) gimple_omp_task;
struct gimple_statement_omp_sections GTY ((tag ("GSS_OMP_SECTIONS"))) gimple_omp_sections;
struct gimple_statement_omp_single GTY ((tag ("GSS_OMP_SINGLE"))) gimple_omp_single;
struct gimple_statement_omp_continue GTY ((tag ("GSS_OMP_CONTINUE"))) gimple_omp_continue;
struct gimple_statement_omp_atomic_load GTY ((tag ("GSS_OMP_ATOMIC_LOAD"))) gimple_omp_atomic_load;
struct gimple_statement_omp_atomic_store GTY ((tag ("GSS_OMP_ATOMIC_STORE"))) gimple_omp_atomic_store;
struct gimple_statement_transaction GTY((tag ("GSS_TRANSACTION"))) gimple_transaction;
};
/* In gimple.c. */
/* Offset in bytes to the location of the operand vector.
Zero if there is no operand vector for this tuple structure. */
extern size_t const gimple_ops_offset_[];
/* Map GIMPLE codes to GSS codes. */
extern enum gimple_statement_structure_enum const gss_for_code_[];
/* This variable holds the currently expanded gimple statement for purposes
of comminucating the profile info to the builtin expanders. */
extern gimple currently_expanding_gimple_stmt;
gimple gimple_build_return (tree);
gimple gimple_build_assign_stat (tree, tree MEM_STAT_DECL);
#define gimple_build_assign(l,r) gimple_build_assign_stat (l, r MEM_STAT_INFO)
void extract_ops_from_tree_1 (tree, enum tree_code *, tree *, tree *, tree *);
gimple
gimple_build_assign_with_ops (enum tree_code, tree,
tree, tree CXX_MEM_STAT_INFO);
gimple
gimple_build_assign_with_ops (enum tree_code, tree,
tree, tree, tree CXX_MEM_STAT_INFO);
gimple gimple_build_debug_bind_stat (tree, tree, gimple MEM_STAT_DECL);
#define gimple_build_debug_bind(var,val,stmt) \
gimple_build_debug_bind_stat ((var), (val), (stmt) MEM_STAT_INFO)
gimple gimple_build_debug_source_bind_stat (tree, tree, gimple MEM_STAT_DECL);
#define gimple_build_debug_source_bind(var,val,stmt) \
gimple_build_debug_source_bind_stat ((var), (val), (stmt) MEM_STAT_INFO)
gimple gimple_build_call_vec (tree, vec<tree> );
gimple gimple_build_call (tree, unsigned, ...);
gimple gimple_build_call_valist (tree, unsigned, va_list);
gimple gimple_build_call_internal (enum internal_fn, unsigned, ...);
gimple gimple_build_call_internal_vec (enum internal_fn, vec<tree> );
gimple gimple_build_call_from_tree (tree);
gimple gimplify_assign (tree, tree, gimple_seq *);
gimple gimple_build_cond (enum tree_code, tree, tree, tree, tree);
gimple gimple_build_label (tree label);
gimple gimple_build_goto (tree dest);
gimple gimple_build_nop (void);
gimple gimple_build_bind (tree, gimple_seq, tree);
gimple gimple_build_asm_vec (const char *, vec<tree, va_gc> *,
vec<tree, va_gc> *, vec<tree, va_gc> *,
vec<tree, va_gc> *);
gimple gimple_build_catch (tree, gimple_seq);
gimple gimple_build_eh_filter (tree, gimple_seq);
gimple gimple_build_eh_must_not_throw (tree);
gimple gimple_build_eh_else (gimple_seq, gimple_seq);
gimple gimple_build_try (gimple_seq, gimple_seq, enum gimple_try_flags);
gimple gimple_build_wce (gimple_seq);
gimple gimple_build_resx (int);
gimple gimple_build_eh_dispatch (int);
gimple gimple_build_switch_nlabels (unsigned, tree, tree);
gimple gimple_build_switch (tree, tree, vec<tree> );
gimple gimple_build_omp_parallel (gimple_seq, tree, tree, tree);
gimple gimple_build_omp_task (gimple_seq, tree, tree, tree, tree, tree, tree);
gimple gimple_build_omp_for (gimple_seq, tree, size_t, gimple_seq);
gimple gimple_build_omp_critical (gimple_seq, tree);
gimple gimple_build_omp_section (gimple_seq);
gimple gimple_build_omp_continue (tree, tree);
gimple gimple_build_omp_master (gimple_seq);
gimple gimple_build_omp_return (bool);
gimple gimple_build_omp_ordered (gimple_seq);
gimple gimple_build_omp_sections (gimple_seq, tree);
gimple gimple_build_omp_sections_switch (void);
gimple gimple_build_omp_single (gimple_seq, tree);
gimple gimple_build_cdt (tree, tree);
gimple gimple_build_omp_atomic_load (tree, tree);
gimple gimple_build_omp_atomic_store (tree);
gimple gimple_build_transaction (gimple_seq, tree);
gimple gimple_build_predict (enum br_predictor, enum prediction);
enum gimple_statement_structure_enum gss_for_assign (enum tree_code);
void sort_case_labels (vec<tree> );
void preprocess_case_label_vec_for_gimple (vec<tree> , tree, tree *);
void gimple_set_body (tree, gimple_seq);
gimple_seq gimple_body (tree);
bool gimple_has_body_p (tree);
gimple_seq gimple_seq_alloc (void);
void gimple_seq_free (gimple_seq);
void gimple_seq_add_seq (gimple_seq *, gimple_seq);
gimple_seq gimple_seq_copy (gimple_seq);
bool gimple_call_same_target_p (const_gimple, const_gimple);
int gimple_call_flags (const_gimple);
int gimple_call_return_flags (const_gimple);
int gimple_call_arg_flags (const_gimple, unsigned);
void gimple_call_reset_alias_info (gimple);
bool gimple_assign_is_zero_sign_ext_redundant (gimple);
bool gimple_assign_copy_p (gimple);
bool gimple_assign_ssa_name_copy_p (gimple);
bool gimple_assign_unary_nop_p (gimple);
void gimple_set_bb (gimple, basic_block);
void gimple_assign_set_rhs_from_tree (gimple_stmt_iterator *, tree);
void gimple_assign_set_rhs_with_ops_1 (gimple_stmt_iterator *, enum tree_code,
tree, tree, tree);
tree gimple_get_lhs (const_gimple);
void gimple_set_lhs (gimple, tree);
void gimple_replace_lhs (gimple, tree);
gimple gimple_copy (gimple);
void gimple_cond_get_ops_from_tree (tree, enum tree_code *, tree *, tree *);
gimple gimple_build_cond_from_tree (tree, tree, tree);
void gimple_cond_set_condition_from_tree (gimple, tree);
bool gimple_has_side_effects (const_gimple);
bool gimple_could_trap_p (gimple);
bool gimple_could_trap_p_1 (gimple, bool, bool);
bool gimple_assign_rhs_could_trap_p (gimple);
void gimple_regimplify_operands (gimple, gimple_stmt_iterator *);
bool empty_body_p (gimple_seq);
unsigned get_gimple_rhs_num_ops (enum tree_code);
#define gimple_alloc(c, n) gimple_alloc_stat (c, n MEM_STAT_INFO)
gimple gimple_alloc_stat (enum gimple_code, unsigned MEM_STAT_DECL);
const char *gimple_decl_printable_name (tree, int);
tree gimple_get_virt_method_for_binfo (HOST_WIDE_INT, tree);
tree gimple_extract_devirt_binfo_from_cst (tree);
/* Returns true iff T is a scalar register variable. */
extern bool is_gimple_reg (tree);
/* Returns true iff T is any sort of variable. */
extern bool is_gimple_variable (tree);
/* Returns true iff T is any sort of symbol. */
extern bool is_gimple_id (tree);
/* Returns true iff T is a variable or an INDIRECT_REF (of a variable). */
extern bool is_gimple_min_lval (tree);
/* Returns true iff T is something whose address can be taken. */
extern bool is_gimple_addressable (tree);
/* Returns true iff T is any valid GIMPLE lvalue. */
extern bool is_gimple_lvalue (tree);
/* Returns true iff T is a GIMPLE address. */
bool is_gimple_address (const_tree);
/* Returns true iff T is a GIMPLE invariant address. */
bool is_gimple_invariant_address (const_tree);
/* Returns true iff T is a GIMPLE invariant address at interprocedural
level. */
bool is_gimple_ip_invariant_address (const_tree);
/* Returns true iff T is a valid GIMPLE constant. */
bool is_gimple_constant (const_tree);
/* Returns true iff T is a GIMPLE restricted function invariant. */
extern bool is_gimple_min_invariant (const_tree);
/* Returns true iff T is a GIMPLE restricted interprecodural invariant. */
extern bool is_gimple_ip_invariant (const_tree);
/* Returns true iff T is a GIMPLE rvalue. */
extern bool is_gimple_val (tree);
/* Returns true iff T is a GIMPLE asm statement input. */
extern bool is_gimple_asm_val (tree);
/* Returns true iff T is a valid address operand of a MEM_REF. */
bool is_gimple_mem_ref_addr (tree);
/* Returns true iff T is a valid if-statement condition. */
extern bool is_gimple_condexpr (tree);
/* Returns true iff T is a valid call address expression. */
extern bool is_gimple_call_addr (tree);
/* Return TRUE iff stmt is a call to a built-in function. */
extern bool is_gimple_builtin_call (gimple stmt);
extern void recalculate_side_effects (tree);
extern bool gimple_compare_field_offset (tree, tree);
extern tree gimple_register_canonical_type (tree);
extern void print_gimple_types_stats (const char *);
extern void free_gimple_type_tables (void);
extern tree gimple_unsigned_type (tree);
extern tree gimple_signed_type (tree);
extern alias_set_type gimple_get_alias_set (tree);
extern void count_uses_and_derefs (tree, gimple, unsigned *, unsigned *,
unsigned *);
extern bool walk_stmt_load_store_addr_ops (gimple, void *,
bool (*)(gimple, tree, void *),
bool (*)(gimple, tree, void *),
bool (*)(gimple, tree, void *));
extern bool walk_stmt_load_store_ops (gimple, void *,
bool (*)(gimple, tree, void *),
bool (*)(gimple, tree, void *));
extern bool gimple_ior_addresses_taken (bitmap, gimple);
extern bool gimple_call_builtin_p (gimple, enum built_in_class);
extern bool gimple_call_builtin_p (gimple, enum built_in_function);
extern bool gimple_asm_clobbers_memory_p (const_gimple);
/* In gimplify.c */
extern tree create_tmp_var_raw (tree, const char *);
extern tree create_tmp_var_name (const char *);
extern tree create_tmp_var (tree, const char *);
extern tree create_tmp_reg (tree, const char *);
extern tree get_initialized_tmp_var (tree, gimple_seq *, gimple_seq *);
extern tree get_formal_tmp_var (tree, gimple_seq *);
extern void declare_vars (tree, gimple, bool);
extern void annotate_all_with_location (gimple_seq, location_t);
/* Validation of GIMPLE expressions.  Note that these predicates only check
   the basic form of the expression, they don't recurse to make sure that
   underlying nodes are also of the right form.  */
/* A predicate over trees, used by the gimplifier to decide whether an
   operand is already in a valid form for a given context.  */
typedef bool (*gimple_predicate)(tree);
/* FIXME we should deduce this from the predicate.  */
/* Bitmask describing what kind of temporary, if any, the gimplifier may
   create to hold the value of the expression being gimplified.  Combined
   values are carried around as a fallback_t.  */
enum fallback {
  fb_none = 0,		/* Do not generate a temporary.  */
  fb_rvalue = 1,	/* Generate an rvalue to hold the result of a
			   gimplified expression.  */
  fb_lvalue = 2,	/* Generate an lvalue to hold the result of a
			   gimplified expression.  */
  fb_mayfail = 4,	/* Gimplification may fail.  Error issued
			   afterwards.  */
  fb_either= fb_rvalue | fb_lvalue
};
typedef int fallback_t;
/* Result of a single gimplification step; negative values are failures
   and GS_ALL_DONE means no further work is required.  */
enum gimplify_status {
  GS_ERROR	= -2,	/* Something Bad Seen.  */
  GS_UNHANDLED	= -1,	/* A langhook result for "I dunno".  */
  GS_OK		= 0,	/* We did something, maybe more to do.  */
  GS_ALL_DONE	= 1	/* The expression is fully gimplified.  */
};
/* Per-unit gimplification state; contexts form a stack managed by
   push_gimplify_context/pop_gimplify_context (declared below).  */
struct gimplify_ctx
{
  /* Enclosing context on the stack, or NULL at the outermost level.  */
  struct gimplify_ctx *prev_context;
  /* GIMPLE_BIND statements currently open (see gimple_bind_expr_stack).  */
  vec<gimple> bind_expr_stack;
  /* Chain of temporaries created in this context.  */
  tree temps;
  gimple_seq conditional_cleanups;
  tree exit_label;
  tree return_temp;
  /* Case labels collected while gimplifying a switch body.  */
  vec<tree> case_labels;
  /* The formal temporary table.  Should this be persistent?  */
  htab_t temp_htab;
  /* Nesting depth of conditional expressions being gimplified
     (presumably used to decide when cleanups must be conditional —
     TODO confirm against gimplify.c).  */
  int conditions;
  bool save_stack;
  bool into_ssa;
  bool allow_rhs_cond_expr;
  bool in_cleanup_point_expr;
};
/* Return true if gimplify_one_sizepos doesn't need to gimplify
   expr (when in TYPE_SIZE{,_UNIT} and similar type/decl size/bitsize
   fields).  */
static inline bool
is_gimple_sizepos (tree expr)
{
  /* Nothing to do if the value is absent or already constant.  */
  if (expr == NULL_TREE || TREE_CONSTANT (expr))
    return true;
  /* A VAR_DECL needs no gimplification either.  If it comes from another
     function the gimplifier would want to replace it with a new variable,
     but that would cause problems if this type is from outside the
     function, so it's OK to leave it as-is here.  */
  if (TREE_CODE (expr) == VAR_DECL)
    return true;
  /* PLACEHOLDER_EXPRs are likewise left alone.  */
  return CONTAINS_PLACEHOLDER_P (expr);
}
extern enum gimplify_status gimplify_expr (tree *, gimple_seq *, gimple_seq *,
bool (*) (tree), fallback_t);
extern void gimplify_type_sizes (tree, gimple_seq *);
extern void gimplify_one_sizepos (tree *, gimple_seq *);
enum gimplify_status gimplify_self_mod_expr (tree *, gimple_seq *, gimple_seq *,
bool, tree);
extern bool gimplify_stmt (tree *, gimple_seq *);
extern gimple gimplify_body (tree, bool);
extern void push_gimplify_context (struct gimplify_ctx *);
extern void pop_gimplify_context (gimple);
extern void gimplify_and_add (tree, gimple_seq *);
/* Miscellaneous helpers. */
extern void gimple_add_tmp_var (tree);
extern gimple gimple_current_bind_expr (void);
extern vec<gimple> gimple_bind_expr_stack (void);
extern tree voidify_wrapper_expr (tree, tree);
extern tree build_and_jump (tree *);
extern tree force_labels_r (tree *, int *, void *);
extern enum gimplify_status gimplify_va_arg_expr (tree *, gimple_seq *,
gimple_seq *);
struct gimplify_omp_ctx;
extern void omp_firstprivatize_variable (struct gimplify_omp_ctx *, tree);
extern tree gimple_boolify (tree);
extern gimple_predicate rhs_predicate_for (tree);
extern tree canonicalize_cond_expr_cond (tree);
/* In omp-low.c. */
extern tree omp_reduction_init (tree, tree);
/* In trans-mem.c. */
extern void diagnose_tm_safe_errors (tree);
extern void compute_transaction_bits (void);
/* In tree-nested.c. */
extern void lower_nested_functions (tree);
extern void insert_field_into_struct (tree, tree);
/* In gimplify.c. */
extern void gimplify_function_tree (tree);
/* In cfgexpand.c. */
extern tree gimple_assign_rhs_to_tree (gimple);
/* In builtins.c */
extern bool validate_gimple_arglist (const_gimple, ...);
/* In tree-ssa.c */
extern bool tree_ssa_useless_type_conversion (tree);
extern tree tree_ssa_strip_useless_type_conversions (tree);
extern bool useless_type_conversion_p (tree, tree);
extern bool types_compatible_p (tree, tree);
/* In tree-ssa-coalesce.c */
extern bool gimple_can_coalesce_p (tree, tree);
/* Return the first node in GIMPLE sequence S.  A gimple_seq is simply a
   pointer to its first statement node, so this is the identity.  */
static inline gimple_seq_node
gimple_seq_first (gimple_seq s)
{
  return s;
}
/* Return the first statement in GIMPLE sequence S.  */
static inline gimple
gimple_seq_first_stmt (gimple_seq s)
{
  return gimple_seq_first (s);
}
/* Return the last node in GIMPLE sequence S, or NULL if S is empty.
   The last node is reached through the first node's prev link.  */
static inline gimple_seq_node
gimple_seq_last (gimple_seq s)
{
  if (s == NULL)
    return NULL;
  return s->gsbase.prev;
}
/* Return the last statement in GIMPLE sequence S.  */
static inline gimple
gimple_seq_last_stmt (gimple_seq s)
{
  return gimple_seq_last (s);
}
/* Set the last node in GIMPLE sequence *PS to LAST by updating the prev
   link of the sequence head.  */
static inline void
gimple_seq_set_last (gimple_seq *ps, gimple_seq_node last)
{
  gimple_seq head = *ps;
  head->gsbase.prev = last;
}
/* Set the first node in GIMPLE sequence *PS to FIRST.  Since a sequence
   is just its head node, this is a plain assignment.  */
static inline void
gimple_seq_set_first (gimple_seq *ps, gimple_seq_node first)
{
  *ps = first;
}
/* Return true if GIMPLE sequence S is empty.  */
static inline bool
gimple_seq_empty_p (gimple_seq s)
{
  return !s;
}
void gimple_seq_add_stmt (gimple_seq *, gimple);
/* Link gimple statement GS to the end of the sequence *SEQ_P. If
*SEQ_P is NULL, a new sequence is allocated. This function is
similar to gimple_seq_add_stmt, but does not scan the operands.
During gimplification, we need to manipulate statement sequences
before the def/use vectors have been constructed. */
void gimple_seq_add_stmt_without_update (gimple_seq *, gimple);
/* Allocate a new sequence and initialize its first element with STMT.  */
static inline gimple_seq
gimple_seq_alloc_with_stmt (gimple stmt)
{
  gimple_seq result = NULL;
  /* gimple_seq_add_stmt allocates the sequence on first use.  */
  gimple_seq_add_stmt (&result, stmt);
  return result;
}
/* Returns the sequence of statements in BB, or NULL once the block has
   been converted to RTL (BB_RTL set).  */
static inline gimple_seq
bb_seq (const_basic_block bb)
{
  if (bb->flags & BB_RTL)
    return NULL;
  return bb->il.gimple.seq;
}
/* Returns the address of BB's statement sequence, or NULL for an RTL
   block.  */
static inline gimple_seq *
bb_seq_addr (basic_block bb)
{
  if (bb->flags & BB_RTL)
    return NULL;
  return &bb->il.gimple.seq;
}
/* Sets the sequence of statements in BB to SEQ.  Only valid while BB is
   still in GIMPLE form.  */
static inline void
set_bb_seq (basic_block bb, gimple_seq seq)
{
  gcc_checking_assert (!(bb->flags & BB_RTL));
  bb->il.gimple.seq = seq;
}
/* Return the code for GIMPLE statement G.  */
static inline enum gimple_code
gimple_code (const_gimple g)
{
  return g->gsbase.code;
}
/* Return the GSS (statement structure) code used by a GIMPLE code, via
   the global gss_for_code_ lookup table.  */
static inline enum gimple_statement_structure_enum
gss_for_code (enum gimple_code code)
{
  gcc_gimple_checking_assert ((unsigned int)code < LAST_AND_UNUSED_GIMPLE_CODE);
  return gss_for_code_[code];
}
/* Return which GSS code is used by GS.  */
static inline enum gimple_statement_structure_enum
gimple_statement_structure (gimple gs)
{
  return gss_for_code (gimple_code (gs));
}
/* Return true if statement G has sub-statements.  This is only true for
   High GIMPLE statements (container statements that hold nested
   sequences, such as binds, EH regions and OMP constructs).  */
static inline bool
gimple_has_substatements (gimple g)
{
  switch (gimple_code (g))
    {
    case GIMPLE_BIND:
    case GIMPLE_CATCH:
    case GIMPLE_EH_FILTER:
    case GIMPLE_EH_ELSE:
    case GIMPLE_TRY:
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_WITH_CLEANUP_EXPR:
    case GIMPLE_TRANSACTION:
      return true;
    default:
      return false;
    }
}
/* Return the basic block holding statement G.  */
static inline basic_block
gimple_bb (const_gimple g)
{
  return g->gsbase.bb;
}
/* Return the lexical scope block holding statement G.  The block is
   packed into the statement's combined location word.  */
static inline tree
gimple_block (const_gimple g)
{
  return LOCATION_BLOCK (g->gsbase.location);
}
/* Set BLOCK to be the lexical scope block holding statement G.  Passing
   a NULL block strips any block from the location instead.  */
static inline void
gimple_set_block (gimple g, tree block)
{
  if (block)
    g->gsbase.location =
	COMBINE_LOCATION_DATA (line_table, g->gsbase.location, block);
  else
    g->gsbase.location = LOCATION_LOCUS (g->gsbase.location);
}
/* Return location information for statement G.  */
static inline location_t
gimple_location (const_gimple g)
{
  return g->gsbase.location;
}
/* Return pointer to location information for statement G.  */
static inline const location_t *
gimple_location_ptr (const_gimple g)
{
  return &g->gsbase.location;
}
/* Set location information for statement G.  */
static inline void
gimple_set_location (gimple g, location_t location)
{
  g->gsbase.location = location;
}
/* Return true if G contains location information.  */
static inline bool
gimple_has_location (const_gimple g)
{
  return LOCATION_LOCUS (gimple_location (g)) != UNKNOWN_LOCATION;
}
/* Return the file name of the location of STMT.  */
static inline const char *
gimple_filename (const_gimple stmt)
{
  return LOCATION_FILE (gimple_location (stmt));
}
/* Return the line number of the location of STMT.  */
static inline int
gimple_lineno (const_gimple stmt)
{
  return LOCATION_LINE (gimple_location (stmt));
}
/* Determine whether SEQ is a singleton, i.e. contains exactly one
   statement.  */
static inline bool
gimple_seq_singleton_p (gimple_seq seq)
{
  gimple_seq_node head = gimple_seq_first (seq);
  return head != NULL && head == gimple_seq_last (seq);
}
/* Return true if no warnings should be emitted for statement STMT.  */
static inline bool
gimple_no_warning_p (const_gimple stmt)
{
  return stmt->gsbase.no_warning;
}
/* Set the no_warning flag of STMT to NO_WARNING.  */
static inline void
gimple_set_no_warning (gimple stmt, bool no_warning)
{
  stmt->gsbase.no_warning = (unsigned) no_warning;
}
/* Set the visited status on statement STMT to VISITED_P.  */
static inline void
gimple_set_visited (gimple stmt, bool visited_p)
{
  stmt->gsbase.visited = (unsigned) visited_p;
}
/* Return the visited status for statement STMT.  */
static inline bool
gimple_visited_p (gimple stmt)
{
  return stmt->gsbase.visited;
}
/* Set pass local flag PLF on statement STMT to VAL_P.  PLF is a bitmask;
   the chosen bit is set or cleared without touching the others.  */
static inline void
gimple_set_plf (gimple stmt, enum plf_mask plf, bool val_p)
{
  if (val_p)
    stmt->gsbase.plf |= (unsigned int) plf;
  else
    stmt->gsbase.plf &= ~((unsigned int) plf);
}
/* Return the value of pass local flag PLF on statement STMT (nonzero if
   the flag is set).  */
static inline unsigned int
gimple_plf (gimple stmt, enum plf_mask plf)
{
  return stmt->gsbase.plf & ((unsigned int) plf);
}
/* Set the UID of statement.  */
static inline void
gimple_set_uid (gimple g, unsigned uid)
{
  g->gsbase.uid = uid;
}
/* Return the UID of statement.  */
static inline unsigned
gimple_uid (const_gimple g)
{
  return g->gsbase.uid;
}
/* Make statement G a singleton sequence: no successor, and the prev link
   (which points at the last node of a sequence) refers back to G.  */
static inline void
gimple_init_singleton (gimple g)
{
  g->gsbase.next = NULL;
  g->gsbase.prev = g;
}
/* Return true if GIMPLE statement G has register or memory operands.
   Relies on the ordering of codes in the gimple_code enum: every code in
   [GIMPLE_COND, GIMPLE_RETURN] carries an operand vector.  */
static inline bool
gimple_has_ops (const_gimple g)
{
  return gimple_code (g) >= GIMPLE_COND && gimple_code (g) <= GIMPLE_RETURN;
}
/* Return true if GIMPLE statement G has memory operands.  Also an
   enum-range check: codes in [GIMPLE_ASSIGN, GIMPLE_RETURN].  */
static inline bool
gimple_has_mem_ops (const_gimple g)
{
  return gimple_code (g) >= GIMPLE_ASSIGN && gimple_code (g) <= GIMPLE_RETURN;
}
/* Return the set of USE operands for statement G, or NULL if G has no
   operands at all.  */
static inline struct use_optype_d *
gimple_use_ops (const_gimple g)
{
  if (!gimple_has_ops (g))
    return NULL;
  return g->gsops.opbase.use_ops;
}
/* Set USE to be the set of USE operands for statement G.  */
static inline void
gimple_set_use_ops (gimple g, struct use_optype_d *use)
{
  gcc_gimple_checking_assert (gimple_has_ops (g));
  g->gsops.opbase.use_ops = use;
}
/* Return the VUSE operand of statement G as a use_operand_p, or
   NULL_USE_OPERAND_P if G has none.  The VUSE, when present, is the
   first entry of the use-operand list and points at gsmembase.vuse.  */
static inline use_operand_p
gimple_vuse_op (const_gimple g)
{
  struct use_optype_d *ops;
  if (!gimple_has_mem_ops (g))
    return NULL_USE_OPERAND_P;
  ops = g->gsops.opbase.use_ops;
  if (ops
      && USE_OP_PTR (ops)->use == &g->gsmembase.vuse)
    return USE_OP_PTR (ops);
  return NULL_USE_OPERAND_P;
}
/* Return the VDEF operand of statement G as a def_operand_p, or
   NULL_DEF_OPERAND_P if G has none.  */
static inline def_operand_p
gimple_vdef_op (gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL_DEF_OPERAND_P;
  if (g->gsmembase.vdef)
    return &g->gsmembase.vdef;
  return NULL_DEF_OPERAND_P;
}
/* Return the single VUSE operand of the statement G (NULL_TREE if G
   cannot have memory operands).  */
static inline tree
gimple_vuse (const_gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL_TREE;
  return g->gsmembase.vuse;
}
/* Return the single VDEF operand of the statement G (NULL_TREE if G
   cannot have memory operands).  */
static inline tree
gimple_vdef (const_gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL_TREE;
  return g->gsmembase.vdef;
}
/* Return a pointer to the single VUSE operand of the statement G.  */
static inline tree *
gimple_vuse_ptr (gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL;
  return &g->gsmembase.vuse;
}
/* Return a pointer to the single VDEF operand of the statement G.  */
static inline tree *
gimple_vdef_ptr (gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL;
  return &g->gsmembase.vdef;
}
/* Set the single VUSE operand of the statement G.  */
static inline void
gimple_set_vuse (gimple g, tree vuse)
{
  gcc_gimple_checking_assert (gimple_has_mem_ops (g));
  g->gsmembase.vuse = vuse;
}
/* Set the single VDEF operand of the statement G.  */
static inline void
gimple_set_vdef (gimple g, tree vdef)
{
  gcc_gimple_checking_assert (gimple_has_mem_ops (g));
  g->gsmembase.vdef = vdef;
}
/* Return true if statement G has operands and the modified field has
   been set.  Statements without operands are never considered
   modified.  */
static inline bool
gimple_modified_p (const_gimple g)
{
  return (gimple_has_ops (g)) ? (bool) g->gsbase.modified : false;
}
/* Set the MODIFIED flag to MODIFIEDP, iff the gimple statement G has
   a MODIFIED field.  Silently a no-op for statements without
   operands.  */
static inline void
gimple_set_modified (gimple s, bool modifiedp)
{
  if (gimple_has_ops (s))
    s->gsbase.modified = (unsigned) modifiedp;
}
/* Return the tree code for the expression computed by STMT.  This is
   only valid for GIMPLE_COND, GIMPLE_CALL and GIMPLE_ASSIGN.  For
   GIMPLE_CALL, return CALL_EXPR as the expression code for
   consistency.  This is useful when the caller needs to deal with the
   three kinds of computation that GIMPLE supports.  */
static inline enum tree_code
gimple_expr_code (const_gimple stmt)
{
  enum gimple_code code = gimple_code (stmt);
  switch (code)
    {
    case GIMPLE_ASSIGN:
    case GIMPLE_COND:
      /* For these two the subcode is the tree expression code.  */
      return (enum tree_code) stmt->gsbase.subcode;
    default:
      /* Any other statement kind here is a caller error.  */
      gcc_gimple_checking_assert (code == GIMPLE_CALL);
      return CALL_EXPR;
    }
}
/* Mark statement S as modified, and update it.  A no-op for statements
   without operands.  */
static inline void
update_stmt (gimple s)
{
  if (!gimple_has_ops (s))
    return;
  gimple_set_modified (s, true);
  update_stmt_operands (s);
}
/* Update statement S only if it has been marked modified.  */
static inline void
update_stmt_if_modified (gimple s)
{
  if (gimple_modified_p (s))
    update_stmt_operands (s);
}
/* Return true if statement STMT contains volatile operands.  Statements
   without memory operands can never have volatile operands.  */
static inline bool
gimple_has_volatile_ops (const_gimple stmt)
{
  return gimple_has_mem_ops (stmt) && stmt->gsbase.has_volatile_ops;
}
/* Set the HAS_VOLATILE_OPS flag to VOLATILEP; ignored for statements
   without memory operands.  */
static inline void
gimple_set_has_volatile_ops (gimple stmt, bool volatilep)
{
  if (!gimple_has_mem_ops (stmt))
    return;
  stmt->gsbase.has_volatile_ops = (unsigned) volatilep;
}
/* Return true if BB is in a transaction (only meaningful when
   transactional memory, flag_tm, is enabled).  */
static inline bool
block_in_transaction (basic_block bb)
{
  if (!flag_tm)
    return false;
  return (bb->flags & BB_IN_TRANSACTION) != 0;
}
/* Return true if STMT is in a transaction, judged by its basic block.  */
static inline bool
gimple_in_transaction (gimple stmt)
{
  return block_in_transaction (gimple_bb (stmt));
}
/* Return true if statement STMT may access memory, i.e. it can carry
   memory operands and has a VUSE.  */
static inline bool
gimple_references_memory_p (gimple stmt)
{
  return gimple_has_mem_ops (stmt) && gimple_vuse (stmt) != NULL_TREE;
}
/* Return the subcode for OMP statement S.  Only valid for codes in the
   [GIMPLE_OMP_ATOMIC_LOAD, GIMPLE_OMP_SINGLE] range of the enum.  */
static inline unsigned
gimple_omp_subcode (const_gimple s)
{
  gcc_gimple_checking_assert (gimple_code (s) >= GIMPLE_OMP_ATOMIC_LOAD
	      && gimple_code (s) <= GIMPLE_OMP_SINGLE);
  return s->gsbase.subcode;
}
/* Set the subcode for OMP statement S to SUBCODE.  */
static inline void
gimple_omp_set_subcode (gimple s, unsigned int subcode)
{
  /* We only have 16 bits for the subcode.  Assert that we are not
     overflowing it.  */
  gcc_gimple_checking_assert (subcode < (1 << 16));
  s->gsbase.subcode = subcode;
}
/* Set the nowait flag on OMP_RETURN statement S.  */
static inline void
gimple_omp_return_set_nowait (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_OMP_RETURN);
  s->gsbase.subcode |= GF_OMP_RETURN_NOWAIT;
}
/* Return true if OMP return statement G has the GF_OMP_RETURN_NOWAIT
   flag set.  */
static inline bool
gimple_omp_return_nowait_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_RETURN);
  return (gimple_omp_subcode (g) & GF_OMP_RETURN_NOWAIT) != 0;
}
/* Return true if OMP section statement G has the GF_OMP_SECTION_LAST
   flag set.  */
static inline bool
gimple_omp_section_last_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
  return (gimple_omp_subcode (g) & GF_OMP_SECTION_LAST) != 0;
}
/* Set the GF_OMP_SECTION_LAST flag on G.  */
static inline void
gimple_omp_section_set_last (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
  g->gsbase.subcode |= GF_OMP_SECTION_LAST;
}
/* Return true if OMP parallel statement G has the
   GF_OMP_PARALLEL_COMBINED flag set.  */
static inline bool
gimple_omp_parallel_combined_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
  return (gimple_omp_subcode (g) & GF_OMP_PARALLEL_COMBINED) != 0;
}
/* Set the GF_OMP_PARALLEL_COMBINED field in G depending on the boolean
   value of COMBINED_P.  */
static inline void
gimple_omp_parallel_set_combined_p (gimple g, bool combined_p)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
  if (combined_p)
    g->gsbase.subcode |= GF_OMP_PARALLEL_COMBINED;
  else
    g->gsbase.subcode &= ~GF_OMP_PARALLEL_COMBINED;
}
/* Return true if OMP atomic load/store statement G has the
   GF_OMP_ATOMIC_NEED_VALUE flag set.  G must be an atomic load or an
   atomic store; the checks below accept exactly those two codes.  */
static inline bool
gimple_omp_atomic_need_value_p (const_gimple g)
{
  if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
    GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  return (gimple_omp_subcode (g) & GF_OMP_ATOMIC_NEED_VALUE) != 0;
}
/* Set the GF_OMP_ATOMIC_NEED_VALUE flag on G (an atomic load or
   store).  */
static inline void
gimple_omp_atomic_set_need_value (gimple g)
{
  if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
    GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  g->gsbase.subcode |= GF_OMP_ATOMIC_NEED_VALUE;
}
/* Return the number of operands for statement GS.  */
static inline unsigned
gimple_num_ops (const_gimple gs)
{
  return gs->gsbase.num_ops;
}
/* Set the number of operands for statement GS.  */
static inline void
gimple_set_num_ops (gimple gs, unsigned num_ops)
{
  gs->gsbase.num_ops = num_ops;
}
/* Return the array of operands for statement GS.  The array lives at a
   per-structure byte offset from the tuple itself.  */
static inline tree *
gimple_ops (gimple gs)
{
  size_t off;
  /* All the tuples have their operand vector at the very bottom
     of the structure.  Note that those structures that do not
     have an operand vector have a zero offset.  */
  off = gimple_ops_offset_[gimple_statement_structure (gs)];
  gcc_gimple_checking_assert (off != 0);
  return (tree *) ((char *) gs + off);
}
/* Return operand I for statement GS, or NULL_TREE if GS has no
   operand vector.  */
static inline tree
gimple_op (const_gimple gs, unsigned i)
{
  if (gimple_has_ops (gs))
    {
      gcc_gimple_checking_assert (i < gimple_num_ops (gs));
      return gimple_ops (CONST_CAST_GIMPLE (gs))[i];
    }
  else
    return NULL_TREE;
}
/* Return a pointer to operand I for statement GS, or NULL if GS has no
   operand vector.  */
static inline tree *
gimple_op_ptr (const_gimple gs, unsigned i)
{
  if (gimple_has_ops (gs))
    {
      gcc_gimple_checking_assert (i < gimple_num_ops (gs));
      return gimple_ops (CONST_CAST_GIMPLE (gs)) + i;
    }
  else
    return NULL;
}
/* Set operand I of statement GS to OP.  */
static inline void
gimple_set_op (gimple gs, unsigned i, tree op)
{
  gcc_gimple_checking_assert (gimple_has_ops (gs) && i < gimple_num_ops (gs));
  /* Note.  It may be tempting to assert that OP matches
     is_gimple_operand, but that would be wrong.  Different tuples
     accept slightly different sets of tree operands.  Each caller
     should perform its own validation.  */
  gimple_ops (gs)[i] = op;
}
/* Return true if GS is a GIMPLE_ASSIGN.  */
static inline bool
is_gimple_assign (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_ASSIGN;
}
/* Determine if expression CODE is one of the valid expressions that can
   be used on the RHS of GIMPLE assignments, by classifying it through
   the global gimple_rhs_class_table.  */
static inline enum gimple_rhs_class
get_gimple_rhs_class (enum tree_code code)
{
  return (enum gimple_rhs_class) gimple_rhs_class_table[(int) code];
}
/* Return the LHS of assignment statement GS (operand 0).  */
static inline tree
gimple_assign_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op (gs, 0);
}
/* Return a pointer to the LHS of assignment statement GS.  */
static inline tree *
gimple_assign_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 0);
}
/* Set LHS to be the LHS operand of assignment statement GS.  If LHS is
   an SSA name, also make GS its defining statement.  */
static inline void
gimple_assign_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 0, lhs);
  if (lhs && TREE_CODE (lhs) == SSA_NAME)
    SSA_NAME_DEF_STMT (lhs) = gs;
}
/* Return the first operand on the RHS of assignment statement GS
   (operand 1).  */
static inline tree
gimple_assign_rhs1 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op (gs, 1);
}
/* Return a pointer to the first operand on the RHS of assignment
   statement GS.  */
static inline tree *
gimple_assign_rhs1_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 1);
}
/* Set RHS to be the first operand on the RHS of assignment statement GS.  */
static inline void
gimple_assign_set_rhs1 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 1, rhs);
}
/* Return the second operand on the RHS of assignment statement GS.
   If GS does not have two operands, NULL is returned instead.  */
static inline tree
gimple_assign_rhs2 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  if (gimple_num_ops (gs) >= 3)
    return gimple_op (gs, 2);
  else
    return NULL_TREE;
}
/* Return a pointer to the second operand on the RHS of assignment
   statement GS.  */
static inline tree *
gimple_assign_rhs2_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 2);
}
/* Set RHS to be the second operand on the RHS of assignment statement GS.  */
static inline void
gimple_assign_set_rhs2 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 2, rhs);
}
/* Return the third operand on the RHS of assignment statement GS.
   If GS does not have three operands, NULL is returned instead.  */
static inline tree
gimple_assign_rhs3 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  if (gimple_num_ops (gs) >= 4)
    return gimple_op (gs, 3);
  else
    return NULL_TREE;
}
/* Return a pointer to the third operand on the RHS of assignment
   statement GS.  */
static inline tree *
gimple_assign_rhs3_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 3);
}
/* Set RHS to be the third operand on the RHS of assignment statement GS.  */
static inline void
gimple_assign_set_rhs3 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 3, rhs);
}
/* A wrapper around gimple_assign_set_rhs_with_ops_1, for callers which expect
   to see only a maximum of two operands.  */
static inline void
gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *gsi, enum tree_code code,
				tree op1, tree op2)
{
  gimple_assign_set_rhs_with_ops_1 (gsi, code, op1, op2, NULL);
}
/* A wrapper around extract_ops_from_tree_1, for callers which expect
   to see only a maximum of two operands.  Asserts that EXPR really had
   no third operand.  */
static inline void
extract_ops_from_tree (tree expr, enum tree_code *code, tree *op0,
		       tree *op1)
{
  tree op2;
  extract_ops_from_tree_1 (expr, code, op0, op1, &op2);
  gcc_assert (op2 == NULL_TREE);
}
/* Returns true if GS is a nontemporal move.  */
static inline bool
gimple_assign_nontemporal_move_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gs->gsbase.nontemporal_move;
}
/* Sets nontemporal move flag of GS to NONTEMPORAL.  */
static inline void
gimple_assign_set_nontemporal_move (gimple gs, bool nontemporal)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gs->gsbase.nontemporal_move = nontemporal;
}
/* Return the code of the expression computed on the rhs of assignment
   statement GS.  In case that the RHS is a single object, returns the
   tree code of the object.  */
static inline enum tree_code
gimple_assign_rhs_code (const_gimple gs)
{
  enum tree_code code;
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  code = (enum tree_code) gs->gsbase.subcode;
  /* While we initially set subcode to the TREE_CODE of the rhs for
     GIMPLE_SINGLE_RHS assigns we do not update that subcode to stay
     in sync when we rewrite stmts into SSA form or do SSA propagations.
     Hence for single-RHS assigns the subcode may be stale and the
     live TREE_CODE of rhs1 is returned instead.  */
  if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
    code = TREE_CODE (gimple_assign_rhs1 (gs));
  return code;
}
/* Set CODE to be the code for the expression computed on the RHS of
   assignment S.  */
static inline void
gimple_assign_set_rhs_code (gimple s, enum tree_code code)
{
  GIMPLE_CHECK (s, GIMPLE_ASSIGN);
  s->gsbase.subcode = code;
}
/* Return the gimple rhs class of the code of the expression computed on
   the rhs of assignment statement GS.
   This will never return GIMPLE_INVALID_RHS.  */
static inline enum gimple_rhs_class
gimple_assign_rhs_class (const_gimple gs)
{
  return get_gimple_rhs_class (gimple_assign_rhs_code (gs));
}
/* Return true if GS is an assignment with a singleton RHS, i.e.,
   there is no operator associated with the assignment itself.
   Unlike gimple_assign_copy_p, this predicate returns true for
   any RHS operand, including those that perform an operation
   and do not have the semantics of a copy, such as COND_EXPR.  */
static inline bool
gimple_assign_single_p (gimple gs)
{
  return (is_gimple_assign (gs)
          && gimple_assign_rhs_class (gs) == GIMPLE_SINGLE_RHS);
}
/* Return true if GS performs a store to its lhs, i.e. the lhs exists
   and is not a scalar register.  */
static inline bool
gimple_store_p (gimple gs)
{
  tree target = gimple_get_lhs (gs);
  return target != NULL_TREE && !is_gimple_reg (target);
}
/* Return true if GS is an assignment that loads from its rhs1.  */
static inline bool
gimple_assign_load_p (gimple gs)
{
  tree src;
  if (!gimple_assign_single_p (gs))
    return false;
  src = gimple_assign_rhs1 (gs);
  /* WITH_SIZE_EXPR wraps a memory access, so it always loads.  */
  if (TREE_CODE (src) == WITH_SIZE_EXPR)
    return true;
  /* Otherwise look at the base of the reference.  */
  src = get_base_address (src);
  if (DECL_P (src))
    return true;
  return TREE_CODE (src) == MEM_REF || TREE_CODE (src) == TARGET_MEM_REF;
}
/* Return true if S is a type-cast assignment.  */
static inline bool
gimple_assign_cast_p (gimple s)
{
  enum tree_code rhs_code;
  if (!is_gimple_assign (s))
    return false;
  rhs_code = gimple_assign_rhs_code (s);
  return (CONVERT_EXPR_CODE_P (rhs_code)
	  || rhs_code == VIEW_CONVERT_EXPR
	  || rhs_code == FIX_TRUNC_EXPR);
}
/* Return true if S is a clobber statement (a single-RHS assignment
   whose rhs is a clobber constructor).  */
static inline bool
gimple_clobber_p (gimple s)
{
  if (!gimple_assign_single_p (s))
    return false;
  return TREE_CLOBBER_P (gimple_assign_rhs1 (s));
}
/* Return true if GS is a GIMPLE_CALL.  */
static inline bool
is_gimple_call (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_CALL;
}
/* Return the LHS of call statement GS (operand 0; may be NULL_TREE when
   the call's value is unused).  */
static inline tree
gimple_call_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 0);
}
/* Return a pointer to the LHS of call statement GS.  */
static inline tree *
gimple_call_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 0);
}
/* Set LHS to be the LHS operand of call statement GS.  If LHS is an SSA
   name, also make GS its defining statement.  */
static inline void
gimple_call_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, 0, lhs);
  if (lhs && TREE_CODE (lhs) == SSA_NAME)
    SSA_NAME_DEF_STMT (lhs) = gs;
}
/* Return true if call GS calls an internal-only function, as enumerated
   by internal_fn.  */
static inline bool
gimple_call_internal_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return (gs->gsbase.subcode & GF_CALL_INTERNAL) != 0;
}
/* Return the target of internal call GS.  Only valid when
   gimple_call_internal_p is true.  */
static inline enum internal_fn
gimple_call_internal_fn (const_gimple gs)
{
  gcc_gimple_checking_assert (gimple_call_internal_p (gs));
  return gs->gimple_call.u.internal_fn;
}
/* Return the function type of the function called by GS, or NULL_TREE
   for internal calls (which share the same union slot).  */
static inline tree
gimple_call_fntype (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  if (gimple_call_internal_p (gs))
    return NULL_TREE;
  return gs->gimple_call.u.fntype;
}
/* Set the type of the function called by GS to FNTYPE.  Not valid for
   internal calls.  */
static inline void
gimple_call_set_fntype (gimple gs, tree fntype)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  gs->gimple_call.u.fntype = fntype;
}
/* Return the tree node representing the function called by call
   statement GS (operand 1).  */
static inline tree
gimple_call_fn (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 1);
}
/* Return a pointer to the tree node representing the function called by call
   statement GS.  */
static inline tree *
gimple_call_fn_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 1);
}
/* Set FN to be the function called by call statement GS.  Not valid for
   internal calls.  */
static inline void
gimple_call_set_fn (gimple gs, tree fn)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  gimple_set_op (gs, 1, fn);
}
/* Set FNDECL to be the function called by call statement GS, wrapping
   the decl in an ADDR_EXPR at GS's location.  */
static inline void
gimple_call_set_fndecl (gimple gs, tree decl)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  gimple_set_op (gs, 1, build_fold_addr_expr_loc (gimple_location (gs), decl));
}
/* Set internal function FN to be the function called by call statement GS.
   GS must already be marked as an internal call.  */
static inline void
gimple_call_set_internal_fn (gimple gs, enum internal_fn fn)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (gimple_call_internal_p (gs));
  gs->gimple_call.u.internal_fn = fn;
}
/* Given a valid GIMPLE_CALL function address return the FUNCTION_DECL
associated with the callee if known. Otherwise return NULL_TREE. */
static inline tree
gimple_call_addr_fndecl (const_tree fn)
{
if (fn && TREE_CODE (fn) == ADDR_EXPR)
{
tree fndecl = TREE_OPERAND (fn, 0);
if (TREE_CODE (fndecl) == MEM_REF
&& TREE_CODE (TREE_OPERAND (fndecl, 0)) == ADDR_EXPR
&& integer_zerop (TREE_OPERAND (fndecl, 1)))
fndecl = TREE_OPERAND (TREE_OPERAND (fndecl, 0), 0);
if (TREE_CODE (fndecl) == FUNCTION_DECL)
return fndecl;
}
return NULL_TREE;
}
/* If a given GIMPLE_CALL's callee is a FUNCTION_DECL, return it.
   Otherwise return NULL.  This function is analogous to
   get_callee_fndecl in tree land.  */
static inline tree
gimple_call_fndecl (const_gimple gs)
{
  return gimple_call_addr_fndecl (gimple_call_fn (gs));
}
/* Return the type returned by call statement GS.  */
static inline tree
gimple_call_return_type (const_gimple gs)
{
  tree type = gimple_call_fntype (gs);
  /* Internal calls have no fntype; fall back to the LHS's type.  */
  if (type == NULL_TREE)
    return TREE_TYPE (gimple_call_lhs (gs));
  /* The type returned by a function is the type of its
     function type.  */
  return TREE_TYPE (type);
}
/* Return the static chain for call statement GS.
   Operand 2 of a GIMPLE_CALL is the static chain (may be NULL).  */
static inline tree
gimple_call_chain (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 2);
}
/* Return a pointer to the static chain for call statement GS.  */
static inline tree *
gimple_call_chain_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 2);
}
/* Set CHAIN to be the static chain for call statement GS.  */
static inline void
gimple_call_set_chain (gimple gs, tree chain)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, 2, chain);
}
/* Return the number of arguments used by call statement GS.  The first
   three operands of a GIMPLE_CALL (LHS, callee and static chain) are
   not arguments, hence the subtraction.  */
static inline unsigned
gimple_call_num_args (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_num_ops (gs) - 3;
}
/* Return the argument at position INDEX for call statement GS.
   Arguments start at operand slot 3, after the LHS, callee and
   static chain.  */
static inline tree
gimple_call_arg (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, index + 3);
}
/* Return a pointer to the argument at position INDEX for call
   statement GS.  */
static inline tree *
gimple_call_arg_ptr (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, index + 3);
}
/* Set ARG to be the argument at position INDEX for call statement GS.  */
static inline void
gimple_call_set_arg (gimple gs, unsigned index, tree arg)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, index + 3, arg);
}
/* If TAIL_P is true, mark call statement S as being a tail call
   (i.e., a call just before the exit of a function).  These calls are
   candidate for tail call optimization.  */
static inline void
gimple_call_set_tail (gimple s, bool tail_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (tail_p)
    s->gsbase.subcode |= GF_CALL_TAILCALL;
  else
    s->gsbase.subcode &= ~GF_CALL_TAILCALL;
}
/* Return true if GIMPLE_CALL S is marked as a tail call.  */
static inline bool
gimple_call_tail_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_TAILCALL) != 0;
}
/* If RETURN_SLOT_OPT_P is true mark GIMPLE_CALL S as valid for return
   slot optimization.  This transformation uses the target of the call
   expansion as the return slot for calls that return in memory.  */
static inline void
gimple_call_set_return_slot_opt (gimple s, bool return_slot_opt_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (return_slot_opt_p)
    s->gsbase.subcode |= GF_CALL_RETURN_SLOT_OPT;
  else
    s->gsbase.subcode &= ~GF_CALL_RETURN_SLOT_OPT;
}
/* Return true if S is marked for return slot optimization.  */
static inline bool
gimple_call_return_slot_opt_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_RETURN_SLOT_OPT) != 0;
}
/* If FROM_THUNK_P is true, mark GIMPLE_CALL S as being the jump from a
   thunk to the thunked-to function.  */
static inline void
gimple_call_set_from_thunk (gimple s, bool from_thunk_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (from_thunk_p)
    s->gsbase.subcode |= GF_CALL_FROM_THUNK;
  else
    s->gsbase.subcode &= ~GF_CALL_FROM_THUNK;
}
/* Return true if GIMPLE_CALL S is a jump from a thunk.  */
static inline bool
gimple_call_from_thunk_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_FROM_THUNK) != 0;
}
/* If PASS_ARG_PACK_P is true, GIMPLE_CALL S is a stdarg call that needs the
   argument pack in its argument list.  */
static inline void
gimple_call_set_va_arg_pack (gimple s, bool pass_arg_pack_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (pass_arg_pack_p)
    s->gsbase.subcode |= GF_CALL_VA_ARG_PACK;
  else
    s->gsbase.subcode &= ~GF_CALL_VA_ARG_PACK;
}
/* Return true if GIMPLE_CALL S is a stdarg call that needs the
   argument pack in its argument list.  */
static inline bool
gimple_call_va_arg_pack_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_VA_ARG_PACK) != 0;
}
/* Return true if S is a noreturn call.  Note this queries the call's
   computed flags, not a subcode bit.  */
static inline bool
gimple_call_noreturn_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (gimple_call_flags (s) & ECF_NORETURN) != 0;
}
/* If NOTHROW_P is true, GIMPLE_CALL S is a call that is known to not throw
   even if the called function can throw in other cases.  */
static inline void
gimple_call_set_nothrow (gimple s, bool nothrow_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (nothrow_p)
    s->gsbase.subcode |= GF_CALL_NOTHROW;
  else
    s->gsbase.subcode &= ~GF_CALL_NOTHROW;
}
/* Return true if S is a nothrow call.  */
static inline bool
gimple_call_nothrow_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (gimple_call_flags (s) & ECF_NOTHROW) != 0;
}
/* If FOR_VAR is true, GIMPLE_CALL S is a call to builtin_alloca that
   is known to be emitted for VLA objects.  Those are wrapped by
   stack_save/stack_restore calls and hence can't lead to unbounded
   stack growth even when they occur in loops.  */
static inline void
gimple_call_set_alloca_for_var (gimple s, bool for_var)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (for_var)
    s->gsbase.subcode |= GF_CALL_ALLOCA_FOR_VAR;
  else
    s->gsbase.subcode &= ~GF_CALL_ALLOCA_FOR_VAR;
}
/* Return true if S is a call to builtin_alloca emitted for VLA objects.  */
static inline bool
gimple_call_alloca_for_var_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_ALLOCA_FOR_VAR) != 0;
}
/* Copy all the GF_CALL_* flags from ORIG_CALL to DEST_CALL.  */
static inline void
gimple_call_copy_flags (gimple dest_call, gimple orig_call)
{
  GIMPLE_CHECK (dest_call, GIMPLE_CALL);
  GIMPLE_CHECK (orig_call, GIMPLE_CALL);
  dest_call->gsbase.subcode = orig_call->gsbase.subcode;
}
/* Return a pointer to the points-to solution for the set of call-used
   variables of the call CALL.  */
static inline struct pt_solution *
gimple_call_use_set (gimple call)
{
  GIMPLE_CHECK (call, GIMPLE_CALL);
  return &call->gimple_call.call_used;
}
/* Return a pointer to the points-to solution for the set of
   call-clobbered variables of the call CALL.  */
static inline struct pt_solution *
gimple_call_clobber_set (gimple call)
{
  GIMPLE_CHECK (call, GIMPLE_CALL);
  return &call->gimple_call.call_clobbered;
}
/* Returns true if this is a GIMPLE_ASSIGN or a GIMPLE_CALL with a
   non-NULL lhs.  */
static inline bool
gimple_has_lhs (gimple stmt)
{
  if (is_gimple_assign (stmt))
    return true;
  return is_gimple_call (stmt) && gimple_call_lhs (stmt) != NULL_TREE;
}
/* Return the code of the predicate computed by conditional statement GS.
   For GIMPLE_COND the comparison code is stored in the subcode field.  */
static inline enum tree_code
gimple_cond_code (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return (enum tree_code) gs->gsbase.subcode;
}
/* Set CODE to be the predicate code for the conditional statement GS.  */
static inline void
gimple_cond_set_code (gimple gs, enum tree_code code)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gs->gsbase.subcode = code;
}
/* Return the LHS of the predicate computed by conditional statement GS.  */
static inline tree
gimple_cond_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 0);
}
/* Return the pointer to the LHS of the predicate computed by conditional
   statement GS.  */
static inline tree *
gimple_cond_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op_ptr (gs, 0);
}
/* Set LHS to be the LHS operand of the predicate computed by
   conditional statement GS.  */
static inline void
gimple_cond_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 0, lhs);
}
/* Return the RHS operand of the predicate computed by conditional GS.  */
static inline tree
gimple_cond_rhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 1);
}
/* Return the pointer to the RHS operand of the predicate computed by
   conditional GS.  */
static inline tree *
gimple_cond_rhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op_ptr (gs, 1);
}
/* Set RHS to be the RHS operand of the predicate computed by
   conditional statement GS.  */
static inline void
gimple_cond_set_rhs (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 1, rhs);
}
/* Return the label used by conditional statement GS when its
   predicate evaluates to true.  */
static inline tree
gimple_cond_true_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 2);
}
/* Set LABEL to be the label used by conditional statement GS when its
   predicate evaluates to true.  */
static inline void
gimple_cond_set_true_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 2, label);
}
/* Set LABEL to be the label used by conditional statement GS when its
   predicate evaluates to false.  */
static inline void
gimple_cond_set_false_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 3, label);
}
/* Return the label used by conditional statement GS when its
   predicate evaluates to false.  */
static inline tree
gimple_cond_false_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 3);
}
/* Set the conditional statement GS to be of the form 'if (1 == 0)',
   i.e. always false.  */
static inline void
gimple_cond_make_false (gimple gs)
{
  gimple_cond_set_lhs (gs, boolean_true_node);
  gimple_cond_set_rhs (gs, boolean_false_node);
  gs->gsbase.subcode = EQ_EXPR;
}
/* Set the conditional statement GS to be of the form 'if (1 == 1)',
   i.e. always true.  */
static inline void
gimple_cond_make_true (gimple gs)
{
  gimple_cond_set_lhs (gs, boolean_true_node);
  gimple_cond_set_rhs (gs, boolean_true_node);
  gs->gsbase.subcode = EQ_EXPR;
}
/* Check if conditional statement GS is of the form 'if (1 == 1)',
   'if (0 == 0)', 'if (1 != 0)' or 'if (0 != 1)', i.e. a constant-true
   condition built from boolean constants.  */
static inline bool
gimple_cond_true_p (const_gimple gs)
{
  tree lhs = gimple_cond_lhs (gs);
  tree rhs = gimple_cond_rhs (gs);
  /* Both operands must be boolean constants for the condition to be
     statically known.  */
  if (lhs != boolean_true_node && lhs != boolean_false_node)
    return false;
  if (rhs != boolean_true_node && rhs != boolean_false_node)
    return false;
  switch (gimple_cond_code (gs))
    {
    case NE_EXPR:
      return lhs != rhs;
    case EQ_EXPR:
      return lhs == rhs;
    default:
      return false;
    }
}
/* Check if conditional statement GS is of the form 'if (1 != 1)',
   'if (0 != 0)', 'if (1 == 0)' or 'if (0 == 1)', i.e. a constant-false
   condition built from boolean constants.  */
static inline bool
gimple_cond_false_p (const_gimple gs)
{
  tree lhs = gimple_cond_lhs (gs);
  tree rhs = gimple_cond_rhs (gs);
  enum tree_code code = gimple_cond_code (gs);
  /* Both operands must be boolean constants for the condition to be
     statically known.  */
  if (lhs != boolean_true_node && lhs != boolean_false_node)
    return false;
  if (rhs != boolean_true_node && rhs != boolean_false_node)
    return false;
  if (code == NE_EXPR && lhs == rhs)
    return true;
  if (code == EQ_EXPR && lhs != rhs)
    return true;
  return false;
}
/* Check if conditional statement GS is of the form 'if (var != 0)' or
   'if (var == 1)', i.e. a test of a single boolean variable.  */
static inline bool
gimple_cond_single_var_p (gimple gs)
{
  if (gimple_cond_code (gs) == NE_EXPR
      && gimple_cond_rhs (gs) == boolean_false_node)
    return true;
  if (gimple_cond_code (gs) == EQ_EXPR
      && gimple_cond_rhs (gs) == boolean_true_node)
    return true;
  return false;
}
/* Set the code, LHS and RHS of GIMPLE_COND STMT from CODE, LHS and RHS.  */
static inline void
gimple_cond_set_condition (gimple stmt, enum tree_code code, tree lhs, tree rhs)
{
  gimple_cond_set_code (stmt, code);
  gimple_cond_set_lhs (stmt, lhs);
  gimple_cond_set_rhs (stmt, rhs);
}
/* Return the LABEL_DECL node used by GIMPLE_LABEL statement GS.  */
static inline tree
gimple_label_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_LABEL);
  return gimple_op (gs, 0);
}
/* Set LABEL to be the LABEL_DECL node used by GIMPLE_LABEL statement
   GS.  */
static inline void
gimple_label_set_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_LABEL);
  gimple_set_op (gs, 0, label);
}
/* Return the destination of the unconditional jump GS.  */
static inline tree
gimple_goto_dest (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_GOTO);
  return gimple_op (gs, 0);
}
/* Set DEST to be the destination of the unconditional jump GS.  */
static inline void
gimple_goto_set_dest (gimple gs, tree dest)
{
  GIMPLE_CHECK (gs, GIMPLE_GOTO);
  gimple_set_op (gs, 0, dest);
}
/* Return the variables declared in the GIMPLE_BIND statement GS.  */
static inline tree
gimple_bind_vars (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  return gs->gimple_bind.vars;
}
/* Set VARS to be the set of variables declared in the GIMPLE_BIND
   statement GS.  */
static inline void
gimple_bind_set_vars (gimple gs, tree vars)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gs->gimple_bind.vars = vars;
}
/* Append VARS to the set of variables declared in the GIMPLE_BIND
   statement GS.  */
static inline void
gimple_bind_append_vars (gimple gs, tree vars)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gs->gimple_bind.vars = chainon (gs->gimple_bind.vars, vars);
}
/* Return a pointer to the GIMPLE sequence contained in the GIMPLE_BIND
   statement GS.  */
static inline gimple_seq *
gimple_bind_body_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  return &gs->gimple_bind.body;
}
/* Return the GIMPLE sequence contained in the GIMPLE_BIND statement GS.  */
static inline gimple_seq
gimple_bind_body (gimple gs)
{
  return *gimple_bind_body_ptr (gs);
}
/* Set SEQ to be the GIMPLE sequence contained in the GIMPLE_BIND
   statement GS.  */
static inline void
gimple_bind_set_body (gimple gs, gimple_seq seq)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gs->gimple_bind.body = seq;
}
/* Append a statement to the end of a GIMPLE_BIND's body.  */
static inline void
gimple_bind_add_stmt (gimple gs, gimple stmt)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gimple_seq_add_stmt (&gs->gimple_bind.body, stmt);
}
/* Append a sequence of statements to the end of a GIMPLE_BIND's body.  */
static inline void
gimple_bind_add_seq (gimple gs, gimple_seq seq)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gimple_seq_add_seq (&gs->gimple_bind.body, seq);
}
/* Return the TREE_BLOCK node associated with GIMPLE_BIND statement
   GS.  This is analogous to the BIND_EXPR_BLOCK field in trees.  */
static inline tree
gimple_bind_block (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  return gs->gimple_bind.block;
}
/* Set BLOCK to be the TREE_BLOCK node associated with GIMPLE_BIND
   statement GS.  BLOCK may be NULL_TREE.  */
static inline void
gimple_bind_set_block (gimple gs, tree block)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gcc_gimple_checking_assert (block == NULL_TREE
                              || TREE_CODE (block) == BLOCK);
  gs->gimple_bind.block = block;
}
/* GIMPLE_ASM operands are laid out in this order: outputs first, then
   inputs, then clobbers, then labels.  The accessors below add the
   appropriate offsets.  */
/* Return the number of input operands for GIMPLE_ASM GS.  */
static inline unsigned
gimple_asm_ninputs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return gs->gimple_asm.ni;
}
/* Return the number of output operands for GIMPLE_ASM GS.  */
static inline unsigned
gimple_asm_noutputs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return gs->gimple_asm.no;
}
/* Return the number of clobber operands for GIMPLE_ASM GS.  */
static inline unsigned
gimple_asm_nclobbers (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return gs->gimple_asm.nc;
}
/* Return the number of label operands for GIMPLE_ASM GS.  */
static inline unsigned
gimple_asm_nlabels (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return gs->gimple_asm.nl;
}
/* Return input operand INDEX of GIMPLE_ASM GS.  Inputs follow the
   outputs, hence the offset by the number of outputs.  */
static inline tree
gimple_asm_input_op (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.ni);
  return gimple_op (gs, index + gs->gimple_asm.no);
}
/* Return a pointer to input operand INDEX of GIMPLE_ASM GS.  */
static inline tree *
gimple_asm_input_op_ptr (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.ni);
  return gimple_op_ptr (gs, index + gs->gimple_asm.no);
}
/* Set IN_OP to be input operand INDEX in GIMPLE_ASM GS.  */
static inline void
gimple_asm_set_input_op (gimple gs, unsigned index, tree in_op)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.ni
                              && TREE_CODE (in_op) == TREE_LIST);
  gimple_set_op (gs, index + gs->gimple_asm.no, in_op);
}
/* Return output operand INDEX of GIMPLE_ASM GS.  Outputs occupy the
   first operand slots.  */
static inline tree
gimple_asm_output_op (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.no);
  return gimple_op (gs, index);
}
/* Return a pointer to output operand INDEX of GIMPLE_ASM GS.  */
static inline tree *
gimple_asm_output_op_ptr (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.no);
  return gimple_op_ptr (gs, index);
}
/* Set OUT_OP to be output operand INDEX in GIMPLE_ASM GS.  */
static inline void
gimple_asm_set_output_op (gimple gs, unsigned index, tree out_op)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.no
                              && TREE_CODE (out_op) == TREE_LIST);
  gimple_set_op (gs, index, out_op);
}
/* Return clobber operand INDEX of GIMPLE_ASM GS.  Clobbers follow the
   outputs and inputs.  */
static inline tree
gimple_asm_clobber_op (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.nc);
  return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no);
}
/* Set CLOBBER_OP to be clobber operand INDEX in GIMPLE_ASM GS.  */
static inline void
gimple_asm_set_clobber_op (gimple gs, unsigned index, tree clobber_op)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.nc
                              && TREE_CODE (clobber_op) == TREE_LIST);
  gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no, clobber_op);
}
/* Return label operand INDEX of GIMPLE_ASM GS.  NOTE(review): the
   offset here skips inputs and clobbers but not outputs, unlike the
   clobber accessors above — this matches upstream GCC; confirm before
   changing.  */
static inline tree
gimple_asm_label_op (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.nl);
  return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.nc);
}
/* Set LABEL_OP to be label operand INDEX in GIMPLE_ASM GS.  */
static inline void
gimple_asm_set_label_op (gimple gs, unsigned index, tree label_op)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.nl
                              && TREE_CODE (label_op) == TREE_LIST);
  gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.nc, label_op);
}
/* Return the string representing the assembly instruction in
   GIMPLE_ASM GS.  */
static inline const char *
gimple_asm_string (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return gs->gimple_asm.string;
}
/* Return true if GS is an asm statement marked volatile.  */
static inline bool
gimple_asm_volatile_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return (gs->gsbase.subcode & GF_ASM_VOLATILE) != 0;
}
/* If VOLATILE_P is true, mark asm statement GS as volatile.  */
static inline void
gimple_asm_set_volatile (gimple gs, bool volatile_p)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  if (volatile_p)
    gs->gsbase.subcode |= GF_ASM_VOLATILE;
  else
    gs->gsbase.subcode &= ~GF_ASM_VOLATILE;
}
/* If INPUT_P is true, mark asm GS as an ASM_INPUT.  */
static inline void
gimple_asm_set_input (gimple gs, bool input_p)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  if (input_p)
    gs->gsbase.subcode |= GF_ASM_INPUT;
  else
    gs->gsbase.subcode &= ~GF_ASM_INPUT;
}
/* Return true if asm GS is an ASM_INPUT.  */
static inline bool
gimple_asm_input_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return (gs->gsbase.subcode & GF_ASM_INPUT) != 0;
}
/* Return the types handled by GIMPLE_CATCH statement GS.  */
static inline tree
gimple_catch_types (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  return gs->gimple_catch.types;
}
/* Return a pointer to the types handled by GIMPLE_CATCH statement GS.  */
static inline tree *
gimple_catch_types_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  return &gs->gimple_catch.types;
}
/* Return a pointer to the GIMPLE sequence representing the body of
   the handler of GIMPLE_CATCH statement GS.  */
static inline gimple_seq *
gimple_catch_handler_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  return &gs->gimple_catch.handler;
}
/* Return the GIMPLE sequence representing the body of the handler of
   GIMPLE_CATCH statement GS.  */
static inline gimple_seq
gimple_catch_handler (gimple gs)
{
  return *gimple_catch_handler_ptr (gs);
}
/* Set T to be the set of types handled by GIMPLE_CATCH GS.  */
static inline void
gimple_catch_set_types (gimple gs, tree t)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  gs->gimple_catch.types = t;
}
/* Set HANDLER to be the body of GIMPLE_CATCH GS.  */
static inline void
gimple_catch_set_handler (gimple gs, gimple_seq handler)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  gs->gimple_catch.handler = handler;
}
/* Return the types handled by GIMPLE_EH_FILTER statement GS.  */
static inline tree
gimple_eh_filter_types (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  return gs->gimple_eh_filter.types;
}
/* Return a pointer to the types handled by GIMPLE_EH_FILTER statement
   GS.  */
static inline tree *
gimple_eh_filter_types_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  return &gs->gimple_eh_filter.types;
}
/* Return a pointer to the sequence of statement to execute when
   GIMPLE_EH_FILTER statement fails.  */
static inline gimple_seq *
gimple_eh_filter_failure_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  return &gs->gimple_eh_filter.failure;
}
/* Return the sequence of statement to execute when GIMPLE_EH_FILTER
   statement fails.  */
static inline gimple_seq
gimple_eh_filter_failure (gimple gs)
{
  return *gimple_eh_filter_failure_ptr (gs);
}
/* Set TYPES to be the set of types handled by GIMPLE_EH_FILTER GS.  */
static inline void
gimple_eh_filter_set_types (gimple gs, tree types)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  gs->gimple_eh_filter.types = types;
}
/* Set FAILURE to be the sequence of statements to execute on failure
   for GIMPLE_EH_FILTER GS.  */
static inline void
gimple_eh_filter_set_failure (gimple gs, gimple_seq failure)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  gs->gimple_eh_filter.failure = failure;
}
/* Get the function decl to be called by the MUST_NOT_THROW region.  */
static inline tree
gimple_eh_must_not_throw_fndecl (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW);
  return gs->gimple_eh_mnt.fndecl;
}
/* Set the function decl to be called by GS to DECL.  */
static inline void
gimple_eh_must_not_throw_set_fndecl (gimple gs, tree decl)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW);
  gs->gimple_eh_mnt.fndecl = decl;
}
/* GIMPLE_EH_ELSE accessors.  */
/* Return a pointer to the sequence executed on the normal
   (non-exceptional) path of GIMPLE_EH_ELSE GS.  */
static inline gimple_seq *
gimple_eh_else_n_body_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
  return &gs->gimple_eh_else.n_body;
}
/* Return the sequence executed on the normal path of GIMPLE_EH_ELSE GS.  */
static inline gimple_seq
gimple_eh_else_n_body (gimple gs)
{
  return *gimple_eh_else_n_body_ptr (gs);
}
/* Return a pointer to the sequence executed on the exceptional path of
   GIMPLE_EH_ELSE GS.  */
static inline gimple_seq *
gimple_eh_else_e_body_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
  return &gs->gimple_eh_else.e_body;
}
/* Return the sequence executed on the exceptional path of
   GIMPLE_EH_ELSE GS.  */
static inline gimple_seq
gimple_eh_else_e_body (gimple gs)
{
  return *gimple_eh_else_e_body_ptr (gs);
}
/* Set SEQ to be the normal-path body of GIMPLE_EH_ELSE GS.  */
static inline void
gimple_eh_else_set_n_body (gimple gs, gimple_seq seq)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
  gs->gimple_eh_else.n_body = seq;
}
/* Set SEQ to be the exceptional-path body of GIMPLE_EH_ELSE GS.  */
static inline void
gimple_eh_else_set_e_body (gimple gs, gimple_seq seq)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
  gs->gimple_eh_else.e_body = seq;
}
/* GIMPLE_TRY accessors.  */
/* Return the kind of try block represented by GIMPLE_TRY GS.  This is
   either GIMPLE_TRY_CATCH or GIMPLE_TRY_FINALLY.  */
static inline enum gimple_try_flags
gimple_try_kind (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  return (enum gimple_try_flags) (gs->gsbase.subcode & GIMPLE_TRY_KIND);
}
/* Set the kind of try block represented by GIMPLE_TRY GS.  */
static inline void
gimple_try_set_kind (gimple gs, enum gimple_try_flags kind)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  gcc_gimple_checking_assert (kind == GIMPLE_TRY_CATCH
                              || kind == GIMPLE_TRY_FINALLY);
  /* Note: overwriting the subcode also clears any
     GIMPLE_TRY_CATCH_IS_CLEANUP bit.  */
  if (gimple_try_kind (gs) != kind)
    gs->gsbase.subcode = (unsigned int) kind;
}
/* Return the GIMPLE_TRY_CATCH_IS_CLEANUP flag.  */
static inline bool
gimple_try_catch_is_cleanup (const_gimple gs)
{
  gcc_gimple_checking_assert (gimple_try_kind (gs) == GIMPLE_TRY_CATCH);
  return (gs->gsbase.subcode & GIMPLE_TRY_CATCH_IS_CLEANUP) != 0;
}
/* Return a pointer to the sequence of statements used as the
   body for GIMPLE_TRY GS.  */
static inline gimple_seq *
gimple_try_eval_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  return &gs->gimple_try.eval;
}
/* Return the sequence of statements used as the body for GIMPLE_TRY GS.  */
static inline gimple_seq
gimple_try_eval (gimple gs)
{
  return *gimple_try_eval_ptr (gs);
}
/* Return a pointer to the sequence of statements used as the cleanup body for
   GIMPLE_TRY GS.  */
static inline gimple_seq *
gimple_try_cleanup_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  return &gs->gimple_try.cleanup;
}
/* Return the sequence of statements used as the cleanup body for
   GIMPLE_TRY GS.  */
static inline gimple_seq
gimple_try_cleanup (gimple gs)
{
  return *gimple_try_cleanup_ptr (gs);
}
/* Set the GIMPLE_TRY_CATCH_IS_CLEANUP flag.  */
static inline void
gimple_try_set_catch_is_cleanup (gimple g, bool catch_is_cleanup)
{
  gcc_gimple_checking_assert (gimple_try_kind (g) == GIMPLE_TRY_CATCH);
  if (catch_is_cleanup)
    g->gsbase.subcode |= GIMPLE_TRY_CATCH_IS_CLEANUP;
  else
    g->gsbase.subcode &= ~GIMPLE_TRY_CATCH_IS_CLEANUP;
}
/* Set EVAL to be the sequence of statements to use as the body for
   GIMPLE_TRY GS.  */
static inline void
gimple_try_set_eval (gimple gs, gimple_seq eval)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  gs->gimple_try.eval = eval;
}
/* Set CLEANUP to be the sequence of statements to use as the cleanup
   body for GIMPLE_TRY GS.  */
static inline void
gimple_try_set_cleanup (gimple gs, gimple_seq cleanup)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  gs->gimple_try.cleanup = cleanup;
}
/* Return a pointer to the cleanup sequence for cleanup statement GS.  */
static inline gimple_seq *
gimple_wce_cleanup_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
  return &gs->gimple_wce.cleanup;
}
/* Return the cleanup sequence for cleanup statement GS.  */
static inline gimple_seq
gimple_wce_cleanup (gimple gs)
{
  return *gimple_wce_cleanup_ptr (gs);
}
/* Set CLEANUP to be the cleanup sequence for GS.  */
static inline void
gimple_wce_set_cleanup (gimple gs, gimple_seq cleanup)
{
  GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
  gs->gimple_wce.cleanup = cleanup;
}
/* Return the CLEANUP_EH_ONLY flag for a WCE tuple.  The flag is stored
   directly in the subcode field.  */
static inline bool
gimple_wce_cleanup_eh_only (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
  return gs->gsbase.subcode != 0;
}
/* Set the CLEANUP_EH_ONLY flag for a WCE tuple.  */
static inline void
gimple_wce_set_cleanup_eh_only (gimple gs, bool eh_only_p)
{
  GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
  gs->gsbase.subcode = (unsigned int) eh_only_p;
}
/* Return the maximum number of arguments supported by GIMPLE_PHI GS.  */
static inline unsigned
gimple_phi_capacity (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PHI);
  return gs->gimple_phi.capacity;
}
/* Return the number of arguments in GIMPLE_PHI GS.  This must always
   be exactly the number of incoming edges for the basic block holding
   GS.  */
static inline unsigned
gimple_phi_num_args (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PHI);
  return gs->gimple_phi.nargs;
}
/* Return the SSA name created by GIMPLE_PHI GS.  */
static inline tree
gimple_phi_result (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PHI);
  return gs->gimple_phi.result;
}
/* Return a pointer to the SSA name created by GIMPLE_PHI GS.  */
static inline tree *
gimple_phi_result_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PHI);
  return &gs->gimple_phi.result;
}
/* Set RESULT to be the SSA name created by GIMPLE_PHI GS.  */
static inline void
gimple_phi_set_result (gimple gs, tree result)
{
  GIMPLE_CHECK (gs, GIMPLE_PHI);
  gs->gimple_phi.result = result;
  /* Keep the SSA def-stmt link consistent when the result is an SSA name.  */
  if (result && TREE_CODE (result) == SSA_NAME)
    SSA_NAME_DEF_STMT (result) = gs;
}
/* Return the PHI argument corresponding to incoming edge INDEX for
   GIMPLE_PHI GS.  NOTE(review): the assert permits INDEX == capacity,
   which reads one slot past the in-use arguments; this matches
   upstream GCC — confirm before tightening to '<'.  */
static inline struct phi_arg_d *
gimple_phi_arg (gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_PHI);
  gcc_gimple_checking_assert (index <= gs->gimple_phi.capacity);
  return &(gs->gimple_phi.args[index]);
}
/* Set PHIARG to be the argument corresponding to incoming edge INDEX
   for GIMPLE_PHI GS.  NOTE(review): the assert permits INDEX == nargs;
   matches upstream GCC — confirm before tightening.  */
static inline void
gimple_phi_set_arg (gimple gs, unsigned index, struct phi_arg_d * phiarg)
{
  GIMPLE_CHECK (gs, GIMPLE_PHI);
  gcc_gimple_checking_assert (index <= gs->gimple_phi.nargs);
  gs->gimple_phi.args[index] = *phiarg;
}
/* Return the region number for GIMPLE_RESX GS.  GIMPLE_RESX and
   GIMPLE_EH_DISPATCH share the gimple_eh_ctrl layout.  */
static inline int
gimple_resx_region (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_RESX);
  return gs->gimple_eh_ctrl.region;
}
/* Set REGION to be the region number for GIMPLE_RESX GS.  */
static inline void
gimple_resx_set_region (gimple gs, int region)
{
  GIMPLE_CHECK (gs, GIMPLE_RESX);
  gs->gimple_eh_ctrl.region = region;
}
/* Return the region number for GIMPLE_EH_DISPATCH GS.  */
static inline int
gimple_eh_dispatch_region (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH);
  return gs->gimple_eh_ctrl.region;
}
/* Set REGION to be the region number for GIMPLE_EH_DISPATCH GS.  */
static inline void
gimple_eh_dispatch_set_region (gimple gs, int region)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH);
  gs->gimple_eh_ctrl.region = region;
}
/* Return the number of labels associated with the switch statement GS.
   Operand 0 is the index expression; the remaining operands are the
   labels, with label 0 being the default.  */
static inline unsigned
gimple_switch_num_labels (const_gimple gs)
{
  unsigned num_ops;
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  num_ops = gimple_num_ops (gs);
  gcc_gimple_checking_assert (num_ops > 1);
  return num_ops - 1;
}
/* Set NLABELS to be the number of labels for the switch statement GS.  */
static inline void
gimple_switch_set_num_labels (gimple g, unsigned nlabels)
{
  GIMPLE_CHECK (g, GIMPLE_SWITCH);
  gimple_set_num_ops (g, nlabels + 1);
}
/* Return the index variable used by the switch statement GS.  */
static inline tree
gimple_switch_index (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  return gimple_op (gs, 0);
}
/* Return a pointer to the index variable for the switch statement GS.  */
static inline tree *
gimple_switch_index_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  return gimple_op_ptr (gs, 0);
}
/* Set INDEX to be the index variable for switch statement GS.  */
static inline void
gimple_switch_set_index (gimple gs, tree index)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  gcc_gimple_checking_assert (SSA_VAR_P (index) || CONSTANT_CLASS_P (index));
  gimple_set_op (gs, 0, index);
}
/* Return the label numbered INDEX.  The default label is 0, followed by any
   labels in a switch statement.  */
static inline tree
gimple_switch_label (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1);
  return gimple_op (gs, index + 1);
}
/* Set the label number INDEX to LABEL.  0 is always the default label.  */
static inline void
gimple_switch_set_label (gimple gs, unsigned index, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1
                              && (label == NULL_TREE
                                  || TREE_CODE (label) == CASE_LABEL_EXPR));
  gimple_set_op (gs, index + 1, label);
}
/* Return the default label for a switch statement.  The default label
   has neither CASE_LOW nor CASE_HIGH.  */
static inline tree
gimple_switch_default_label (const_gimple gs)
{
  tree label = gimple_switch_label (gs, 0);
  gcc_checking_assert (!CASE_LOW (label) && !CASE_HIGH (label));
  return label;
}
/* Set the default label for a switch statement.  */
static inline void
gimple_switch_set_default_label (gimple gs, tree label)
{
  gcc_checking_assert (!CASE_LOW (label) && !CASE_HIGH (label));
  gimple_switch_set_label (gs, 0, label);
}
/* Return true if GS is a GIMPLE_DEBUG statement.  */
static inline bool
is_gimple_debug (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_DEBUG;
}
/* Return true if S is a GIMPLE_DEBUG BIND statement.  The BIND/SOURCE_BIND
   distinction is carried in the statement subcode.  */
static inline bool
gimple_debug_bind_p (const_gimple s)
{
  if (is_gimple_debug (s))
    return s->gsbase.subcode == GIMPLE_DEBUG_BIND;
  return false;
}
/* Return the variable bound in a GIMPLE_DEBUG bind statement
   (operand 0).  */
static inline tree
gimple_debug_bind_get_var (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op (dbg, 0);
}
/* Return the value bound to the variable in a GIMPLE_DEBUG bind
   statement (operand 1).  */
static inline tree
gimple_debug_bind_get_value (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op (dbg, 1);
}
/* Return a pointer to the value bound to the variable in a
   GIMPLE_DEBUG bind statement.  */
static inline tree *
gimple_debug_bind_get_value_ptr (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op_ptr (dbg, 1);
}
/* Set the variable bound in a GIMPLE_DEBUG bind statement.  */
static inline void
gimple_debug_bind_set_var (gimple dbg, tree var)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  gimple_set_op (dbg, 0, var);
}
/* Set the value bound to the variable in a GIMPLE_DEBUG bind
   statement.  */
static inline void
gimple_debug_bind_set_value (gimple dbg, tree value)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  gimple_set_op (dbg, 1, value);
}
/* The second operand of a GIMPLE_DEBUG_BIND, when the value was
   optimized away.  This macro is local to the reset/has_value helpers
   below and is #undef'd immediately after them.  */
#define GIMPLE_DEBUG_BIND_NOVALUE NULL_TREE /* error_mark_node */
/* Remove the value bound to the variable in a GIMPLE_DEBUG bind
   statement.  */
static inline void
gimple_debug_bind_reset_value (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  gimple_set_op (dbg, 1, GIMPLE_DEBUG_BIND_NOVALUE);
}
/* Return true if the GIMPLE_DEBUG bind statement is bound to a
   value.  */
static inline bool
gimple_debug_bind_has_value_p (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op (dbg, 1) != GIMPLE_DEBUG_BIND_NOVALUE;
}
#undef GIMPLE_DEBUG_BIND_NOVALUE
/* Return true if S is a GIMPLE_DEBUG SOURCE BIND statement.  */
static inline bool
gimple_debug_source_bind_p (const_gimple s)
{
  if (is_gimple_debug (s))
    return s->gsbase.subcode == GIMPLE_DEBUG_SOURCE_BIND;
  return false;
}
/* Return the variable bound in a GIMPLE_DEBUG source bind statement.  */
static inline tree
gimple_debug_source_bind_get_var (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  return gimple_op (dbg, 0);
}
/* Return the value bound to the variable in a GIMPLE_DEBUG source bind
   statement.  */
static inline tree
gimple_debug_source_bind_get_value (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  return gimple_op (dbg, 1);
}
/* Return a pointer to the value bound to the variable in a
   GIMPLE_DEBUG source bind statement.  */
static inline tree *
gimple_debug_source_bind_get_value_ptr (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  return gimple_op_ptr (dbg, 1);
}
/* Set the variable bound in a GIMPLE_DEBUG source bind statement.  */
static inline void
gimple_debug_source_bind_set_var (gimple dbg, tree var)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  gimple_set_op (dbg, 0, var);
}
/* Set the value bound to the variable in a GIMPLE_DEBUG source bind
   statement.  */
static inline void
gimple_debug_source_bind_set_value (gimple dbg, tree value)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  gimple_set_op (dbg, 1, value);
}
/* Return a pointer to the body for the OMP statement GS.
   NOTE: no GIMPLE_CHECK is performed here; these three helpers are
   shared by every OMP statement kind that carries a body.  */
static inline gimple_seq *
gimple_omp_body_ptr (gimple gs)
{
  return &gs->omp.body;
}
/* Return the body for the OMP statement GS.  */
static inline gimple_seq
gimple_omp_body (gimple gs)
{
  return *gimple_omp_body_ptr (gs);
}
/* Set BODY to be the body for the OMP statement GS.  */
static inline void
gimple_omp_set_body (gimple gs, gimple_seq body)
{
  gs->omp.body = body;
}
/* Return the name associated with OMP_CRITICAL statement GS.  */
static inline tree
gimple_omp_critical_name (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
  return gs->gimple_omp_critical.name;
}
/* Return a pointer to the name associated with OMP critical statement GS.  */
static inline tree *
gimple_omp_critical_name_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
  return &gs->gimple_omp_critical.name;
}
/* Set NAME to be the name associated with OMP critical statement GS.  */
static inline void
gimple_omp_critical_set_name (gimple gs, tree name)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
  gs->gimple_omp_critical.name = name;
}
/* Return the clauses associated with OMP_FOR GS.  */
static inline tree
gimple_omp_for_clauses (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  return gs->gimple_omp_for.clauses;
}
/* Return a pointer to the clauses associated with OMP_FOR GS.  */
static inline tree *
gimple_omp_for_clauses_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  return &gs->gimple_omp_for.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_FOR GS.  */
static inline void
gimple_omp_for_set_clauses (gimple gs, tree clauses)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gs->gimple_omp_for.clauses = clauses;
}
/* Get the collapse count of OMP_FOR GS, i.e. the number of entries
   in the iter[] array indexed by the accessors below.  */
static inline size_t
gimple_omp_for_collapse (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  return gs->gimple_omp_for.collapse;
}
/* Return the index variable for the I-th collapsed loop of OMP_FOR GS.  */
static inline tree
gimple_omp_for_index (const_gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return gs->gimple_omp_for.iter[i].index;
}
/* Return a pointer to the index variable for OMP_FOR GS.  */
static inline tree *
gimple_omp_for_index_ptr (gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return &gs->gimple_omp_for.iter[i].index;
}
/* Set INDEX to be the index variable for OMP_FOR GS.  */
static inline void
gimple_omp_for_set_index (gimple gs, size_t i, tree index)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  gs->gimple_omp_for.iter[i].index = index;
}
/* Return the initial value for the I-th collapsed loop of OMP_FOR GS.  */
static inline tree
gimple_omp_for_initial (const_gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return gs->gimple_omp_for.iter[i].initial;
}
/* Return a pointer to the initial value for OMP_FOR GS.  */
static inline tree *
gimple_omp_for_initial_ptr (gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return &gs->gimple_omp_for.iter[i].initial;
}
/* Set INITIAL to be the initial value for OMP_FOR GS.  */
static inline void
gimple_omp_for_set_initial (gimple gs, size_t i, tree initial)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  gs->gimple_omp_for.iter[i].initial = initial;
}
/* Return the final value for the I-th collapsed loop of OMP_FOR GS.  */
static inline tree
gimple_omp_for_final (const_gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return gs->gimple_omp_for.iter[i].final;
}
/* Return a pointer to the final value for OMP_FOR GS.  */
static inline tree *
gimple_omp_for_final_ptr (gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return &gs->gimple_omp_for.iter[i].final;
}
/* Set FINAL to be the final value for OMP_FOR GS.  */
static inline void
gimple_omp_for_set_final (gimple gs, size_t i, tree final)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  gs->gimple_omp_for.iter[i].final = final;
}
/* Return the increment value for the I-th collapsed loop of OMP_FOR GS.  */
static inline tree
gimple_omp_for_incr (const_gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return gs->gimple_omp_for.iter[i].incr;
}
/* Return a pointer to the increment value for OMP_FOR GS.  */
static inline tree *
gimple_omp_for_incr_ptr (gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return &gs->gimple_omp_for.iter[i].incr;
}
/* Set INCR to be the increment value for OMP_FOR GS.  */
static inline void
gimple_omp_for_set_incr (gimple gs, size_t i, tree incr)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  gs->gimple_omp_for.iter[i].incr = incr;
}
/* Return a pointer to the sequence of statements to execute before the OMP_FOR
   statement GS starts.  */
static inline gimple_seq *
gimple_omp_for_pre_body_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  return &gs->gimple_omp_for.pre_body;
}
/* Return the sequence of statements to execute before the OMP_FOR
   statement GS starts.  */
static inline gimple_seq
gimple_omp_for_pre_body (gimple gs)
{
  return *gimple_omp_for_pre_body_ptr (gs);
}
/* Set PRE_BODY to be the sequence of statements to execute before the
   OMP_FOR statement GS starts.  */
static inline void
gimple_omp_for_set_pre_body (gimple gs, gimple_seq pre_body)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gs->gimple_omp_for.pre_body = pre_body;
}
/* Return the clauses associated with OMP_PARALLEL GS.  */
static inline tree
gimple_omp_parallel_clauses (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  return gs->gimple_omp_parallel.clauses;
}
/* Return a pointer to the clauses associated with OMP_PARALLEL GS.  */
static inline tree *
gimple_omp_parallel_clauses_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  return &gs->gimple_omp_parallel.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL
   GS.  */
static inline void
gimple_omp_parallel_set_clauses (gimple gs, tree clauses)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  gs->gimple_omp_parallel.clauses = clauses;
}
/* Return the child function used to hold the body of OMP_PARALLEL GS.  */
static inline tree
gimple_omp_parallel_child_fn (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  return gs->gimple_omp_parallel.child_fn;
}
/* Return a pointer to the child function used to hold the body of
   OMP_PARALLEL GS.  */
static inline tree *
gimple_omp_parallel_child_fn_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  return &gs->gimple_omp_parallel.child_fn;
}
/* Set CHILD_FN to be the child function for OMP_PARALLEL GS.  */
static inline void
gimple_omp_parallel_set_child_fn (gimple gs, tree child_fn)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  gs->gimple_omp_parallel.child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
   from the parent to the children threads in OMP_PARALLEL GS.  */
static inline tree
gimple_omp_parallel_data_arg (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  return gs->gimple_omp_parallel.data_arg;
}
/* Return a pointer to the data argument for OMP_PARALLEL GS.  */
static inline tree *
gimple_omp_parallel_data_arg_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  return &gs->gimple_omp_parallel.data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_PARALLEL GS.  */
static inline void
gimple_omp_parallel_set_data_arg (gimple gs, tree data_arg)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  gs->gimple_omp_parallel.data_arg = data_arg;
}
/* Return the clauses associated with OMP_TASK GS.
   NOTE: GIMPLE_OMP_TASK statements store their common fields in the
   gimple_omp_parallel layout, hence the field accesses below.  */
static inline tree
gimple_omp_task_clauses (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_parallel.clauses;
}
/* Return a pointer to the clauses associated with OMP_TASK GS.  */
static inline tree *
gimple_omp_task_clauses_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_parallel.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_TASK
   GS.  */
static inline void
gimple_omp_task_set_clauses (gimple gs, tree clauses)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_parallel.clauses = clauses;
}
/* Return the child function used to hold the body of OMP_TASK GS.  */
static inline tree
gimple_omp_task_child_fn (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_parallel.child_fn;
}
/* Return a pointer to the child function used to hold the body of
   OMP_TASK GS.  */
static inline tree *
gimple_omp_task_child_fn_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_parallel.child_fn;
}
/* Set CHILD_FN to be the child function for OMP_TASK GS.  */
static inline void
gimple_omp_task_set_child_fn (gimple gs, tree child_fn)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_parallel.child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
   from the parent to the children threads in OMP_TASK GS.  */
static inline tree
gimple_omp_task_data_arg (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_parallel.data_arg;
}
/* Return a pointer to the data argument for OMP_TASK GS.  */
static inline tree *
gimple_omp_task_data_arg_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_parallel.data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_TASK GS.  */
static inline void
gimple_omp_task_set_data_arg (gimple gs, tree data_arg)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_parallel.data_arg = data_arg;
}
/* Return the clauses associated with OMP_PARALLEL or OMP_TASK GS.
   The taskreg accessors accept either code; the conditional check
   below verifies GS is OMP_TASK whenever it is not OMP_PARALLEL.  */
static inline tree
gimple_omp_taskreg_clauses (const_gimple gs)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_parallel.clauses;
}
/* Return a pointer to the clauses associated with OMP_PARALLEL or
   OMP_TASK GS.  */
static inline tree *
gimple_omp_taskreg_clauses_ptr (gimple gs)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_parallel.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL
   or OMP_TASK GS.  */
static inline void
gimple_omp_taskreg_set_clauses (gimple gs, tree clauses)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_parallel.clauses = clauses;
}
/* Return the child function used to hold the body of OMP_PARALLEL or
   OMP_TASK GS.  */
static inline tree
gimple_omp_taskreg_child_fn (const_gimple gs)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_parallel.child_fn;
}
/* Return a pointer to the child function used to hold the body of
   OMP_PARALLEL or OMP_TASK GS.  */
static inline tree *
gimple_omp_taskreg_child_fn_ptr (gimple gs)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_parallel.child_fn;
}
/* Set CHILD_FN to be the child function for OMP_PARALLEL or OMP_TASK GS.  */
static inline void
gimple_omp_taskreg_set_child_fn (gimple gs, tree child_fn)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_parallel.child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
   from the parent to the children threads in OMP_PARALLEL or
   OMP_TASK GS.  */
static inline tree
gimple_omp_taskreg_data_arg (const_gimple gs)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_parallel.data_arg;
}
/* Return a pointer to the data argument for OMP_PARALLEL or OMP_TASK GS.  */
static inline tree *
gimple_omp_taskreg_data_arg_ptr (gimple gs)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_parallel.data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_PARALLEL or OMP_TASK GS.  */
static inline void
gimple_omp_taskreg_set_data_arg (gimple gs, tree data_arg)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_parallel.data_arg = data_arg;
}
/* Return the copy function used to hold the body of OMP_TASK GS.  */
static inline tree
gimple_omp_task_copy_fn (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_task.copy_fn;
}
/* Return a pointer to the copy function used to hold the body of
   OMP_TASK GS.  */
static inline tree *
gimple_omp_task_copy_fn_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_task.copy_fn;
}
/* Set COPY_FN to be the copy function for OMP_TASK GS.  */
static inline void
gimple_omp_task_set_copy_fn (gimple gs, tree copy_fn)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_task.copy_fn = copy_fn;
}
/* Return size of the data block in bytes in OMP_TASK GS.  */
static inline tree
gimple_omp_task_arg_size (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_task.arg_size;
}
/* Return a pointer to the data block size for OMP_TASK GS.  */
static inline tree *
gimple_omp_task_arg_size_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_task.arg_size;
}
/* Set ARG_SIZE to be the data block size for OMP_TASK GS.  */
static inline void
gimple_omp_task_set_arg_size (gimple gs, tree arg_size)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_task.arg_size = arg_size;
}
/* Return align of the data block in bytes in OMP_TASK GS.  */
static inline tree
gimple_omp_task_arg_align (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_task.arg_align;
}
/* Return a pointer to the data block align for OMP_TASK GS.  */
static inline tree *
gimple_omp_task_arg_align_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_task.arg_align;
}
/* Set ARG_ALIGN to be the data block align for OMP_TASK GS.  */
static inline void
gimple_omp_task_set_arg_align (gimple gs, tree arg_align)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_task.arg_align = arg_align;
}
/* Return the clauses associated with OMP_SINGLE GS.  */
static inline tree
gimple_omp_single_clauses (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
  return gs->gimple_omp_single.clauses;
}
/* Return a pointer to the clauses associated with OMP_SINGLE GS.  */
static inline tree *
gimple_omp_single_clauses_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
  return &gs->gimple_omp_single.clauses;
}
/* Set CLAUSES to be the clauses associated with OMP_SINGLE GS.  */
static inline void
gimple_omp_single_set_clauses (gimple gs, tree clauses)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
  gs->gimple_omp_single.clauses = clauses;
}
/* Return the clauses associated with OMP_SECTIONS GS.  */
static inline tree
gimple_omp_sections_clauses (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  return gs->gimple_omp_sections.clauses;
}
/* Return a pointer to the clauses associated with OMP_SECTIONS GS.  */
static inline tree *
gimple_omp_sections_clauses_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  return &gs->gimple_omp_sections.clauses;
}
/* Set CLAUSES to be the set of clauses associated with OMP_SECTIONS
   GS.  */
static inline void
gimple_omp_sections_set_clauses (gimple gs, tree clauses)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  gs->gimple_omp_sections.clauses = clauses;
}
/* Return the control variable associated with the GIMPLE_OMP_SECTIONS
   in GS.  */
static inline tree
gimple_omp_sections_control (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  return gs->gimple_omp_sections.control;
}
/* Return a pointer to the control variable associated with the
   GIMPLE_OMP_SECTIONS GS.  */
static inline tree *
gimple_omp_sections_control_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  return &gs->gimple_omp_sections.control;
}
/* Set CONTROL to be the control variable associated with the
   GIMPLE_OMP_SECTIONS in GS.  */
static inline void
gimple_omp_sections_set_control (gimple gs, tree control)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  gs->gimple_omp_sections.control = control;
}
/* Set COND to be the condition code for the I-th collapsed loop of
   OMP_FOR GS.  COND must be a comparison tree code.  */
static inline void
gimple_omp_for_set_cond (gimple gs, size_t i, enum tree_code cond)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (TREE_CODE_CLASS (cond) == tcc_comparison
			      && i < gs->gimple_omp_for.collapse);
  gs->gimple_omp_for.iter[i].cond = cond;
}
/* Return the condition code associated with OMP_FOR GS.  */
static inline enum tree_code
gimple_omp_for_cond (const_gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return gs->gimple_omp_for.iter[i].cond;
}
/* Set the value being stored in an atomic store.  */
static inline void
gimple_omp_atomic_store_set_val (gimple g, tree val)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  g->gimple_omp_atomic_store.val = val;
}
/* Return the value being stored in an atomic store.  */
static inline tree
gimple_omp_atomic_store_val (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  return g->gimple_omp_atomic_store.val;
}
/* Return a pointer to the value being stored in an atomic store.  */
static inline tree *
gimple_omp_atomic_store_val_ptr (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  return &g->gimple_omp_atomic_store.val;
}
/* Set the LHS of an atomic load.  */
static inline void
gimple_omp_atomic_load_set_lhs (gimple g, tree lhs)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
  g->gimple_omp_atomic_load.lhs = lhs;
}
/* Get the LHS of an atomic load.  */
static inline tree
gimple_omp_atomic_load_lhs (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
  return g->gimple_omp_atomic_load.lhs;
}
/* Return a pointer to the LHS of an atomic load.  */
static inline tree *
gimple_omp_atomic_load_lhs_ptr (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
  return &g->gimple_omp_atomic_load.lhs;
}
/* Set the RHS of an atomic load.  */
static inline void
gimple_omp_atomic_load_set_rhs (gimple g, tree rhs)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
  g->gimple_omp_atomic_load.rhs = rhs;
}
/* Get the RHS of an atomic load.  */
static inline tree
gimple_omp_atomic_load_rhs (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
  return g->gimple_omp_atomic_load.rhs;
}
/* Return a pointer to the RHS of an atomic load.  */
static inline tree *
gimple_omp_atomic_load_rhs_ptr (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
  return &g->gimple_omp_atomic_load.rhs;
}
/* Get the definition of the control variable in a GIMPLE_OMP_CONTINUE.  */
static inline tree
gimple_omp_continue_control_def (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
  return g->gimple_omp_continue.control_def;
}
/* The same as above, but return the address.  */
static inline tree *
gimple_omp_continue_control_def_ptr (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
  return &g->gimple_omp_continue.control_def;
}
/* Set the definition of the control variable in a GIMPLE_OMP_CONTINUE.  */
static inline void
gimple_omp_continue_set_control_def (gimple g, tree def)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
  g->gimple_omp_continue.control_def = def;
}
/* Get the use of the control variable in a GIMPLE_OMP_CONTINUE.  */
static inline tree
gimple_omp_continue_control_use (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
  return g->gimple_omp_continue.control_use;
}
/* The same as above, but return the address.  */
static inline tree *
gimple_omp_continue_control_use_ptr (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
  return &g->gimple_omp_continue.control_use;
}
/* Set the use of the control variable in a GIMPLE_OMP_CONTINUE.  */
static inline void
gimple_omp_continue_set_control_use (gimple g, tree use)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
  g->gimple_omp_continue.control_use = use;
}
/* Return a pointer to the body for the GIMPLE_TRANSACTION statement GS.  */
static inline gimple_seq *
gimple_transaction_body_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
  return &gs->gimple_transaction.body;
}
/* Return the body for the GIMPLE_TRANSACTION statement GS.  */
static inline gimple_seq
gimple_transaction_body (gimple gs)
{
  return *gimple_transaction_body_ptr (gs);
}
/* Return the label associated with a GIMPLE_TRANSACTION.  */
static inline tree
gimple_transaction_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
  return gs->gimple_transaction.label;
}
/* Return a pointer to the label associated with a GIMPLE_TRANSACTION.  */
static inline tree *
gimple_transaction_label_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
  return &gs->gimple_transaction.label;
}
/* Return the subcode associated with a GIMPLE_TRANSACTION.
   The subcode carries the GTMA_* flag bits.  -- NOTE(review): flag
   meaning is defined elsewhere; confirm against the GTMA_* defines.  */
static inline unsigned int
gimple_transaction_subcode (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
  return gs->gsbase.subcode;
}
/* Set BODY to be the body for the GIMPLE_TRANSACTION statement GS.  */
static inline void
gimple_transaction_set_body (gimple gs, gimple_seq body)
{
  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
  gs->gimple_transaction.body = body;
}
/* Set the label associated with a GIMPLE_TRANSACTION.  */
static inline void
gimple_transaction_set_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
  gs->gimple_transaction.label = label;
}
/* Set the subcode associated with a GIMPLE_TRANSACTION.  */
static inline void
gimple_transaction_set_subcode (gimple gs, unsigned int subcode)
{
  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
  gs->gsbase.subcode = subcode;
}
/* Return a pointer to the return value for GIMPLE_RETURN GS.  */
static inline tree *
gimple_return_retval_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_RETURN);
  return gimple_op_ptr (gs, 0);
}
/* Return the return value for GIMPLE_RETURN GS.  */
static inline tree
gimple_return_retval (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_RETURN);
  return gimple_op (gs, 0);
}
/* Set RETVAL to be the return value for GIMPLE_RETURN GS.  */
static inline void
gimple_return_set_retval (gimple gs, tree retval)
{
  GIMPLE_CHECK (gs, GIMPLE_RETURN);
  gimple_set_op (gs, 0, retval);
}
/* Expands to the case labels for every OpenMP statement code; used by
   is_gimple_omp below and usable in other switches over gimple codes.  */
#define CASE_GIMPLE_OMP				\
    case GIMPLE_OMP_PARALLEL:			\
    case GIMPLE_OMP_TASK:			\
    case GIMPLE_OMP_FOR:			\
    case GIMPLE_OMP_SECTIONS:			\
    case GIMPLE_OMP_SECTIONS_SWITCH:		\
    case GIMPLE_OMP_SINGLE:			\
    case GIMPLE_OMP_SECTION:			\
    case GIMPLE_OMP_MASTER:			\
    case GIMPLE_OMP_ORDERED:			\
    case GIMPLE_OMP_CRITICAL:			\
    case GIMPLE_OMP_RETURN:			\
    case GIMPLE_OMP_ATOMIC_LOAD:		\
    case GIMPLE_OMP_ATOMIC_STORE:		\
    case GIMPLE_OMP_CONTINUE
/* Returns true when the gimple statement STMT is any of the OpenMP types.  */
static inline bool
is_gimple_omp (const_gimple stmt)
{
  switch (gimple_code (stmt))
    {
    CASE_GIMPLE_OMP:
      return true;
    default:
      return false;
    }
}
/* Returns TRUE if statement G is a GIMPLE_NOP.  */
static inline bool
gimple_nop_p (const_gimple g)
{
  return gimple_code (g) == GIMPLE_NOP;
}
/* Return true if GS is a GIMPLE_RESX.  */
static inline bool
is_gimple_resx (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_RESX;
}
/* Return the predictor of GIMPLE_PREDICT statement GS.
   The subcode packs the predictor value together with the
   GF_PREDICT_TAKEN outcome bit, which is masked off here.  */
static inline enum br_predictor
gimple_predict_predictor (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PREDICT);
  return (enum br_predictor) (gs->gsbase.subcode & ~GF_PREDICT_TAKEN);
}
/* Set the predictor of GIMPLE_PREDICT statement GS to PREDICT,
   preserving the GF_PREDICT_TAKEN outcome bit.  */
static inline void
gimple_predict_set_predictor (gimple gs, enum br_predictor predictor)
{
  GIMPLE_CHECK (gs, GIMPLE_PREDICT);
  gs->gsbase.subcode = (gs->gsbase.subcode & GF_PREDICT_TAKEN)
		       | (unsigned) predictor;
}
/* Return the outcome of GIMPLE_PREDICT statement GS.  */
static inline enum prediction
gimple_predict_outcome (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PREDICT);
  return (gs->gsbase.subcode & GF_PREDICT_TAKEN) ? TAKEN : NOT_TAKEN;
}
/* Set the outcome of GIMPLE_PREDICT statement GS to OUTCOME, leaving
   the predictor bits untouched.  */
static inline void
gimple_predict_set_outcome (gimple gs, enum prediction outcome)
{
  GIMPLE_CHECK (gs, GIMPLE_PREDICT);
  if (outcome == TAKEN)
    gs->gsbase.subcode |= GF_PREDICT_TAKEN;
  else
    gs->gsbase.subcode &= ~GF_PREDICT_TAKEN;
}
/* Return the type of the main expression computed by STMT.  Return
   void_type_node if the statement computes nothing.

   For assignments and calls we want a type that can stand in for both
   the RHS and LHS types when a possibly useless conversion is
   involved, i.e. the original RHS type as far as it can be
   reconstructed.  */
static inline tree
gimple_expr_type (const_gimple stmt)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      return gimple_call_return_type (stmt);
    case GIMPLE_ASSIGN:
      /* POINTER_PLUS_EXPR keeps the pointer type of its first
	 operand; everything else falls back to the LHS type.  */
      if (gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR)
	return TREE_TYPE (gimple_assign_rhs1 (stmt));
      return TREE_TYPE (gimple_get_lhs (stmt));
    case GIMPLE_COND:
      /* Conditions always compute a boolean.  */
      return boolean_type_node;
    default:
      return void_type_node;
    }
}
/* Return true if TYPE is a suitable type for a scalar register variable,
   i.e. any non-aggregate type.  */
static inline bool
is_gimple_reg_type (tree type)
{
  return !AGGREGATE_TYPE_P (type);
}
/* Return a new iterator pointing to GIMPLE_SEQ's first statement.
   Callers use the gsi_start macro below so the sequence is passed by
   address and the iterator can modify it in place.  */
static inline gimple_stmt_iterator
gsi_start_1 (gimple_seq *seq)
{
  gimple_stmt_iterator i;
  i.ptr = gimple_seq_first (*seq);
  i.seq = seq;
  i.bb = i.ptr ? gimple_bb (i.ptr) : NULL;
  return i;
}
#define gsi_start(x) gsi_start_1(&(x))
/* Return an iterator that points to nothing (end state, no sequence,
   no basic block).  */
static inline gimple_stmt_iterator
gsi_none (void)
{
  gimple_stmt_iterator i;
  i.ptr = NULL;
  i.seq = NULL;
  i.bb = NULL;
  return i;
}
/* Return a new iterator pointing to the first statement in basic block BB.  */
static inline gimple_stmt_iterator
gsi_start_bb (basic_block bb)
{
  gimple_stmt_iterator iter;
  gimple_seq *stmts;

  stmts = bb_seq_addr (bb);
  iter.seq = stmts;
  iter.bb = bb;
  iter.ptr = gimple_seq_first (*stmts);
  return iter;
}
/* Return a new iterator initially pointing to GIMPLE_SEQ's last statement.
   Callers use the gsi_last macro below so the sequence is passed by
   address.  */
static inline gimple_stmt_iterator
gsi_last_1 (gimple_seq *seq)
{
  gimple_stmt_iterator i;
  i.ptr = gimple_seq_last (*seq);
  i.seq = seq;
  i.bb = i.ptr ? gimple_bb (i.ptr) : NULL;
  return i;
}
#define gsi_last(x) gsi_last_1(&(x))
/* Return a new iterator pointing to the last statement in basic block BB.  */
static inline gimple_stmt_iterator
gsi_last_bb (basic_block bb)
{
  gimple_stmt_iterator iter;
  gimple_seq *stmts;

  stmts = bb_seq_addr (bb);
  iter.seq = stmts;
  iter.bb = bb;
  iter.ptr = gimple_seq_last (*stmts);
  return iter;
}
/* Return true if I is at the end of its sequence. */
static inline bool
gsi_end_p (gimple_stmt_iterator i)
{
return i.ptr == NULL;
}
/* Return true if I is one statement before the end of its sequence. */
static inline bool
gsi_one_before_end_p (gimple_stmt_iterator i)
{
return i.ptr != NULL && i.ptr->gsbase.next == NULL;
}
/* Advance the iterator to the next gimple statement. */
static inline void
gsi_next (gimple_stmt_iterator *i)
{
i->ptr = i->ptr->gsbase.next;
}
/* Advance the iterator to the previous gimple statement.
   The prev links are circular: the first statement's prev points at the
   sequence tail, whose next pointer is NULL.  That property is used here
   to detect stepping before the first statement, in which case the
   iterator becomes an end iterator.  */
static inline void
gsi_prev (gimple_stmt_iterator *i)
{
  gimple prev = i->ptr->gsbase.prev;
  if (prev->gsbase.next)
    i->ptr = prev;
  else
    i->ptr = NULL;
}
/* Return the statement the iterator currently points at (NULL for an
   end iterator).  */
static inline gimple
gsi_stmt (gimple_stmt_iterator i)
{
  return i.ptr;
}
/* Return an iterator pointing at the first non-label statement of basic
   block BB (or the end iterator if BB contains only labels).  */
static inline gimple_stmt_iterator
gsi_after_labels (basic_block bb)
{
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start_bb (bb);
       !gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL;
       gsi_next (&gsi))
    ;
  return gsi;
}
/* Advance the iterator one statement, then keep skipping any debug
   statements that follow.  */
static inline void
gsi_next_nondebug (gimple_stmt_iterator *i)
{
  gsi_next (i);
  while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i)))
    gsi_next (i);
}
/* Move the iterator one statement back, then keep skipping any debug
   statements encountered.  */
static inline void
gsi_prev_nondebug (gimple_stmt_iterator *i)
{
  gsi_prev (i);
  while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i)))
    gsi_prev (i);
}
/* Return a new iterator pointing to the first non-debug statement in
   basic block BB.  */
static inline gimple_stmt_iterator
gsi_start_nondebug_bb (basic_block bb)
{
  gimple_stmt_iterator it;
  it = gsi_start_bb (bb);
  if (!gsi_end_p (it) && is_gimple_debug (gsi_stmt (it)))
    gsi_next_nondebug (&it);
  return it;
}
/* Return a new iterator pointing to the last non-debug statement in
   basic block BB.  */
static inline gimple_stmt_iterator
gsi_last_nondebug_bb (basic_block bb)
{
  gimple_stmt_iterator it;
  it = gsi_last_bb (bb);
  if (!gsi_end_p (it) && is_gimple_debug (gsi_stmt (it)))
    gsi_prev_nondebug (&it);
  return it;
}
/* Return the basic block associated with this iterator (cached when the
   iterator was created; NULL for a detached sequence).  */
static inline basic_block
gsi_bb (gimple_stmt_iterator i)
{
  return i.bb;
}
/* Return the sequence associated with this iterator (dereferences the
   stored sequence address).  */
static inline gimple_seq
gsi_seq (gimple_stmt_iterator i)
{
  return *i.seq;
}
/* Positioning policy for the gsi_insert_* routines below: where the
   iterator should point after the new statement(s) have been linked in.  */
enum gsi_iterator_update
{
  GSI_NEW_STMT,		/* Only valid when single statement is added, move
			   iterator to it. */
  GSI_SAME_STMT,	/* Leave the iterator at the same statement. */
  GSI_CONTINUE_LINKING	/* Move iterator to whatever position is suitable
			   for linking other statements in the same
			   direction. */
};
/* In gimple-iterator.c */
/* Iterator construction and sequence splitting.  */
gimple_stmt_iterator gsi_start_phis (basic_block);
gimple_seq gsi_split_seq_after (gimple_stmt_iterator);
void gsi_split_seq_before (gimple_stmt_iterator *, gimple_seq *);
/* Statement replacement at the iterator position.  */
void gsi_set_stmt (gimple_stmt_iterator *, gimple);
void gsi_replace (gimple_stmt_iterator *, gimple, bool);
void gsi_replace_with_seq (gimple_stmt_iterator *, gimple_seq, bool);
/* Insertion; the "_without_update" variants skip operand/SSA updating.  */
void gsi_insert_before (gimple_stmt_iterator *, gimple,
			enum gsi_iterator_update);
void gsi_insert_before_without_update (gimple_stmt_iterator *, gimple,
				       enum gsi_iterator_update);
void gsi_insert_seq_before (gimple_stmt_iterator *, gimple_seq,
			    enum gsi_iterator_update);
void gsi_insert_seq_before_without_update (gimple_stmt_iterator *, gimple_seq,
					   enum gsi_iterator_update);
void gsi_insert_after (gimple_stmt_iterator *, gimple,
		       enum gsi_iterator_update);
void gsi_insert_after_without_update (gimple_stmt_iterator *, gimple,
				      enum gsi_iterator_update);
void gsi_insert_seq_after (gimple_stmt_iterator *, gimple_seq,
			   enum gsi_iterator_update);
void gsi_insert_seq_after_without_update (gimple_stmt_iterator *, gimple_seq,
					  enum gsi_iterator_update);
/* Removal and movement of statements.  */
bool gsi_remove (gimple_stmt_iterator *, bool);
gimple_stmt_iterator gsi_for_stmt (gimple);
void gsi_move_after (gimple_stmt_iterator *, gimple_stmt_iterator *);
void gsi_move_before (gimple_stmt_iterator *, gimple_stmt_iterator *);
void gsi_move_to_bb_end (gimple_stmt_iterator *, basic_block);
/* Edge insertion: statements queued on edges are committed later, the
   "_immediate" variants insert (possibly splitting the edge) right away.  */
void gsi_insert_on_edge (edge, gimple);
void gsi_insert_seq_on_edge (edge, gimple_seq);
basic_block gsi_insert_on_edge_immediate (edge, gimple);
basic_block gsi_insert_seq_on_edge_immediate (edge, gimple_seq);
void gsi_commit_one_edge_insert (edge, basic_block *);
void gsi_commit_edge_inserts (void);
gimple gimple_call_copy_skip_args (gimple, bitmap);
/* Convenience routines to walk all statements of a gimple function.
   Note that this is useful exclusively before the code is converted
   into SSA form.  Once the program is in SSA form, the standard
   operand interface should be used to analyze/modify statements.  */
/* State carried through the walk_gimple_* routines declared below and
   handed to every callback invocation.  */
struct walk_stmt_info
{
  /* Points to the current statement being walked. */
  gimple_stmt_iterator gsi;
  /* Additional data that the callback functions may want to carry
     through the recursion. */
  void *info;
  /* Pointer map used to mark visited tree nodes when calling
     walk_tree on each operand. If set to NULL, duplicate tree nodes
     will be visited more than once. */
  struct pointer_set_t *pset;
  /* Operand returned by the callbacks. This is set when calling
     walk_gimple_seq. If the walk_stmt_fn or walk_tree_fn callback
     returns non-NULL, this field will contain the tree returned by
     the last callback. */
  tree callback_result;
  /* Indicates whether the operand being examined may be replaced
     with something that matches is_gimple_val (if true) or something
     slightly more complicated (if false). "Something" technically
     means the common subset of is_gimple_lvalue and is_gimple_rhs,
     but we never try to form anything more complicated than that, so
     we don't bother checking.
     Also note that CALLBACK should update this flag while walking the
     sub-expressions of a statement. For instance, when walking the
     statement 'foo (&var)', the flag VAL_ONLY will initially be set
     to true, however, when walking &var, the operand of that
     ADDR_EXPR does not need to be a GIMPLE value. */
  BOOL_BITFIELD val_only : 1;
  /* True if we are currently walking the LHS of an assignment. */
  BOOL_BITFIELD is_lhs : 1;
  /* Optional. Set to true by the callback functions if they made any
     changes. */
  BOOL_BITFIELD changed : 1;
  /* True if we're interested in location information. */
  BOOL_BITFIELD want_locations : 1;
  /* True if we've removed the statement that was processed. */
  BOOL_BITFIELD removed_stmt : 1;
};
/* Callback for walk_gimple_stmt. Called for every statement found
   during traversal. The first argument points to the statement to
   walk. The second argument is a flag that the callback sets to
   'true' if it the callback handled all the operands and
   sub-statements of the statement (the default value of this flag is
   'false'). The third argument is an anonymous pointer to data
   to be used by the callback. */
typedef tree (*walk_stmt_fn) (gimple_stmt_iterator *, bool *,
			      struct walk_stmt_info *);
/* Statement walkers; the "_mod" variant takes the sequence by address so
   the walked sequence may be modified in place.  */
gimple walk_gimple_seq (gimple_seq, walk_stmt_fn, walk_tree_fn,
		        struct walk_stmt_info *);
gimple walk_gimple_seq_mod (gimple_seq *, walk_stmt_fn, walk_tree_fn,
			    struct walk_stmt_info *);
tree walk_gimple_stmt (gimple_stmt_iterator *, walk_stmt_fn, walk_tree_fn,
		       struct walk_stmt_info *);
tree walk_gimple_op (gimple, walk_tree_fn, struct walk_stmt_info *);
/* Enum and arrays used for allocation stats. Keep in sync with
   gimple.c:gimple_alloc_kind_names. */
enum gimple_alloc_kind
{
  gimple_alloc_kind_assign,	/* Assignments. */
  gimple_alloc_kind_phi,	/* PHI nodes. */
  gimple_alloc_kind_cond,	/* Conditionals. */
  gimple_alloc_kind_rest,	/* Everything else. */
  gimple_alloc_kind_all
};
/* Per-bucket allocation statistics, indexed by gimple_alloc_kind
   (defined in gimple.c).  */
extern int gimple_alloc_counts[];
extern int gimple_alloc_sizes[];
/* Return the allocation-statistics bucket for a given stmt CODE.  */
static inline enum gimple_alloc_kind
gimple_alloc_kind (enum gimple_code code)
{
  if (code == GIMPLE_ASSIGN)
    return gimple_alloc_kind_assign;
  if (code == GIMPLE_PHI)
    return gimple_alloc_kind_phi;
  if (code == GIMPLE_COND)
    return gimple_alloc_kind_cond;
  return gimple_alloc_kind_rest;
}
/* Print the allocation statistics collected above.  */
extern void dump_gimple_statistics (void);
/* In gimple-fold.c. */
void gimplify_and_update_call_from_tree (gimple_stmt_iterator *, tree);
tree gimple_fold_builtin (gimple);
bool fold_stmt (gimple_stmt_iterator *);
bool fold_stmt_inplace (gimple_stmt_iterator *);
tree get_symbol_constant_value (tree);
tree canonicalize_constructor_val (tree, tree);
extern tree maybe_fold_and_comparisons (enum tree_code, tree, tree,
					enum tree_code, tree, tree);
extern tree maybe_fold_or_comparisons (enum tree_code, tree, tree,
				       enum tree_code, tree, tree);
bool gimple_val_nonnegative_real_p (tree);
#endif  /* GCC_GIMPLE_H */
|
flexFullMatrix.h | #ifndef flexFullMatrix_H
#define flexFullMatrix_H
#include "flexLinearOperator.h"

#include <algorithm>
#include <cmath>
#include <vector>
//! represents a full (non-CUDA) dense matrix
/*!
	Values are stored column-major in one flat container: element (i,j)
	lives at index i + j*numRows (see index2DtoLinear()).  The storage
	container is thrust::device_vector under CUDA and std::vector on the
	CPU; the CPU multiply kernels are OpenMP parallelized.
	Requires <algorithm> (std::max_element) and <cmath> (std::abs) —
	added to this header's include list.
*/
template<typename T>
class flexFullMatrix : public flexLinearOperator<T>
{
#ifdef __CUDACC__
	typedef thrust::device_vector<T> Tdata;
#else
	typedef std::vector<T> Tdata;
#endif
private:
	//! flat, column-major value storage of size numRows*numCols
	Tdata valueList;
public:
	//! initializes an empty matrix
	/* Base class listed first so the member-initializer list matches the
	   actual initialization order (bases before members); the previous
	   order triggered -Wreorder warnings. */
	flexFullMatrix() : flexLinearOperator<T>(0, 0, matrixOp, false), valueList() {}
	//! initializes a matrix
	/*!
		\param aNumRows number of rows
		\param aNumCols number of cols
		\param aMinus determines if operator is negated \sa isMinus
	*/
	flexFullMatrix(int aNumRows, int aNumCols, bool aMinus) : flexLinearOperator<T>(aNumRows, aNumCols, matrixOp, aMinus), valueList(aNumRows*aNumCols, 0) {}
	//! returns a deep copy of this operator; the caller owns the pointer
	flexFullMatrix<T>* copy()
	{
		flexFullMatrix<T>* A = new flexFullMatrix<T>(this->getNumRows(), this->getNumCols(), this->isMinus);
		A->valueList = valueList;
		return A;
	}
	//! intentionally a no-op; use timesPlus()/timesMinus() instead.
	//! NOTE(review): kept empty to preserve the existing interface behavior
	//! — confirm against flexLinearOperator whether an overwrite-style
	//! multiply was ever intended here.
	void times(bool transposed, const Tdata &input, Tdata &output)
	{
	}
	//! output += A*input (sign flipped when isMinus is set)
	void timesPlus(bool transposed, const Tdata &input, Tdata &output)
	{
		doTimesCPU(transposed, input, output, this->isMinus ? MINUS : PLUS);
	}
	//! output -= A*input (sign flipped when isMinus is set)
	void timesMinus(bool transposed, const Tdata &input, Tdata &output)
	{
		doTimesCPU(transposed, input, output, this->isMinus ? PLUS : MINUS);
	}
	//! inserts new matrix element val at position [i][j]
	void insertElement(int i, int j, T val)
	{
		this->valueList[index2DtoLinear(i, j)] = val;
	}
	//! inserts new matrix element val at linear (column-major) position i
	void insertElement(int i, T val)
	{
		this->valueList[i] = val;
	}
	//! maps a 2-D index (i,j) to the linear column-major storage index
	int index2DtoLinear(int i, int j)
	{
		return i + j*this->getNumRows();
	}
	//! returns the maximum absolute row sum of A (or A^T if transposed)
	T getMaxRowSumAbs(bool transposed)
	{
		std::vector<T> rowSum = this->getAbsRowSum(transposed);
		return *std::max_element(rowSum.begin(), rowSum.end());
	}
	//! returns the vector of absolute row sums of A (or A^T if transposed)
	std::vector<T> getAbsRowSum(bool transposed)
	{
		if (transposed)
		{
			/* Rows of A^T are the columns of A. */
			std::vector<T> result(this->getNumCols(), (T)0);
			for (int i = 0; i < this->getNumRows(); ++i)
			{
				for (int j = 0; j < this->getNumCols(); ++j)
				{
					result[j] += std::abs(valueList[index2DtoLinear(i, j)]);
				}
			}
			return result;
		}
		else
		{
			std::vector<T> result(this->getNumRows(), (T)0);
			for (int i = 0; i < this->getNumRows(); ++i)
			{
				for (int j = 0; j < this->getNumCols(); ++j)
				{
					result[i] += std::abs(valueList[index2DtoLinear(i, j)]);
				}
			}
			return result;
		}
	}
	//! prints requested row
	/*!
		\param i row to be printed
	*/
	void printRow(int i)
	{
		for (int j = 0; j < this->getNumCols(); ++j)
		{
			printf("(%d,%d,%f)|", i, j, valueList[index2DtoLinear(i, j)]);
		}
		printf("\n");
	}
	//! prints the whole matrix
	void printMatrix()
	{
		for (int i = 0; i < this->getNumRows(); i++)
		{
			printRow(i);
		}
	}
	//DUMMY FUNCTION: the CUDA build requires this interface; no real
	//device implementation is provided for the full matrix.
#ifdef __CUDACC__
	thrust::device_vector<T> getAbsRowSumCUDA(bool transposed)
	{
		thrust::device_vector<T> result(this->getNumRows(), (T)1);
		return result;
	}
#endif
private:
	//! CPU kernel: output (+/-)= A*input (or A^T*input when transposed)
	/*!
		\param s PLUS accumulates into output, MINUS subtracts from it
	*/
	void doTimesCPU(bool transposed, const Tdata &input, Tdata &output, const mySign s)
	{
		if (transposed)
		{
			/* Column-parallel: each iteration writes only output[j],
			   so there are no write conflicts between threads. */
			#pragma omp parallel for
			for (int j = 0; j < this->getNumCols(); ++j)
			{
				T tmp = static_cast<T>(0);
				for (int i = 0; i < this->getNumRows(); ++i)
				{
					tmp += input[i] * valueList[index2DtoLinear(i, j)];
				}
				if (s == PLUS)
					output[j] += tmp;
				else
					output[j] -= tmp;
			}
		}
		else
		{
			for (int j = 0; j < this->getNumCols(); ++j)
			{
				T tmp = input[j];
				/* Row-parallel within one column: distinct i writes
				   distinct output elements, so no conflicts. */
				#pragma omp parallel for
				for (int i = 0; i < this->getNumRows(); ++i)
				{
					if (s == PLUS)
						output[i] += tmp * valueList[index2DtoLinear(i, j)];
					else
						output[i] -= tmp * valueList[index2DtoLinear(i, j)];
				}
			}
		}
	}
};
#endif
|
api.c | // RUN: %libomptarget-compile-run-and-check-generic
// XFAIL: nvptx64-nvidia-cuda
// XFAIL: nvptx64-nvidia-cuda-newDriver
// Fails on amdgpu with error: GPU Memory Error
// XFAIL: amdgcn-amd-amdhsa
// XFAIL: amdgcn-amd-amdhsa-newDriver
#include <stdio.h>
#include <omp.h>
// ---------------------------------------------------------------------------
// Various definitions copied from OpenMP RTL
extern void __tgt_register_requires(int64_t);
// End of definitions copied from OpenMP RTL.
// ---------------------------------------------------------------------------
// The whole test assumes host and device share the same address space.
#pragma omp requires unified_shared_memory
#define N 1024
/* Reset the three work arrays: A cleared to zero, B all ones, C a ramp
   0..N-1. */
void init(int A[], int B[], int C[]) {
  int i = 0;
  while (i < N) {
    A[i] = 0;
    B[i] = 1;
    C[i] = i;
    ++i;
  }
}
/* Exercises the libomptarget device-memory API (omp_target_alloc/memcpy/
   associate_ptr/is_present/free).  The "// CHECK:" comments are FileCheck
   patterns matched against stdout — do not edit them.  */
int main(int argc, char *argv[]) {
const int device = omp_get_default_device();
// Manual registration of requires flags for Clang versions
// that do not support requires.
__tgt_register_requires(8);
// CHECK: Initial device: [[INITIAL_DEVICE:[0-9]+]]
printf("Initial device: %d\n", omp_get_initial_device());
// CHECK: Num devices: [[INITIAL_DEVICE]]
printf("Num devices: %d\n", omp_get_num_devices());
//
// Target alloc & target memcpy
//
int A[N], B[N], C[N];
// Init
init(A, B, C);
int *pA, *pB, *pC;
// map ptrs
pA = &A[0];
pB = &B[0];
pC = &C[0];
// Raw device allocations, one buffer per host array.
int *d_A = (int *)omp_target_alloc(N * sizeof(int), device);
int *d_B = (int *)omp_target_alloc(N * sizeof(int), device);
int *d_C = (int *)omp_target_alloc(N * sizeof(int), device);
// CHECK: omp_target_alloc succeeded
printf("omp_target_alloc %s\n", d_A && d_B && d_C ? "succeeded" : "failed");
// Host -> device copies of the two inputs.
omp_target_memcpy(d_B, pB, N * sizeof(int), 0, 0, device,
omp_get_initial_device());
omp_target_memcpy(d_C, pC, N * sizeof(int), 0, 0, device,
omp_get_initial_device());
#pragma omp target is_device_ptr(d_A, d_B, d_C) device(device)
{
#pragma omp parallel for schedule(static, 1)
for (int i = 0; i < N; i++) {
d_A[i] = d_B[i] + d_C[i] + 1;
}
}
// Device -> host copy of the result.
omp_target_memcpy(pA, d_A, N * sizeof(int), 0, 0, omp_get_initial_device(),
device);
// CHECK: Test omp_target_memcpy: Succeeded
// Expected: A[i] = B[i] + C[i] + 1 = 1 + i + 1 = i + 2.
int fail = 0;
for (int i = 0; i < N; ++i) {
if (A[i] != i + 2)
fail++;
}
if (fail) {
printf("Test omp_target_memcpy: Failed\n");
} else {
printf("Test omp_target_memcpy: Succeeded\n");
}
//
// target_is_present and target_associate/disassociate_ptr
//
init(A, B, C);
// CHECK: B is not present, associating it...
// CHECK: omp_target_associate_ptr B succeeded
if (!omp_target_is_present(B, device)) {
printf("B is not present, associating it...\n");
int rc = omp_target_associate_ptr(B, d_B, N * sizeof(int), 0, device);
printf("omp_target_associate_ptr B %s\n", !rc ? "succeeded" : "failed");
}
// CHECK: C is not present, associating it...
// CHECK: omp_target_associate_ptr C succeeded
if (!omp_target_is_present(C, device)) {
printf("C is not present, associating it...\n");
int rc = omp_target_associate_ptr(C, d_C, N * sizeof(int), 0, device);
printf("omp_target_associate_ptr C %s\n", !rc ? "succeeded" : "failed");
}
// CHECK: Inside target data: A is not present
// CHECK: Inside target data: B is present
// CHECK: Inside target data: C is present
#pragma omp target data map(from : B, C) device(device)
{
printf("Inside target data: A is%s present\n",
omp_target_is_present(A, device) ? "" : " not");
printf("Inside target data: B is%s present\n",
omp_target_is_present(B, device) ? "" : " not");
printf("Inside target data: C is%s present\n",
omp_target_is_present(C, device) ? "" : " not");
#pragma omp target map(from : A) device(device)
{
#pragma omp parallel for schedule(static, 1)
for (int i = 0; i < N; i++)
A[i] = B[i] + C[i] + 1;
}
}
// CHECK: B is present, disassociating it...
// CHECK: omp_target_disassociate_ptr B succeeded
// CHECK: C is present, disassociating it...
// CHECK: omp_target_disassociate_ptr C succeeded
if (omp_target_is_present(B, device)) {
printf("B is present, disassociating it...\n");
int rc = omp_target_disassociate_ptr(B, device);
printf("omp_target_disassociate_ptr B %s\n", !rc ? "succeeded" : "failed");
}
if (omp_target_is_present(C, device)) {
printf("C is present, disassociating it...\n");
int rc = omp_target_disassociate_ptr(C, device);
printf("omp_target_disassociate_ptr C %s\n", !rc ? "succeeded" : "failed");
}
// CHECK: Test omp_target_associate_ptr: Succeeded
fail = 0;
for (int i = 0; i < N; ++i) {
if (A[i] != i + 2)
fail++;
}
if (fail) {
printf("Test omp_target_associate_ptr: Failed\n");
} else {
printf("Test omp_target_associate_ptr: Succeeded\n");
}
omp_target_free(d_A, device);
omp_target_free(d_B, device);
omp_target_free(d_C, device);
printf("Done!\n");
return 0;
}
|
dsblock5.c | /* Begin dsblock5.c */
/* File version: 1.4, 1998-03-20 */
/*
* Copyright (C) 1997-2001 Dynasim AB.
* All rights reserved.
*
*/
}
#undef externalTable_
/* Number of parallel tasks the translated model was split into
   (0 when task decoupling is not used). */
#ifdef NBR_TASKS
DYMOLA_STATIC int nbrTasks_=NBR_TASKS;
#else
DYMOLA_STATIC int nbrTasks_=0;
#endif
#if !defined(DYMOLA_DSPACE)
/* Timer start-time slots; at least one element even when no timers exist. */
DYMOLA_STATIC double
DymolaStartTimers_[
#ifdef NrDymolaTimers_
NrDymolaTimers_ ? NrDymolaTimers_ : 1
#else
1
#endif
];
DYMOLA_STATIC double DymolaTimeZero[100000]={0};
DYMOLA_STATIC int DymolaTimeZeroLength=100000;
#endif
#if !defined(DymolaHaveUpdateInitVars)
/* Default no-op hook: models that need to adjust variables at (re-)
   initialization define DymolaHaveUpdateInitVars and provide their own
   implementation elsewhere. */
DYMOLA_STATIC void UpdateInitVars(double *time, double X_[], double XD_[], double U_[], \
double DP_[], int IP_[], Dymola_bool LP_[], double F_[], double Y_[], double W_[], double QZ_[], double duser_[], int iuser_[], void*cuser_[], struct DYNInstanceData*did_,int initialCall)
{
return;
}
#endif
/* Must be initialized (and thus defined) because moutil is included first*/
/* Lazily point each auxiliary-string slot into the per-instance character
   buffer; the slot width is MAXAuxStrLen_ (or 10 as a lower bound).  The
   loop runs only once: a non-null slot 0 marks the table as initialized. */
static int DYNStrInit(struct DYNInstanceData*did_) {
if (DYNX(DYNAuxStr_,0)==0) {
int j;
for(j=0;j<sizeof(DYNAuxStr_)/sizeof(*DYNAuxStr_);++j) DYNAuxStr_[j]=did_->DYNAuxStrBuff_vec+j*
#if defined(MAXAuxStrLen_) && MAXAuxStrLen_>10
MAXAuxStrLen_
#else
10
#endif
;
}
return 0;
}
/* Store string S into auxiliary string slot I (truncated to the slot
   width), optionally flagging a discrete event when the stored value
   actually changes. */
DYMOLA_STATIC void DYNSetAuxStringD(struct DYNInstanceData*did_,const char*s,int i, int setDEventIfDifferent) {
DYNStrInit(did_);
if (i>=0 && i<sizeof(DYNAuxStr_)/sizeof(*DYNAuxStr_)) {
int j,mlen=
#if defined(MAXAuxStrLen_) && MAXAuxStrLen_>10
MAXAuxStrLen_
#else
10
#endif
;
/* FIX: the original tested the function pointer DYNSetAuxStringD here
   (always non-null), which made the setDEventIfDifferent parameter dead
   and flagged a D event on every string change.  Test the parameter, as
   its name and DYNSetAuxStringArrayD's pass-through imply. */
if (setDEventIfDifferent && !did_->AnyDEvent_var && strncmp(&(DYNAuxStr_[i][0]), s, mlen-1) != 0) did_->AnyDEvent_var = 1;
for(j=0;j<mlen-1 && s[j];++j) DYNAuxStr_[i][j]=s[j];
DYNAuxStr_[i][j]=0;
if (s[j]) {DymosimMessage("Truncated string variable to");DymosimMessage(DYNAuxStr_[i]);}
} else DymosimMessage("Internal error in String handling.");
}
/* Convenience wrapper: store S into slot I without requesting a discrete
   event on change. */
DYMOLA_STATIC void DYNSetAuxString(struct DYNInstanceData*did_, const char*s, int i) {
DYNSetAuxStringD(did_, s, i, 0);
}
DYMOLA_STATIC void DYNSetAuxStringArrayD(struct DYNInstanceData*did_, struct StringArray s, int i, int setDEventIfDifferent) {
int nrElem, j;
nrElem = StringNrElements(s);
if (i >= 0 && i + nrElem <= sizeof(DYNAuxStr_) / sizeof(*DYNAuxStr_)) {
for (j = 0; j<nrElem; ++j) {
DYNSetAuxStringD(did_, s.data[j], i + j, setDEventIfDifferent);
}
} else DymosimMessage("Internal error in String array handling.");
}
/* Convenience wrapper: store array S starting at slot I without requesting
   a discrete event on change. */
DYMOLA_STATIC void DYNSetAuxStringArray(struct DYNInstanceData*did_,struct StringArray s,int i) {
DYNSetAuxStringArrayD(did_, s, i, 0);
}
/* Return auxiliary string slot I, or the empty string for an out-of-range
   index.  Makes sure the slot table is lazily initialized first. */
DYMOLA_STATIC const char*DYNGetAuxStr(struct DYNInstanceData*did_,int i) {
	DYNStrInit(did_);
	if (i < 0 || i >= sizeof(DYNAuxStr_)/sizeof(*DYNAuxStr_))
		return "";
	return DYNAuxStr_[i];
}
/* Per-system bookkeeping arrays for the nonlinear (QNL) solver — at least
   one slot even when QNLmax_ is 0 so the pointers stay valid.
   NOTE(review): presumably function-evaluation, Jacobian-evaluation and
   call counters; confirm against moutil. */
static int QNLfunc_vec[QNLmax_ ? QNLmax_ : 1] = {0};
DYMOLA_STATIC int* QNLfunc = QNLfunc_vec;
static int QNLjac_vec[QNLmax_ ? QNLmax_ :1] = {0};
DYMOLA_STATIC int* QNLjac = QNLjac_vec;
DYMOLA_STATIC int QNLmax=QNLmax_;
static int QNLcalls_vec[QNLmax_ ? QNLmax_ : 1] = { 0 };
DYMOLA_STATIC int* QNLcalls = QNLcalls_vec;
#if !defined(NExternalObject_)
#define NExternalObject_ 10
#endif
DYMOLA_STATIC int Buffersize = 20000;
DYMOLA_STATIC int setDefault_=0;
DYMOLA_STATIC int setDefaultX_=0,setDefaultU_=0,setDefaultY_=0,setDefaultP_=0,setDefaultDX_=0,setDefaultW_=0;
/* Defined later; releases the delay-line buffers. */
DYMOLA_STATIC LIBDS_API_AFTER void freeDelay(delayStruct* del, const size_t nbrDel);
/* Reset all per-instance simulation state (delay lines, pre/relation/
   sample bookkeeping, equation-remember caches) and optionally destruct
   the registered external objects. */
DYMOLA_STATIC void delayBuffersCloseNew(struct DYNInstanceData*did_, int destructExternalObject) {
int i;
for(i=0;i<SizeDelay_;++i) delayID_[i]=0;
for(i=0;i<MAXAux+10000;++i) Aux_[i]=0;
for(i=0;i<SizePre_;++i) QPre_[i]=0;
for(i=0;i<SizePre_;++i) RefPre_[i]=0;
for(i=0;i<SizeEq_;++i) EqRemember2_[i]=EqRemember1_[i]=EqRememberTemp_[i]=0;
for(i=0;i<NWhen_;++i) QEvaluateNew_[i]=QEvaluate_[i]=0;
for(i=0;i<NGlobalHelp_;++i) DYNhelp[i]=0;
for(i=0;i<NGlobalHelpI_;++i) did_->helpvari_vec[i]=0;
for(i=0;i<2*NRel_+1;++i)
oldQZ2_[i]=oldQZ3_[i] = QZold_[i]=oldQZ_[i]=oldQZDummy_[i]=0;
for(i=0;i<NRel_+1;++i) QRel_[i]=QM_[i]=Qn_[i] = Qp_[i]=Qscaled_[i]=0.0;
for(i=0;i<NSamp_;++i) {NextSampleTime_[i]=NextSampleTimeNew_[i]=0;NextSampleAct_[i]=NextSampleActNew_[i]=0;}
for(i=0;i<NRel_+1;++i) QL_[i]=Qenable_[i]=0;
for(i=0;i<NTim_+1;++i) QTimed_[i]=0;
/* -1e33 acts as "never" for the remember-cache timestamps. */
EqRemember1Time_=-1e33;
EqRemember2Time_=-1e33;
EqRemAcc1Time_ = -1e33;
EqRemAcc2Time_ = -1e33;
EqRemTempTime_ = -1e33;
EqRemTempTimeAcc_ = -1e33;
(did_->hasStoredInTemp) = 0;
(did_->eqRememberReplaceOldDynamics) = 0;
(did_->eqRememberReplaceOldAccepted) = 0;
(did_->decoupleTime_var)=-1e33;
if (destructExternalObject) {
for (i = NExternalObject_ - 1; i >= 0; --i) {
/* Reverse order in case of dependencies */
void*x = did_->externalTable_vec[i].obj_;
did_->externalTable_vec[i].obj_ = 0;
if (x && did_->externalTable_vec[i].destructor_)
(*(did_->externalTable_vec[i].destructor_))(x);
did_->externalTable_vec[i].destructor_ = 0;
#if (defined(_OPENMP) && !defined(DISABLE_DYMOLA_OPENMP))
if (did_->externalTable_vec[i].haveLock_) {
omp_destroy_lock(&(did_->externalTable_vec[i].lockExternal_));
did_->externalTable_vec[i].haveLock_ = 0;
}
#endif
}
}
for(i=0;i<did_->DymolaTimerStructsLen_var;i++) {
did_->DymolaTimerStructs_vec[i].num=0;
}
freeDelay(did_->del_vec, SizeDelay_ ? SizeDelay_ : 1);
#if (defined(_OPENMP) && !defined(DISABLE_DYMOLA_OPENMP)) && defined(_MSC_VER)
if (!getenv("OMP_WAIT_POLICY")) Sleep(200);
/* Otherwise unloading the DLL may crash, see also https://stackoverflow.com/questions/34439956/vc-crash-when-freeing-a-dll-built-with-openmp */
#endif
}
/* Drop every external-object table entry WITHOUT invoking destructors
   (compare delayBuffersCloseNew, which can destruct them). */
DYMOLA_STATIC void delayBuffersClearExternalTables(struct DYNInstanceData*did_) {
	int idx;
	for (idx = 0; idx < NExternalObject_; ++idx) {
		did_->externalTable_vec[idx].destructor_ = 0;
		did_->externalTable_vec[idx].obj_ = 0;
	}
}
/* Legacy single-instance entry point: resets the static instance data
   and destructs its external objects. */
DYMOLA_STATIC void delayBuffersClose(void) {
delayBuffersCloseNew(&tempData, 1);
}
/* Return the size in bytes of one DYNInstanceData instance, for callers
   that allocate instance data externally.
   (Idiom fix: "(void)" gives the function a proper prototype; the original
   empty "()" declared an unspecified parameter list in pre-C23 C.) */
DYMOLA_STATIC int dynInstanceDataSize(void) {
return sizeof(struct DYNInstanceData);
}
/* Return the first registered external object of DID, or NULL when DID
   itself is NULL. */
DYMOLA_STATIC void* dynExternalObjectFirst(struct DYNInstanceData* did) {
	return did ? did->externalTable_vec[0].obj_ : 0;
}
/* Approximate event finder for the (inline) Euler integration method:
   from the sign pattern of the event indicators QZ_ over the last steps it
   predicts the crossing time, then scales the state derivatives F_ (and
   currentStepSize_/currentStepSizeRatio*_) so that a step lands just past
   the predicted event.  The body below is machine-scrambled generated code
   (see SCRAMBLE markers); identifiers and control flow are intentionally
   obfuscated, so the code is left untouched.
   NOTE(review): with SecondDegree, a quadratic extrapolation through the
   last three indicator values appears to be used — verify against the
   Dymola inline-integration documentation before modifying. */
DYMOLA_STATIC void CheckForEvents(struct DYNInstanceData*did_,double Time, int Init, int Event,
double QZ_[], int nrel_, double F_[], int nx_,double*duser_,int*iuser_)
/* SCRAMBLE ON */
{
/* Tuning constants for the event predictor. */
#define DebugCheckForEvents 0
#define OvershootFactor 1.2
/* */
#define FindLastEvent 1
/* */
#define CheckForEventsEps 1e-10
/* */
#define SecondDegreeOvershootFactor 1.04
#define SecondDegreeUncertainty 0.4
#define SecondDegreeUncertainty2 0.7
int ZZZ715,ZZZ39; static double oldTime,oldDummyTime=-1e30;
static double oldTime2, oldDummyTime2, oldstepSizeRatio;
static double c1, c2, c1start; static double T1end=-1e30; static double T2end=-1e30, stepSizeRatio=1; double ZZZ8329, ZZZ7652;
#ifdef InterpolateStatesForInline
static const double CheckForEventsMinStep=0.2;
#else
static const double CheckForEventsMinStep=0;
#endif
int ZZZ5998; if (Init) {
#if defined(FindEvent_)
DymosimMessage("");
DymosimMessage("Approximative event finder used. Must be used with Euler method."); DymosimMessage("");
#endif
StepSize = 0; LastTime = 1E30; T1end = -1E30; T2end = -1E30; oldTime = Time; oldDummyTime = -1e30;
#if SecondDegree
oldTime2 = oldDummyTime2 = Time; oldstepSizeRatio = 1.0;
#endif
c1=1; c2=1; c1start=1; stepSizeRatio=1; } if (StepSize == 0 && Time > LastTime) StepSize = Time - LastTime;
if (Event) LastTime = Time; ZZZ5998 = Time>oldDummyTime; if (ZZZ5998) {
#if SecondDegree
oldTime2=oldTime; oldDummyTime2=oldDummyTime; oldstepSizeRatio=stepSizeRatio;
if (StepSize!=0) { if (Time>=T1end && Time<T2end) stepSizeRatio = (T1end-oldTime)/StepSize; else stepSizeRatio = (Time-oldTime)/StepSize; } for (ZZZ715 = 0; ZZZ715 < 2*nrel_;ZZZ715++) {oldQZ3_[ZZZ715]=oldQZ2_[ZZZ715];oldQZ2_[ZZZ715]=oldQZ_[ZZZ715];}
#endif
for(ZZZ715=0;ZZZ715<2*nrel_;ZZZ715++) oldQZ_[ZZZ715]=oldQZDummy_[ZZZ715]; }
{ for(ZZZ715=0;ZZZ715<2*nrel_;ZZZ715++) oldQZDummy_[ZZZ715]=Qenable_[ZZZ715/2+1] ? QZ_[ZZZ715] : 0; } if (StepSize!=0 && ZZZ5998) {
#if DebugCheckForEvents
double ZZZ1317 = 0; for (ZZZ715 = 0; ZZZ715 < 2*nrel_; ZZZ715++) { if (Qenable_[ZZZ715/2+1]) { if (oldQZ_[ZZZ715]*QZ_[ZZZ715]<0) {
double ZZZ8860; ZZZ8860=QZ_[ZZZ715]/(QZ_[ZZZ715]-oldQZ_[ZZZ715]); if (ZZZ8860>ZZZ1317) {ZZZ1317=ZZZ8860;ZZZ39=ZZZ715/2;} } } } if (ZZZ1317>0) { char ZZZ732[200]; if (Time<T2end) { sprintf(ZZZ732,"Event at projected time %.10g overshoot %.10g",T1end,c1*ZZZ1317+1);
} else if (stepSizeRatio>1+CheckForEventsEps || stepSizeRatio<1-CheckForEventsEps) { sprintf(ZZZ732,"Missed event at time %.10g interpolated at %.10g",Time,Time-ZZZ1317*stepSizeRatio*StepSize); } else { sprintf(ZZZ732,"Event at time %.10g interpolated at %.10g",Time,Time-ZZZ1317*StepSize); } DymosimMessage(ZZZ732);
#if SecondDegree
sprintf(ZZZ732,"Relation %d QZ=%.10g %.10g oldQZ=%.10g oldQZ2=%.10g oldQZ3=%.10g",ZZZ39,QZ_[2*ZZZ39],QZ_[2*ZZZ39+1],oldQZ_[2*ZZZ39],oldQZ2_[2*ZZZ39],oldQZ3_[2*ZZZ39]);
#else
sprintf(ZZZ732,"Relation %d QZ=%.10g %.10g oldQZ=%.10g",ZZZ39,QZ_[2*ZZZ39],QZ_[2*ZZZ39+1],oldQZ_[2*ZZZ39]);
#endif
DymosimMessage(ZZZ732); }
#endif
} if (StepSize != 0 && Time >= T2end) { c1 = c1start = FindLastEvent ? 0 :2; ZZZ7652 = (NextTimeEvent-Time)/StepSize; /* */
ZZZ39=-1; if (ZZZ7652>0 && ZZZ7652<2 && (FindLastEvent ? ZZZ7652>c1: ZZZ7652<c1)) { c1=ZZZ7652; } for (ZZZ715 = 0; ZZZ715 < 2*nrel_; ZZZ715++) { ZZZ8329 = (QZ_[ZZZ715] - oldQZ_[ZZZ715])/stepSizeRatio; if (QZ_[ZZZ715] * (OvershootFactor*ZZZ8329*2 + QZ_[ZZZ715]) < 0 && Qenable_[ZZZ715/2+1] && QZ_[2*(ZZZ715/2)]*QZ_[2*(ZZZ715/2)+1]>0 ) { /* */ ZZZ7652 = -QZ_[ZZZ715]/ZZZ8329+(OvershootFactor-1); /* */
#if SecondDegree
if (oldDummyTime2>-1e30 && (oldQZ_[ZZZ715]>0 ? oldQZ2_[ZZZ715]>oldQZ_[ZZZ715] : oldQZ2_[ZZZ715]<oldQZ_[ZZZ715])) { /* */ double ZZZ3419, ZZZ8687, ZZZ4213, ZZZ2231, ZZZ4006, ZZZ6591, ZZZ5281, ZZZ8430, ZZZ7134; ZZZ3419=QZ_[ZZZ715]; ZZZ8687=(stepSizeRatio+oldstepSizeRatio); ZZZ4213=stepSizeRatio*ZZZ8687*oldstepSizeRatio; ZZZ2231=(ZZZ8687*ZZZ8687*(oldQZ_[ZZZ715]-ZZZ3419)-stepSizeRatio*stepSizeRatio*(oldQZ2_[ZZZ715]-ZZZ3419))/ZZZ4213; ZZZ4006=(-ZZZ8687*(oldQZ_[ZZZ715]-ZZZ3419)+stepSizeRatio*(oldQZ2_[ZZZ715]-ZZZ3419))/ZZZ4213; ZZZ6591=4*ZZZ3419*ZZZ4006; ZZZ5281=ZZZ2231*ZZZ2231;
ZZZ8430=(oldQZ_[ZZZ715]>0 ? oldQZ3_[ZZZ715]>oldQZ2_[ZZZ715] : oldQZ3_[ZZZ715]<oldQZ2_[ZZZ715]) ? SecondDegreeUncertainty : SecondDegreeUncertainty2; ZZZ7134=ZZZ5281-(ZZZ6591>0 ? ZZZ6591*(1+ZZZ8430) : ZZZ6591*(1-ZZZ8430)); if (ZZZ7134>=0) { double ZZZ5803; ZZZ5803=-(2*ZZZ3419/(-ZZZ2231-(ZZZ2231>0?1:-1)*sqrt(ZZZ7134)))+(SecondDegreeOvershootFactor-1);
#if DebugCheckForEvents
{char ZZZ732[200]; sprintf(ZZZ732,"%d ZZZ3419 %.10g ZZZ8687 %.10g ZZZ4213 %.10g ZZZ2231 %.10g ZZZ7652 %.10g ZZZ7134 %.10g",ZZZ715,ZZZ3419,ZZZ8687,ZZZ4213,ZZZ2231,ZZZ4006,ZZZ7134); DymosimMessage(ZZZ732);
sprintf(ZZZ732,"C1: %g C2: %g beta=-%g alpha=-%g QZ=%.10g oldQZ=%.10g oldQZ2=%.10g",ZZZ7652,ZZZ5803,stepSizeRatio+oldstepSizeRatio,stepSizeRatio, QZ_[ZZZ715],oldQZ_[ZZZ715],oldQZ2_[ZZZ715]); DymosimMessage(ZZZ732); }
#endif
if (ZZZ5803>-0.5 && ZZZ5803 < 2.5) ZZZ7652=ZZZ5803; } }
#endif
if (ZZZ7652 > 0 && ZZZ7652<2 && (FindLastEvent ? ZZZ7652>c1: ZZZ7652<c1)) { /* */
c1 = ZZZ7652; /* */ZZZ39=ZZZ715/2; } } } if (c1 != 1E30 && c1 != c1start && c1<1+CheckForEventsMinStep) { /* */ if (c1<CheckForEventsMinStep) c1=CheckForEventsMinStep; c2 = 2 - c1;
T1end = Time + (1-CheckForEventsEps)*StepSize; T2end = Time + (2-CheckForEventsEps)*StepSize; /* */
#if DebugCheckForEvents
{char ZZZ732[200]; sprintf(ZZZ732,"Project at %.10g to %.10g Short %.10g Long %.10g",Time,T1end,c1*StepSize,c2*StepSize); DymosimMessage(ZZZ732);} {char ZZZ732[200];
#if SecondDegree
sprintf(ZZZ732,"Relation %d QZ=%.10g %.10g oldQZ=%.10g oldQZ2=%.10g oldQZ3=%.10g",ZZZ39,QZ_[2*ZZZ39],QZ_[2*ZZZ39+1],oldQZ_[2*ZZZ39],oldQZ2_[2*ZZZ39],oldQZ3_[2*ZZZ39]);
#else
sprintf(ZZZ732,"Relation %d QZ=%.10g %.10g oldQZ=%.10g",ZZZ39,QZ_[2*ZZZ39],QZ_[2*ZZZ39+1],oldQZ_[2*ZZZ39]);
#endif
DymosimMessage(ZZZ732); }
#endif
} else { c2=1; T2end=Time; T1end=Time; }
} oldTime=oldDummyTime=Time; currentStepSize_ = StepSize;
#ifdef InterpolateStatesForInline
currentStepSizeRatio_ = 1;
#endif
currentStepSizeRatio2_ = 1; if (Time < T1end) {
currentStepSizeRatio2_ = c1;
#ifdef InterpolateStatesForInline
currentStepSizeRatio_ = c1;
#else
currentStepSize_ = c1*StepSize;
#endif
for (ZZZ715 = 0; ZZZ715 < nx_; ZZZ715++) { F_[ZZZ715] = F_[ZZZ715]*c1; } /* */
} else if (Time < T2end) { currentStepSizeRatio2_ = c2;
#ifdef InterpolateStatesForInline
currentStepSizeRatio_ = c2;
#else
currentStepSize_ = c2*StepSize;
#endif
for (ZZZ715 = 0; ZZZ715 < nx_; ZZZ715++) { F_[ZZZ715] = F_[ZZZ715]*c2; }
/* */ oldTime=T1end; }}
/* SCRAMBLE OFF */
/* snprintf-like helper defined elsewhere in this file. */
DYMOLA_STATIC int sprintfC(char*s, const char*format, ...);
/* Modelica sample(start, interval) operator for sampler COUNTER: returns
   true exactly at the first event iteration of a sample instant, and
   registers the next sample time as a time event. */
DYMOLA_STATIC Dymola_bool sampleFunction(struct DYNInstanceData*did_,double Time, double start, double interval, int counter,
Dymola_bool Init, Dymola_bool Event) {
struct BasicDDymosimStruct*basicD=getBasicDDymosimStruct();
Dymola_bool samp = false;
/* (Re)compute the sample counter at initialization or when this sampler
   has not been activated yet. */
if (Init || (Event && NextSampleAct_[counter]==0)) {
double x;
basicD->mOrigTimeError=Dymola_max(basicD->mOrigTimeError,fabs(start)); /* Collect them */
x=findCounter(Dymola_max(Time,start),start,interval);
if (Init || x>NextSampleTime_[counter])
NextSampleTime_[counter]=x;
/* Samples at start,start+interval,...*/
/* Replace Dymola_max(Time,start) by Time to sample at ...,start-interval,start,start+interval */
};
if (Event) {
double eventTime=start+(NextSampleTime_[counter]-1)*interval;
/* Tolerance scales with |Time| plus the collected origin error. */
const double eventAccuracy=
#ifndef DynSimStruct
5e-14
#else
1e-7
#endif
*(fabs(Time)+basicD->mOrigTimeError);
/* 5*eps to guard against different times */
/*DymosimMessageDouble("Event at time: ",Time);*/
/*DymosimMessageDouble("Event Time:",eventTime);*/
/* Advance past every sample instant at or before the current time;
   the loop also catches up after skipped instants. */
while (eventTime<=Time+eventAccuracy) {
NextSampleTime_[counter]+=1;
eventTime=start+(NextSampleTime_[counter]-1)*interval;
/*DymosimMessageDouble("Sampling at time: ", Time);*/
/*DymosimMessageDouble("Next sampling",eventTime);*/
samp = true;
}
NextSampleTimeNew_[counter]=NextSampleTime_[counter];
NextSampleActNew_[counter]=1;
registerTimeEventNew(eventTime, basicD); /* The next event for this sampler */
/* Only report true in the first event iteration. */
samp = samp && (Iter == 1);
{
struct BasicIDymosimStruct* basicI = getBasicIDymosimStruct();
if (samp && basicI->mPrintEvent&(1 << 1) && (!Init || basicI->mPrintEvent&(1 << 2))) {
char str[60], message_str[100];
sprintfC(str, "sample(%g,%g)", start, interval);
DynLogEvents(-1, 0, 1, str, 1, 0);
sprintfC(message_str, "Sample event (%.60s) at time:", str);
DymosimMessageDouble(message_str, Time);
}
}
}
return samp;
}
/* Event-generating floor of a linear function of time: returns
   floor(time/y) (or floor(time*y) when DO_DIVIDE is false), flags
   *AnyEvent/*AnyDEvent when the floored value changes, and registers the
   next crossing as a time event.  Returns 0 when AnyEvent is NULL. */
DYMOLA_STATIC double DYNTimeFloorEvent(int do_divide, double y, struct DYNInstanceData*did_, double time, Dymola_bool*AnyEvent, Dymola_bool*AnyDEvent, int sampleCounter, int pr, int DymolaOneIteration, const char*str) {
double res = did_->NextSampleTime_vec[sampleCounter];
int wasZero;
if (!AnyEvent) return 0;
/* Slot value 0 means "not initialized yet"; otherwise it stores the last
   floored value shifted by +1 (see the update below). */
wasZero = (0 == res);
{
const double currVal = do_divide ? (time / y) : (time*y);
if (res > 0) res -= 1.0;
if (wasZero || currVal<res || currVal >= res + 1) {
if (AnyDEvent) *AnyDEvent = 1;
if (pr) {
DymosimMessageDouble("Time event at time:", time);
DynLogEvents(-1, 0, 1, str, 1, res);
}
*AnyEvent = 1;
/* Update res */
res = floor(currVal);
did_->NextSampleTimeNew_vec[sampleCounter] = (res >= 0) ? res + 1 : res;
}
{
/* Compute new time event point */
double resNext = (y > 0) ? (res + 1) : (res);
volatile double tNext = do_divide ? (resNext*y) : (resNext / y);
struct BasicDDymosimStruct*basicD = getBasicDDymosimStruct();
/* Nudge tNext forward if rounding still floors it to the old value
   ("volatile" prevents the compiler from folding the check away). */
if (do_divide ? floor(tNext / y) == res : floor(tNext*y) == res) {
#if defined(_MSC_VER) ? (_MSC_VER>=1800) : (__STDC_VERSION__>199901L)
tNext = nextafter(tNext, DBL_MAX)+1e-20*fabs((do_divide?(1/y):y));
#else
tNext = tNext*(1 + DBL_EPSILON) + 1e-20*fabs((do_divide ? (1 / y) : y));
#endif
}
registerTimeEventNew(tNext, basicD);
}
}
{
return res;
}
}
/* Periodic sample event handler for sample(start, interval).
 * On Init (or the first Event with an unset counter) it seeds the sample
 * counter via findCounter(); on Event it advances NextSampleTimeNew_ past the
 * current time (within eventAccuracy tolerance), registers the next sample
 * time, and returns true when this call constitutes a fresh sample hit
 * (only on the first event iteration, Iter == 1). */
DYMOLA_STATIC Dymola_bool sampleFunctionM(struct DYNInstanceData*did_,double Time, double start, double interval, int counter,
                                          Dymola_bool Init, Dymola_bool Event) {
  struct BasicDDymosimStruct*basicD=getBasicDDymosimStruct();
  Dymola_bool samp = false;
  if (interval<=0) DymosimError("Sample did not have positive sample interval");
  if (Init|| (Event && NextSampleTime_[counter]==0)) {
    double x;
    if (Init==2) return false;
    basicD->mOrigTimeError=Dymola_max(basicD->mOrigTimeError,fabs(start)); /* Collect them */
    x=findCounter(Dymola_max(Time,start),start,interval);
    if (Init || x>NextSampleTime_[counter])
      NextSampleTime_[counter]=x;
    /* Samples at start,start+interval,...*/
    /* Replace Dymola_max(Time,start) by Time to sample at ...,start-interval,start,start+interval */
  };
  if (Event) {
    double eventTime=start+(NextSampleTime_[counter]-1)*interval;
    /* Relative tolerance for matching the sample instant; looser when built
       as a SimStruct (Simulink) target. */
    const double eventAccuracy=
#ifndef DynSimStruct
      5e-14
#else
      1e-7
#endif
      *(fabs(Time)+basicD->mOrigTimeError);
    /* 5*eps to guard against different times */
    /*DymosimMessageDouble("Event at time: ",Time);*/
    /*DymosimMessageDouble("Event Time:",eventTime);*/
    NextSampleTimeNew_[counter]=NextSampleTime_[counter];
    /* Advance the pending counter until the next sample time lies in the future. */
    while (eventTime<=Time+eventAccuracy) {
      NextSampleTimeNew_[counter]+=1;
      eventTime=start+(NextSampleTimeNew_[counter]-1)*interval;
      /*DymosimMessageDouble("Sampling at time: ", Time);*/
      /*DymosimMessageDouble("Next sampling",eventTime);*/
      samp = true;
    }
    NextSampleActNew_[counter]=1;
    registerTimeEventNew(eventTime, basicD); /* The next event for this sampler */
    samp = samp && (Iter == 1);
    {
      struct BasicIDymosimStruct* basicI = getBasicIDymosimStruct();
      /* Optional event logging, gated by the print-event bit mask. */
      if (samp && basicI->mPrintEvent&(1 << 1) && (!Init || basicI->mPrintEvent&(1 << 2))) {
        char str[60], message_str[100];
        sprintfC(str, "sample(%g,%g)", start, interval);
        DynLogEvents(-1, 0, 1, str, 1, 0);
        sprintfC(message_str, "Sample event (%.60s) at time:", str);
        DymosimMessageDouble(message_str,Time);
      }
    }
  }
  return samp;
}
/* Phased periodic sample handler: like sampleFunctionM, but the sampler fires
 * only every maxVal-th base interval, offset by 'phase'.  The whole body runs
 * inside an OpenMP critical section when sub-process parallelism is enabled,
 * since it mutates shared event bookkeeping. */
DYMOLA_STATIC Dymola_bool sampleFunctionM3(struct DYNInstanceData*did_,double Time, double start, double interval, int phase, int maxVal, int counter,
                                           Dymola_bool Init, Dymola_bool Event) {
  Dymola_bool samp = false;
  if (interval <= 0 || maxVal <= 0) DymosimError("Sample did not have positive sample interval");
#if (defined(_OPENMP) && defined(DYMOLA_SUBPARA) && !defined(DISABLE_DYMOLA_OPENMP))
#pragma omp critical(TimeEvent)
#endif
  {
    struct BasicDDymosimStruct*basicD = getBasicDDymosimStruct();
    double x, x2;
    if (Init != 2) {
      if (Init || (Event && NextSampleTime_[counter] == 0)) {
        basicD->mOrigTimeError = Dymola_max(basicD->mOrigTimeError, fabs(start)); /* Collect them */
        x = findCounter(Dymola_max(Time, start), start, interval);
        if (Init || x > NextSampleTime_[counter])
          NextSampleTime_[counter] = x;
        /* Samples at start,start+interval,...*/
        /* Replace Dymola_max(Time,start) by Time to sample at ...,start-interval,start,start+interval */
      }
    };
    if (Event) {
      double eventTime = start + (NextSampleTime_[counter] - 1)*interval;
      const double eventAccuracy =
#ifndef DynSimStruct
        5e-14
#else
        1e-7
#endif
        *(fabs(Time) + basicD->mOrigTimeError);
      /* 5*eps to guard against different times */
      /*DymosimMessageDouble("Event at time: ",Time);*/
      /*DymosimMessageDouble("Event Time:",eventTime);*/
      NextSampleTimeNew_[counter] = NextSampleTime_[counter];
      while (eventTime <= Time + eventAccuracy) {
        NextSampleTimeNew_[counter] += 1;
        eventTime = start + (NextSampleTimeNew_[counter] - 1)*interval;
        /*DymosimMessageDouble("Sampling at time: ", Time);*/
        /*DymosimMessageDouble("Next sampling",eventTime);*/
        samp = true;
      }
      /* Map the base counter onto the phased grid: fire only when the counter
         lands exactly on phase modulo maxVal (0.1 guards float truncation). */
      x = floor((0.1 + NextSampleTimeNew_[counter] - 2 - (maxVal - 1 - phase)) / maxVal); /* the passed point */
      x2 = x*maxVal + (maxVal - 1 - phase);
      samp = samp && (x2 == (NextSampleTimeNew_[counter] - 2)) && x >= 0; /* Handle negative phase */
      eventTime = start + (x2 + maxVal)*interval;
      NextSampleActNew_[counter] = 1;
      registerTimeEventNew(eventTime, basicD); /* The next event for this sampler */
      samp = samp && (Iter == 1);
      {
        struct BasicIDymosimStruct* basicI = getBasicIDymosimStruct();
        if (samp && basicI->mPrintEvent&(1 << 1) && (!Init || basicI->mPrintEvent&(1 << 2))) {
          char str[60], message_str[100];
          sprintfC(str, "sample(%g,%g)", start, interval);
          DynLogEvents(-1, 0, 1, str, 1, 0);
          sprintfC(message_str, "Sample event (%.60s) at time:", str);
          DymosimMessageDouble(message_str, Time);
        }
      }
    }
  }
  return samp;
}
#if defined(DYMOSIM) && defined(NI_)
/* National Instruments build: forward integer-state initialization to the
 * LIBDS entry point, passing the model's integer arrays. */
LIBDS_API void InitI2(int, int, double*,int*);
static void InitI(struct DYNInstanceData* did_,int n,int d) {
  InitI2(n, d, QImd_, QIml_);
}
#else
/* Default build: integer-state initialization is a no-op. */
static void InitI(struct DYNInstanceData* did_,int n,int d) {
  ;
}
#endif
/* Reset the "sampler active" flags for every sampler before the next event pass. */
DYMOLA_STATIC void ClearNextSample(struct DYNInstanceData* did_) {
  int k = 0;
  while (k < NSamp_) {
    NextSampleActNew_[k] = 0;
    ++k;
  }
}
/* Nonzero when the model's outputs depend directly on its inputs
 * (direct feed-through), as declared at code-generation time. */
#if defined(DIRECT_FEED_THROUGH)
DYMOLA_STATIC int DirectFeedThrough_=1;
#else
DYMOLA_STATIC int DirectFeedThrough_=0;
#endif
#if DymolaUseRDTSC_
#if defined(_MSC_VER) && (defined(_M_AMD64)||defined(_M_IX86))
#include "intrin.h"
#endif
static double rtdrealFrequency=1.0e9;
static double rtdinvFreq=1.0/1.0e9;
struct MyLargeInteger {
unsigned int LowPart;
unsigned int HighPart;
};
static const double MInt=4294967296.0;
/* High-resolution timer based on the CPU time-stamp counter (RDTSC).
 * i == 0: stores the raw 64-bit counter into *d (if non-NULL) and returns the
 *         counter as a double.
 * i != 0: treats *d as a previously stored counter and returns the elapsed
 *         time in seconds using rtdinvFreq.
 * Returns -1 if the struct/double size assumption does not hold.
 * NOTE(review): assumes the TSC is monotonic and constant-rate across cores --
 * not guaranteed on all CPUs; confirm for the deployment targets. */
static double DymolaPerformance(double*d,int i) {
  struct MyLargeInteger count={0,0};
  /* The 64-bit counter is smuggled through a double-sized slot; bail out if
     the sizes ever diverge. */
  if (sizeof(struct MyLargeInteger)!=sizeof(*d))
    return -1;
  {
#if defined(_MSC_VER) && (defined(_M_AMD64)||defined(_M_IX86))
    (*(unsigned __int64*)&count)=__rdtsc();
#elif defined(__GNUC__)
    /* Gnu assembler: other names of registers, declare that rdtsc clobbers registers
       and different order of operands */
    __asm("rdtsc" : /* none */ : : "eax", "edx" );
    __asm("mov %%eax, %0" : "=g" (count.LowPart));
    __asm("mov %%edx, %0" : "=g" (count.HighPart));
#elif defined(__LCC__)
    /* Lcc, default in Matlab, has it as an intrinsic function */
    extern long long _stdcall _rdtsc(void);
    {
      *(long long*)(&count)=_rdtsc();
    }
#endif
  }
  if (i==0) {
    if (d) *(struct MyLargeInteger*)(d)=count;
    return count.LowPart+count.HighPart*MInt;
  } else {
    struct MyLargeInteger*a=(struct MyLargeInteger*)(d);
    /* Difference of two 64-bit counters, done in double arithmetic. */
    return (
      (count.HighPart*1.0-a->HighPart)*MInt+count.LowPart-a->LowPart)*rtdinvFreq;
  }
}
struct RegisterReturn {
int EAXV1,EBXV1,ECXV1,EDXV1;
int EAXV2,EBXV2,ECXV2,EDXV2;
int EAXV3,EBXV3,ECXV3,EDXV3;
};
/* One-time setup of the TSC frequency used by DymolaPerformance.
 * If d == 0 the frequency is auto-detected by parsing the CPU brand string
 * (CPUID leaves 0x80000002..4) for a trailing "... MHz" or "... GHz" token;
 * on failure it falls back to 1 GHz with a warning.  Finally it installs
 * DymolaPerformance as the global timer callback. */
static void InitializeFrequency(double d) {
#if defined(_MSC_VER) && (defined(_M_AMD64)||defined(_M_IX86))
  int dummy[4];
#endif
  static int firstCall=1;
  if (!firstCall) return;
  firstCall=0;
  if (d==0) {
    unsigned int x=0x80000000UL;
    /* The three CPUID brand-string leaves write 48 bytes; the union lets us
       read them back as text. */
    union {
      struct RegisterReturn registerReturn;
      char ch[48];
    } myValues;
    myValues.ch[0]='\0';
#if defined(_MSC_VER) && (defined(_M_AMD64)||defined(_M_IX86))
    __cpuid(dummy,x);
    x=dummy[0];
#elif defined(__GNUC__) && defined(i386)
    __asm("mov %0, %%eax": /*none */: "g" (x));
    __asm("cpuid");
    __asm("mov %%eax, %0" : "=g" (x));
#endif
    /* Brand string is only available if the max extended leaf covers 0x80000004. */
    if (x>=0x80000004UL) {
      x=0x80000002UL;
#if defined(_MSC_VER) && (defined(_M_AMD64)||defined(_M_IX86))
      __cpuid(dummy,x);
      myValues.registerReturn.EAXV1=dummy[0];
      myValues.registerReturn.EBXV1=dummy[1];
      myValues.registerReturn.ECXV1=dummy[2];
      myValues.registerReturn.EDXV1=dummy[3];
#elif defined(__GNUC__) && defined(i386)
      __asm("mov %0, %%eax": : "g" (x));
      __asm("cpuid" : : : "eax", "ebx", "ecx", "edx");
      __asm("mov %%eax, %0": "=g" (myValues.registerReturn.EAXV1));
      __asm("mov %%ebx, %0": "=g" (myValues.registerReturn.EBXV1));
      __asm("mov %%ecx, %0": "=g" (myValues.registerReturn.ECXV1));
      __asm("mov %%edx, %0": "=g" (myValues.registerReturn.EDXV1));
#endif
      x=0x80000003UL;
#if defined(_MSC_VER) && (defined(_M_AMD64)||defined(_M_IX86))
      __cpuid(dummy,x);
      myValues.registerReturn.EAXV2=dummy[0];
      myValues.registerReturn.EBXV2=dummy[1];
      myValues.registerReturn.ECXV2=dummy[2];
      myValues.registerReturn.EDXV2=dummy[3];
#elif defined(__GNUC__) && defined(i386)
      __asm("mov %0, %%eax": : "g" (x));
      __asm("cpuid" : : : "eax", "ebx", "ecx", "edx");
      __asm("mov %%eax, %0": "=g" (myValues.registerReturn.EAXV2));
      __asm("mov %%ebx, %0": "=g" (myValues.registerReturn.EBXV2));
      __asm("mov %%ecx, %0": "=g" (myValues.registerReturn.ECXV2));
      __asm("mov %%edx, %0": "=g" (myValues.registerReturn.EDXV2));
#endif
      x=0x80000004UL;
#if defined(_MSC_VER) && (defined(_M_AMD64)||defined(_M_IX86))
      __cpuid(dummy,x);
      myValues.registerReturn.EAXV3=dummy[0];
      myValues.registerReturn.EBXV3=dummy[1];
      myValues.registerReturn.ECXV3=dummy[2];
      myValues.registerReturn.EDXV3=dummy[3];
#elif defined(__GNUC__) && defined(i386)
      __asm("mov %0, %%eax": : "g" (x));
      __asm("cpuid" : : : "eax", "ebx", "ecx", "edx");
      __asm("mov %%eax, %0": "=g" (myValues.registerReturn.EAXV3));
      __asm("mov %%ebx, %0": "=g" (myValues.registerReturn.EBXV3));
      __asm("mov %%ecx, %0": "=g" (myValues.registerReturn.ECXV3));
      __asm("mov %%edx, %0": "=g" (myValues.registerReturn.EDXV3));
#endif
      {
        /* Parse the brand string backwards for "<number> MHz" or "<number> GHz". */
        double dMult=1e6;
        char*lastM=strrchr(myValues.ch,'M');
        if (lastM!=0 && lastM[1]=='H' && lastM[2]=='z') {
        } else {
          lastM=strrchr(myValues.ch,'G');
          if (lastM!=0 && lastM[1]=='H' && lastM[2]=='z') {
            dMult=1e9;
          } else lastM=0;
        }
        if (lastM!=0) {
          /* Step back over spaces, then over the numeric token, then scan it. */
          for(;lastM>myValues.ch && lastM[-1]==' ';lastM--);
          for(;lastM>myValues.ch && (lastM[-1]>='0' && lastM[-1]<='9' || lastM[-1]=='.');lastM--);
          if (sscanf(lastM,"%lg",&d)!=1) {
            d=0;
          } else d*=dMult;
        }
      }
    }
    if (d==0) {
      char str[200];
      sprintf(str,"Could not determine speed of processor. Assuming 1GHz\nCPU reported: %s\n",myValues.ch);
      DymosimMessage(str);
      d=1e9;
    } else {
      char str[200];
      sprintf(str,"Determined processor speed to %lg MHz\nCPU reported: %s\n",d/1e6,myValues.ch);
      DymosimMessage(str);
    }
  }
  rtdrealFrequency=d;
  rtdinvFreq=1.0/rtdrealFrequency;
  {
    /* Install the TSC-based timer as the global callback. */
    extern double (*DymolaTimerCounterCallback)(double*,int);
    DymolaTimerCounterCallback=&DymolaPerformance;
  }
}
#if defined(DymolaUseRDTSCFrequency_)
#define SetupProcessorCounter() InitializeFrequency(DymolaUseRDTSCFrequency_)
#else
#define SetupProcessorCounter() InitializeFrequency(0.0)
#endif
#else
#define SetupProcessorCounter()
#endif
/* Report the model's static dimensions (states, inputs, outputs, auxiliaries,
 * parameters, relations, constraints, DAE-solver flag) through the out
 * parameters.  All values come from code-generation-time macros. */
DYMOLA_STATIC void GetDimensions(int *nx_, int *nx2_, int *nu_, int *ny_, int *nw_, int *np_,
                                 int *nrel_, int *ncons_, int *dae_)
{
  *nx_ = NX_;
  *nx2_ = NX2_;
  *nu_ = NU_;
  *ny_ = NY_;
  *nw_ = NW_;
  *np_ = NP_;
  *nrel_ = NRel_;
  *ncons_ = NCons_;
  *dae_ = DAEsolver_;
#if defined(DynSimStruct)
  /* Simulink builds piggy-back timer setup on the first dimension query. */
  SetupProcessorCounter();
#endif
}
/* Extended dimension query: adds string parameters (nsp_) and the full
 * relation count (nrel2_) on top of GetDimensions.  Missing macros default
 * to 0 (NPS_) or fall back to NRel_ (NRelF_). */
DYMOLA_STATIC void GetDimensions2(int *nx_, int *nx2_, int *nu_, int *ny_, int *nw_, int *np_, int* nsp_,
                                  int*nrel2_,int *nrel_, int *ncons_, int *dae_)
{
  *nx_ = NX_;
  *nx2_ = NX2_;
  *nu_ = NU_;
  *ny_ = NY_;
  *nw_ = NW_;
#ifdef NPS_
  *nsp_ = NPS_;
#else
  *nsp_ = 0;
#endif
  *np_ = NP_;
  *nrel_ = NRel_;
#ifdef NRelF_
  *nrel2_ = NRelF_;
#else
  *nrel2_ = NRel_;
#endif
  *ncons_ = NCons_;
  *dae_ = DAEsolver_;
#if defined(DynSimStruct)
  SetupProcessorCounter();
#endif
}
/* Further extended dimension query: as GetDimensions2, plus discrete-state
 * count (nd_) and extra-state count (nxp_). */
DYMOLA_STATIC void GetDimensions4(int *nx_, int *nx2_, int *nu_, int *ny_, int *nw_, int *np_, int* nsp_,
                                  int*nrel2_,int *nrel_, int *ncons_, int *dae_, int *nd_, int* nxp_){
  *nx_ = NX_;
  *nx2_ = NX2_;
  *nu_ = NU_;
  *ny_ = NY_;
  *nw_ = NW_;
#ifdef NPS_
  *nsp_ = NPS_;
#else
  *nsp_ = 0;
#endif
  *np_ = NP_;
  *nrel_ = NRel_;
#ifdef NRelF_
  *nrel2_ = NRelF_;
#else
  *nrel2_ = NRel_;
#endif
  *ncons_ = NCons_;
  *dae_ = DAEsolver_;
  *nd_ = ND_;
  *nxp_ = NXP_;
#if defined(DynSimStruct)
  SetupProcessorCounter();
#endif
}
/* File-scope copies of the model dimensions (same macros as GetDimensions*),
 * kept as addressable ints for interfaces that need pointers to them. */
static int nx_=NX_;
static int nx2_=NX2_;
static int nu_=NU_;
static int ny_=NY_;
static int nw_=NW_;
static int np_=NP_;
#ifdef NPS_
static int nsp_=NPS_;
#else
static int nsp_=0;
#endif
#ifdef NRelF_
static int nrel2_=NRelF_;
#else
static int nrel2_=NRel_;
#endif
static int nrel_=NRel_;
static int ncons_=NCons_;
static int dae_=DAEsolver_;
/* Event/auxiliary dimension query: relations, timers, if-conditions, samplers,
 * when-clauses, global help area, auxiliary/nonlinear-block sizes and the
 * pre()/equation buffer sizes.  (The nrel_ parameter intentionally shadows
 * the file-scope static of the same name.) */
DYMOLA_STATIC void GetDimensions3(int *nrel_, int *ntim_, int *ncheckif_, int *nsamp_, int *nwhen_, int *nglobalhelp_,
                                  int *maxaux, int *qnlmax_, int *sizepre_, int *sizeeq_)
{
  *nrel_ = NRel_;
  *ntim_ = NTim_;
  *ncheckif_ = NCheckIf_;
  *nsamp_ = NSamp_;
  *nwhen_ = NWhen_;
  *nglobalhelp_ = NGlobalHelp_;
  *maxaux = MAXAux;
  *qnlmax_ = QNLmax_;
  *sizepre_ = SizePre_;
  *sizeeq_ = SizeEq_;
}
/* Populate the basic double/int Dymosim bookkeeping structs with the
 * code-generation-time defaults (inline-integration mode, fixed step size,
 * result-store interval), zeroing fields whose macros are absent. */
DYMOLA_STATIC void InitializeDymosimStruct(struct BasicDDymosimStruct*basicD,struct BasicIDymosimStruct*basicI) {
#if INLINE_INTEGRATION
  basicI->mInlineIntegration=INLINE_INTEGRATION;
#else
  basicI->mInlineIntegration=0;
#endif
#if defined(DymolaGeneratedFixedStepSize_)
  basicD->mDymolaFixedStepSize=DymolaGeneratedFixedStepSize_;
#else
  basicD->mDymolaFixedStepSize=0.0;
#endif
  basicD->mCurrentStepSizeRatio2 = 1.0;
  basicD->mOrigTimeError=0;
#if defined(FMUStoreResultInterval_)
  basicD->mStoreResultInterval = FMUStoreResultInterval_;
#else
  basicD->mStoreResultInterval = 0;
#endif
}
#if defined(RT) || defined(NRT)
#else
/* dsblock "B" entry point: option-dispatched model bootstrap.
 *   *iopt_ == 1: no-op.
 *   *iopt_ == 2: fill info_/sig_/dim_ with model capabilities and dimensions.
 *   *iopt_ == 3: size iuser_, initialize the Dymosim structs embedded in
 *                duser_/iuser_, and run the first declareNew_ pass.
 *   *iopt_ == 4: run the second declareNew_ pass.
 *   *iopt_ == 5: re-initialize the Dymosim structs at the start of duser_/iuser_.
 * Any global error code raised during the call is copied into *QiErr.
 * Always returns 0. */
DYMOLA_STATIC int dsblockb_(const int *iopt_, int info_[], int sig_[], int dim_[],
                            double *t0_, double x0_[], double xd0_[],
                            double dp_[], int ip_[], Dymola_bool lp_[],
                            double duser_[], int iuser_[], void*cuser_[],
                            int *QiErr)
{
  int /* c1_, c2_, c3_, i_, */ nx2_;
  /* double d1_; */
  *(GlobalErrorPointer())=0;
  /* Second-stage states only exist for the DAE solver. */
  if (DAEsolver_)
    nx2_ = NX2_;
  else
    nx2_ = 0;
  if (*iopt_ == 1) {
  } else if (*iopt_ == 2) {
    SetupProcessorCounter();
    if (NCons_ > 0)
      info_[0] = 2;
    else if (DAEsolver_)
      info_[0] = 1;
#if defined(HaveDummyDerivative_)
    info_[1] = 1;
#endif
#if defined(AnalyticJacobian_)
    info_[2] = AnalyticJacobianElements_;
#endif
    /* if (NRel_ > 0 && NX_ + nx2_ == 0) */
    if (NX_ + nx2_ == 0)
      sig_[0] = 1; /* To enable handling of "state events" without states. */
    else
      sig_[0] = NX_ + nx2_;
    sig_[1] = NU_;
    sig_[2] = NY_;
    if (NRel_ > 0 && RootFinder_)
      sig_[3] = 1;
    sig_[4] = NW_;
    sig_[5] = NP_;
    sig_[6] = NA_; /* Number of alias signal matrices or scalars */
    sig_[7] = NA_; /* Total number of alias elements */
    /* if (NRel_ > 0 && NX_ + nx2_ == 0) */
    if (NX_ + nx2_ == 0)
      dim_[0] = 1;
    else
      dim_[0] = NX_ + nx2_;
    dim_[1] = NU_;
    dim_[2] = NY_;
    dim_[3] = 2 * NRel_;
    dim_[4] = NW_;
    dim_[5] = NP_;
    dim_[6] = NRelF_;
    dim_[7] = SizeEq_;
    dim_[9] = NHash1_;
    dim_[10] = NHash2_;
    dim_[11] = NX_;
    dim_[12] = nx2_;
    dim_[13] = NGlobalHelp_;
    dim_[14] = NHash3_;
#ifdef NPS_
    dim_[15] = NPS_;
#endif
    /* Total double workspace: states + outputs + relation pairs (if root
       finding) + auxiliaries + the embedded BasicDDymosimStruct. */
    dim_[8] = dim_[0] + dim_[2] + dim_[3]*sig_[3] + dim_[4] + sizeof(struct BasicDDymosimStruct)/sizeof(doublereal);
  } else if (*iopt_ == 3) {
    iuser_[0] = NX_ + nx2_ + NCons_;
    iuser_[1] = NY_;
    iuser_[2] = NW_;
    iuser_[3] = 2 * NRel_;
    /* The double struct lives after the four user arrays; the int struct
       after the four counters. */
    InitializeDymosimStruct((struct BasicDDymosimStruct*)(duser_+
      iuser_[0]+iuser_[1]+iuser_[2]+iuser_[3]),(struct BasicIDymosimStruct*)(iuser_+4));
    /* if (NRel_ > 0 && NX_ + nx2_ == 0) */
    declareNew_(x0_, dp_, 0, cuser_, QiErr, 0, (struct DeclarePhase*)0);
  }
  else if (*iopt_ == 4) {
    declareNew_(x0_,dp_,0,cuser_,QiErr, 1, (struct DeclarePhase*)0);
  } else if (*iopt_ == 5) {
    InitializeDymosimStruct((struct BasicDDymosimStruct*)(duser_),(struct BasicIDymosimStruct*)(iuser_));
  }
/* NOTE(review): label is currently unreferenced (no goto leave in this build). */
leave:
  if (*(GlobalErrorPointer()) != 0)
    *QiErr = *(GlobalErrorPointer());
  return 0;
}
#endif
/* Analytic-Jacobian availability flag and placeholder tables for builds that
 * do not define the column-group data (QJacobianCGDef_). */
#if defined(AnalyticJacobian_) && defined(AnalyticJacobianBCD_)
DYMOLA_STATIC int QJacobianDefined_=1;
#else
DYMOLA_STATIC int QJacobianDefined_=0;
#endif
#if !defined(QJacobianCGDef_)
DYMOLA_STATIC int QJacobianCG_[3]={0,0,0};
DYMOLA_STATIC struct QJacobianTag_ QJacobianGC2_[1]={{0,0}};
DYMOLA_STATIC double QJacobianCD_[1]={0};
#endif
/* Sparse-solver stubs: when the model is built without SuperLU support these
 * no-op implementations satisfy the linker.  Conventions follow the real API:
 * NULL/0 means "not available", 1 or -1 is the failure code of the real call. */
#if !defined(DYN_COMPILE_WITH_SPARSE) && !defined(FMU_SOURCE_CODE_EXPORT_SPARSE)
DYMOLA_STATIC void *superlu_enabled(int nx, int analytic_jacobian, int nnz, const int nxOrig, const int cgOffset, const int gcOffset) { return 0; }
DYMOLA_STATIC int superlu_init(void *vdata, int use_slui_data_variables, int jacobian_rowvals_needs_reset) { return 1; }
DYMOLA_STATIC int superlu_get_nnz(void *vdata) { return 0; }
DYMOLA_STATIC int superlu_get_num_procs(void *vdata) { return 0; }
DYMOLA_STATIC void* superlu_init_interface_util(void *vdata) { return (void *)0; }
DYMOLA_STATIC void* superlu_init_interface_util_2(void *vdata, int n) { return (void *)0; }
DYMOLA_STATIC int superlu_set_jacobian_group(void *vdata, int group_number, double factor, const double* ypert1, const double* ypert2,
                                             const double* fpert1, const double* fpert2, int* rowvals, double* data) { return 1; }
DYMOLA_STATIC double superlu_jacobian_norm(void *vdata, double* ewt) { return -1.0; }
DYMOLA_STATIC int superlu_scale_jacobian(void *vdata, double val) { return 1; }
DYMOLA_STATIC int superlu_shift_jacobian(void *vdata, double val) { return 1; }
DYMOLA_STATIC int superlu_get_dsmodel_pointers_for_analytic_jacobian(void *vdata, double **vals, int **rows, int **cols) { return 1; }
DYMOLA_STATIC int superlu_set_analytic_jacobian_structure(void *vdata, int *rowvals, int *colptrs) { return 1; }
DYMOLA_STATIC int superlu_reset_jacobian_structure(void *vdata, int* rowvals, int* colptrs) { return 1; }
DYMOLA_STATIC int superlu_set_analytic_jacobian_values(void *vdata, double *data) { return 1; }
DYMOLA_STATIC int superlu_factorize(void *vdata, void *slui_util, int M, int N, int NNZ, double* data, int *rowvals, int *colptrs) { return 1; }
DYMOLA_STATIC int superlu_solve(void *vdata, double* x, void *slui_util) { return 1; }
DYMOLA_STATIC void superlu_deallocate(void *vdata) { ; }
DYMOLA_STATIC void superlu_dealloc_interface_util(void* slui_util) { ; }
DYMOLA_STATIC int CVSlsSetSparseJacFn(void *cvode_mem, void *jac) { return -1; }
DYMOLA_STATIC int CVSlsGetNumJacEvals(void *ida_mem, long int *njevals) { return -1; }
DYMOLA_STATIC int CVSuperLUMT(void *cv_mem, int num_threads, int n, int nnz) { return -1; }
#ifdef GODESS
/* GODESS solver additionally needs the sparse-matrix helper stubs. */
DYMOLA_STATIC void* NewSparseMat(int M, int N, int NNZ) { return ((void*)0); }
DYMOLA_STATIC void* SlsConvertDls(void* A) { return ((void*)0); }
DYMOLA_STATIC void DestroySparseMat(void* A) { ; }
DYMOLA_STATIC void SlsSetToZero(void* A) { ; }
DYMOLA_STATIC void CopySparseMat(void* A, void* B) { ; }
DYMOLA_STATIC void ScaleSparseMat(double b, void* A) { ; }
DYMOLA_STATIC void AddIdentitySparseMat(void* A) { ; }
DYMOLA_STATIC int SlsAddMat(void* A, void* B) { return 1; }
DYMOLA_STATIC void ReallocSparseMat(void* A) { ; }
DYMOLA_STATIC int SlsMatvec(void* A, double *x, double *y) { return 1; }
DYMOLA_STATIC void PrintSparseMat(void* A) { ; }
DYMOLA_STATIC int CVSlsGetLastFlag(void *ida_mem, long int *flag) { return -1; }
DYMOLA_STATIC char *CVSlsGetReturnFlagName(long int flag) { return ""; }
DYMOLA_STATIC int CVSuperLUMTSetOrdering(void *cv_mem, int ordering_choice) { return -1; }
#endif /* GODESS */
#endif /* DYN_COMPILE_WITH_SPARSE */
/* Vector of FMI value references for possible continuous-time states; the
 * single ~0 entry is the "undefined" placeholder when none are generated. */
#if !defined(FMIStateValueReferencesDef_)
DYMOLA_STATIC unsigned int FMIStateValueReferences_[1]={~0};
#endif
/* Expose the nonlinear-solver statistics tables (per-block function/Jacobian/
 * call counters) and the number of Dymola timers.  Any out parameter may be
 * NULL to skip that value. */
DYMOLA_STATIC void GetNonlinearSolverStatistics(int*const qnlmax, const int**const qnlfunc,
                                                const int**const qnljac, const int**const qnlcalls, int*const nrtimers)
{
  if (qnlmax) *qnlmax = QNLmax;
  if (qnlfunc) *qnlfunc = QNLfunc;
  if (qnljac) *qnljac = QNLjac;
  if (qnlcalls) *qnlcalls = QNLcalls;
#ifdef NrDymolaTimers_
  if (nrtimers) *nrtimers = NrDymolaTimers_;
#else
  if (nrtimers) *nrtimers = 0;
#endif
}
/* Return the delay-buffer array and its length.  Reports at least 1 even when
 * the model has no delays; falls back to the shared tempData buffers when no
 * per-instance data (did_) is supplied.  Either out parameter may be NULL. */
DYMOLA_STATIC void getDelayStruct(struct DYNInstanceData* did_, delayStruct** del, size_t* nbrDel) {
  if(nbrDel)
    *nbrDel = SizeDelay_ ? SizeDelay_ : 1;
  if (del) {
    if (did_) {
      *del = did_->del_vec;
    }else {
      *del = tempData.del_vec;
    }
  }
}
/* Query optional integer build flags by number:
 *   1: sparse-Jacobian support (0 unless compiled with sparse support),
 *   2: DAE-solver error control of algebraic variables (default 1),
 *   3: include snapshot time in result file names (default 1).
 * Unknown flag numbers return 0. */
DYMOLA_STATIC int GetAdditionalFlags(int flag_num)
{
  switch (flag_num) {
  case 1:
#if defined(DYNSparseJacobian_) && (defined(DYN_COMPILE_WITH_SPARSE) || defined(FMU_SOURCE_CODE_EXPORT_SPARSE))
    return DYNSparseJacobian_;
#else
    return 0;
#endif
    break;
  case 2:
#ifdef DYNDAEsolverErrorControlAlgebraicVariables_
    return DYNDAEsolverErrorControlAlgebraicVariables_;
#else
    return 1;
#endif
    break;
  case 3:
#ifdef DYNResultSnapshotTimeInFileName_
    return DYNResultSnapshotTimeInFileName_;
#else
    return 1;
#endif
    break;
  default:
    return 0;
  }
}
/* Query optional real-valued build settings by number:
 *   1: result snapshot interval (0.0 when not configured).
 * Unknown numbers return 0.0. */
DYMOLA_STATIC double GetAdditionalReals(int flag_num)
{
  switch (flag_num) {
  case 1:
#ifdef DYNResultSnapshotInterval_
    return DYNResultSnapshotInterval_;
#else
    return 0.0;
#endif
    break;
  default:
    return 0.0;
  }
}
/* End dsblock5.c */
|
roi_align.c | #include <TH/TH.h>
#include <math.h>
#include <omp.h>
void ROIAlignForwardCpu(const float* bottom_data, const float spatial_scale, const int num_rois,
const int height, const int width, const int channels,
const int aligned_height, const int aligned_width, const float * bottom_rois,
float* top_data);
void ROIAlignBackwardCpu(const float* top_diff, const float spatial_scale, const int num_rois,
const int height, const int width, const int channels,
const int aligned_height, const int aligned_width, const float * bottom_rois,
float* top_data);
/* Torch-facing wrapper: unpack tensor data/shapes and run the CPU forward
 * ROI-Align kernel.  Returns 1 on success, 0 when the ROI tensor does not
 * have 5 columns (batch_idx, x1, y1, x2, y2). */
int roi_align_forward(int aligned_height, int aligned_width, float spatial_scale,
                      THFloatTensor * features, THFloatTensor * rois, THFloatTensor * output)
{
    /* Raw pointers into the three tensors. */
    float *feat_ptr = THFloatTensor_data(features);
    float *roi_ptr  = THFloatTensor_data(rois);
    float *out_ptr  = THFloatTensor_data(output);

    /* Each ROI row must be (batch_idx, x1, y1, x2, y2). */
    int n_rois   = THFloatTensor_size(rois, 0);
    int roi_cols = THFloatTensor_size(rois, 1);
    if (roi_cols != 5)
    {
        return 0;
    }

    /* Feature map layout is (batch, channels, height, width). */
    int n_channels = THFloatTensor_size(features, 1);
    int in_height  = THFloatTensor_size(features, 2);
    int in_width   = THFloatTensor_size(features, 3);

    ROIAlignForwardCpu(feat_ptr, spatial_scale, n_rois, in_height, in_width, n_channels,
                       aligned_height, aligned_width, roi_ptr, out_ptr);
    return 1;
}
/* Torch-facing wrapper: unpack tensor data/shapes and run the CPU backward
 * ROI-Align kernel (gradient w.r.t. the input feature map).  Returns 1 on
 * success, 0 when the ROI tensor does not have 5 columns. */
int roi_align_backward(int aligned_height, int aligned_width, float spatial_scale,
                       THFloatTensor * top_grad, THFloatTensor * rois, THFloatTensor * bottom_grad)
{
    /* Raw pointers into the three tensors. */
    float *grad_out_ptr = THFloatTensor_data(top_grad);
    float *roi_ptr      = THFloatTensor_data(rois);
    float *grad_in_ptr  = THFloatTensor_data(bottom_grad);

    /* Each ROI row must be (batch_idx, x1, y1, x2, y2). */
    int n_rois   = THFloatTensor_size(rois, 0);
    int roi_cols = THFloatTensor_size(rois, 1);
    if (roi_cols != 5)
    {
        return 0;
    }

    /* Gradient buffer layout is (batch, channels, height, width). */
    int n_batch    = THFloatTensor_size(bottom_grad, 0);
    int n_channels = THFloatTensor_size(bottom_grad, 1);
    int in_height  = THFloatTensor_size(bottom_grad, 2);
    int in_width   = THFloatTensor_size(bottom_grad, 3);
    (void)n_batch; /* batch size is implied by the ROI batch indices */

    ROIAlignBackwardCpu(grad_out_ptr, spatial_scale, n_rois, in_height,
                        in_width, n_channels, aligned_height, aligned_width, roi_ptr, grad_in_ptr);
    return 1;
}
void ROIAlignForwardCpu(const float* bottom_data, const float spatial_scale, const int num_rois,
const int height, const int width, const int channels,
const int aligned_height, const int aligned_width, const float * bottom_rois,
float* top_data)
{
int pw, ph, c, n;
float roi_batch_ind, roi_start_w, roi_start_h, roi_end_w, roi_end_h;
// Force malformed ROI to be 1x1
float roi_width, roi_height, bin_size_h, bin_size_w;
float h, w;
int hstart, wstart;
int img_start;
float h_ratio, w_ratio;
int upleft, upright, downleft, downright;
const int output_size = num_rois * aligned_height * aligned_width * channels;
int idx;
#pragma omp parallel for
for (idx = 0; idx < output_size; ++idx)
{
// (n, c, ph, pw) is an element in the aligned output
pw = idx % aligned_width;
ph = (idx / aligned_width) % aligned_height;
c = (idx / aligned_width / aligned_height) % channels;
n = idx / aligned_width / aligned_height / channels;
roi_batch_ind = bottom_rois[n * 5 + 0];
roi_start_w = bottom_rois[n * 5 + 1] * spatial_scale;
roi_start_h = bottom_rois[n * 5 + 2] * spatial_scale;
roi_end_w = bottom_rois[n * 5 + 3] * spatial_scale;
roi_end_h = bottom_rois[n * 5 + 4] * spatial_scale;
// Force malformed ROI to be 1x1
roi_width = fmaxf(roi_end_w - roi_start_w + 1., 0.);
roi_height = fmaxf(roi_end_h - roi_start_h + 1., 0.);
bin_size_h = roi_height / (aligned_height - 1.);
bin_size_w = roi_width / (aligned_width - 1.);
h = (float)(ph) * bin_size_h + roi_start_h;
w = (float)(pw) * bin_size_w + roi_start_w;
hstart = fminf(floor(h), height - 2);
wstart = fminf(floor(w), width - 2);
img_start = roi_batch_ind * channels * height * width;
// bilinear interpolation
if (h < 0 || h >= height || w < 0 || w >= width)
{
top_data[idx] = 0.;
}
else
{
h_ratio = h - (float)(hstart);
w_ratio = w - (float)(wstart);
upleft = img_start + (c * height + hstart) * width + wstart;
upright = upleft + 1;
downleft = upleft + width;
downright = downleft + 1;
top_data[idx] = bottom_data[upleft] * (1. - h_ratio) * (1. - w_ratio)
+ bottom_data[upright] * (1. - h_ratio) * w_ratio
+ bottom_data[downleft] * h_ratio * (1. - w_ratio)
+ bottom_data[downright] * h_ratio * w_ratio;
}
}
}
void ROIAlignBackwardCpu(const float* top_diff, const float spatial_scale, const int num_rois,
const int height, const int width, const int channels,
const int aligned_height, const int aligned_width, const float * bottom_rois,
float* bottom_diff)
{
int pw, ph, c, n;
float roi_batch_ind, roi_start_w, roi_start_h, roi_end_w, roi_end_h;
float roi_width, roi_height, bin_size_w, bin_size_h;
float h, w;
int hstart, wstart, img_start;
float h_ratio, w_ratio;
int upleft, upright, downleft, downright;
const int output_size = num_rois * aligned_height * aligned_width * channels;
int idx;
#pragma omp parallel for
for (idx = 0; idx < output_size; ++idx)
{
// (n, c, ph, pw) is an element in the aligned output
int pw = idx % aligned_width;
int ph = (idx / aligned_width) % aligned_height;
int c = (idx / aligned_width / aligned_height) % channels;
int n = idx / aligned_width / aligned_height / channels;
float roi_batch_ind = bottom_rois[n * 5 + 0];
float roi_start_w = bottom_rois[n * 5 + 1] * spatial_scale;
float roi_start_h = bottom_rois[n * 5 + 2] * spatial_scale;
float roi_end_w = bottom_rois[n * 5 + 3] * spatial_scale;
float roi_end_h = bottom_rois[n * 5 + 4] * spatial_scale;
// Force malformed ROI to be 1x1
float roi_width = fmaxf(roi_end_w - roi_start_w + 1., 0.);
float roi_height = fmaxf(roi_end_h - roi_start_h + 1., 0.);
float bin_size_h = roi_height / (aligned_height - 1.);
float bin_size_w = roi_width / (aligned_width - 1.);
float h = (float)(ph) * bin_size_h + roi_start_h;
float w = (float)(pw) * bin_size_w + roi_start_w;
int hstart = fminf(floor(h), height - 2);
int wstart = fminf(floor(w), width - 2);
int img_start = roi_batch_ind * channels * height * width;
// bilinear interpolation
if (h < 0 || h >= height || w < 0 || w >= width)
{
float h_ratio = h - (float)(hstart);
float w_ratio = w - (float)(wstart);
int upleft = img_start + (c * height + hstart) * width + wstart;
int upright = upleft + 1;
int downleft = upleft + width;
int downright = downleft + 1;
bottom_diff[upleft] += top_diff[idx] * (1. - h_ratio) * (1. - w_ratio);
bottom_diff[upright] += top_diff[idx] * (1. - h_ratio) * w_ratio;
bottom_diff[downleft] += top_diff[idx] * h_ratio * (1. - w_ratio);
bottom_diff[downright] += top_diff[idx] * h_ratio * w_ratio;
}
}
}
|
computeGraph.c | #include "defs.h"
/* Build a CSR-style adjacency structure (numEdges offsets, endV targets,
 * weight array) from the raw edge list in SDGdata, in parallel when OpenMP
 * is enabled.  Thread 0 performs all allocations; barriers separate the
 * phases (degree counting with per-vertex locks, prefix sums, edge scatter).
 * Frees the input edge arrays when done and returns the elapsed wall time. */
double computeGraph(graph* G, graphSDG* SDGdata) {
  VERT_T* endV;
  LONG_T *degree, *numEdges, *pos, *pSums;
  WEIGHT_T* w;
  double elapsed_time;
#ifdef _OPENMP
  omp_lock_t *vLock;
  LONG_T chunkSize;
#endif
  elapsed_time = get_seconds();
#ifdef _OPENMP
  omp_set_num_threads(NUM_THREADS);
#endif
#ifdef _OPENMP
#pragma omp parallel
#endif
  {
    LONG_T i, j, u, n, m, tid, nthreads;
#ifdef DIAGNOSTIC
    double elapsed_time_part;
#endif
#ifdef _OPENMP
    nthreads = omp_get_num_threads();
    tid = omp_get_thread_num();
#else
    tid = 0;
    nthreads = 1;
#endif
    n = N;
    m = M;
    /* Phase 0: thread 0 allocates shared buffers; barrier below publishes them. */
    if (tid == 0) {
#ifdef _OPENMP
      vLock = (omp_lock_t *) malloc(n*sizeof(omp_lock_t));
      assert(vLock != NULL);
      chunkSize = n/nthreads;
#endif
      pos = (LONG_T *) malloc(m*sizeof(LONG_T));
      assert(pos != NULL);
      degree = (LONG_T *) calloc(n, sizeof(LONG_T));
      assert(degree != NULL);
    }
#ifdef DIAGNOSTIC
    if (tid == 0) {
      elapsed_time_part = get_seconds();
    }
#endif
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for schedule(static, chunkSize)
    for (i=0; i<n; i++) {
      omp_init_lock(&vLock[i]);
    }
#pragma omp barrier
#ifdef DIAGNOSTIC
    if (tid == 0) {
      elapsed_time_part = get_seconds() - elapsed_time_part;
      fprintf(stderr, "Lock initialization time: %lf seconds\n",
              elapsed_time_part);
      elapsed_time_part = get_seconds();
    }
#endif
#pragma omp for
#endif
    /* Phase 1: count per-vertex out-degrees; pos[i] records the slot of edge i
       within its source vertex's block (lock protects the shared counter). */
    for (i=0; i<m; i++) {
      u = SDGdata->startVertex[i];
#ifdef _OPENMP
      omp_set_lock(&vLock[u]);
#endif
      pos[i] = degree[u]++;
#ifdef _OPENMP
      omp_unset_lock(&vLock[u]);
#endif
    }
#ifdef DIAGNOSTIC
    if (tid == 0) {
      elapsed_time_part = get_seconds() - elapsed_time_part;
      fprintf(stderr, "Degree computation time: %lf seconds\n",
              elapsed_time_part);
      elapsed_time_part = get_seconds();
    }
#endif
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for schedule(static, chunkSize)
    for (i=0; i<n; i++) {
      omp_destroy_lock(&vLock[i]);
    }
    if (tid == 0)
      free(vLock);
#endif
#ifdef DIAGNOSTIC
    if (tid == 0) {
      elapsed_time_part = get_seconds() - elapsed_time_part;
      fprintf(stderr, "Lock destruction time: %lf seconds\n",
              elapsed_time_part);
      elapsed_time_part = get_seconds();
    }
#endif
    if (tid == 0) {
      numEdges = (LONG_T *) malloc((n+1)*sizeof(LONG_T));
      pSums = (LONG_T *) malloc(nthreads*sizeof(LONG_T));
    }
#ifdef _OPENMP
#pragma omp barrier
#endif
    /* Phase 2: exclusive prefix sums of degrees give the CSR row offsets. */
    prefix_sums(degree, numEdges, pSums, n);
#ifdef DIAGNOSTIC
    if (tid == 0) {
      elapsed_time_part = get_seconds() - elapsed_time_part;
      fprintf(stderr, "Prefix sums time: %lf seconds\n",
              elapsed_time_part);
      elapsed_time_part = get_seconds();
    }
#endif
#ifdef _OPENMP
#pragma omp barrier
#endif
    if (tid == 0) {
      free(degree);
      free(pSums);
      w = (WEIGHT_T *) malloc(m*sizeof(WEIGHT_T));
      endV = (VERT_T *) malloc(m* sizeof(VERT_T));
    }
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
    /* Phase 3: scatter each edge into its final CSR slot (offset + pos). */
    for (i=0; i<m; i++) {
      u = SDGdata->startVertex[i];
      j = numEdges[u] + pos[i];
      endV[j] = SDGdata->endVertex[i];
      w[j] = SDGdata->weight[i];
    }
#ifdef DIAGNOSTIC
    if (tid == 0) {
      elapsed_time_part = get_seconds() - elapsed_time_part;
      fprintf(stderr, "Edge data structure construction time: %lf seconds\n",
              elapsed_time_part);
      elapsed_time_part = get_seconds();
    }
#endif
    if (tid == 0) {
      free(pos);
      G->n = n;
      G->m = m;
      G->numEdges = numEdges;
      G->endV = endV;
      G->weight = w;
    }
#ifdef _OPENMP
#endif
  }
  /* Verification */
#if 0
  fprintf(stderr, "SDG data:\n");
  for (int i=0; i<SDGdata->m; i++) {
    fprintf(stderr, "[%ld %ld %ld] ", SDGdata->startVertex[i],
            SDGdata->endVertex[i], SDGdata->weight[i]);
  }
  fprintf(stderr, "\n");
  for (int i=0; i<G->n + 1; i++) {
    fprintf(stderr, "[%ld] ", G->numEdges[i]);
  }
  fprintf(stderr, "\nGraph:\n");
  for (int i=0; i<G->n; i++) {
    for (int j=G->numEdges[i]; j<G->numEdges[i+1]; j++) {
      fprintf(stderr, "[%ld %ld %ld] ", i, G->endV[j], G->weight[j]);
    }
  }
#endif
  /* The raw edge list is consumed; release it. */
  free(SDGdata->startVertex);
  free(SDGdata->endVertex);
  free(SDGdata->weight);
  elapsed_time = get_seconds() - elapsed_time;
  return elapsed_time;
}
|
vla_crash.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -verify -triple powerpc64le-unknown-linux-gnu -fopenmp -x c -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -verify -triple powerpc64le-unknown-linux-gnu -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
int a;
/* Regression test: VLA-typed locals captured inside a serialized
 * ("if (0)") parallel region must not crash code generation. */
void foo() {
  int(*b)[a];
  int *(**c)[a];
#pragma omp parallel if (0)
  b[0][0] = c[0][a][0][a];
}
/* Regression test: diagnostics on VLA pointer types must still fire inside an
 * "if(0)" parallel region; the expected-warning lines are consumed by -verify
 * and must stay immediately above their target statements. */
void bar(int n, int *a) {
  // expected-warning@+1 {{incompatible pointer types initializing 'int (*)[n]' with an expression of type 'int **'}}
  int(*p)[n] = &a;
#pragma omp parallel if(0)
  // expected-warning@+1 {{comparison of distinct pointer types ('int (*)[n]' and 'int **')}}
  if (p == &a) {
  }
}
// CHECK1-LABEL: define {{[^@]+}}@foo
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[B:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[C:%.*]] = alloca i32***, align 8
// CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* @a, align 4
// CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* @a, align 4
// CHECK1-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK1-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK1-NEXT: call void @.omp_outlined.(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP2]], i32** [[B]], i64 [[TMP4]], i32**** [[C]]) #[[ATTR2:[0-9]+]]
// CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i64 [[VLA1:%.*]], i32**** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 8
// CHECK1-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i32****, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: store i32**** [[C]], i32***** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32****, i32***** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32***, i32**** [[TMP3]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32**, i32*** [[TMP4]], i64 0
// CHECK1-NEXT: [[TMP5:%.*]] = load i32**, i32*** [[ARRAYIDX]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* @a, align 4
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64
// CHECK1-NEXT: [[TMP7:%.*]] = mul nsw i64 [[IDXPROM]], [[TMP2]]
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32*, i32** [[TMP5]], i64 [[TMP7]]
// CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32*, i32** [[ARRAYIDX3]], i64 0
// CHECK1-NEXT: [[TMP8:%.*]] = load i32*, i32** [[ARRAYIDX4]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* @a, align 4
// CHECK1-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP9]] to i64
// CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i64 [[IDXPROM5]]
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = mul nsw i64 0, [[TMP0]]
// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP11]], i64 [[TMP12]]
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[ARRAYIDX7]], i64 0
// CHECK1-NEXT: store i32 [[TMP10]], i32* [[ARRAYIDX8]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@bar
// CHECK1-SAME: (i32 signext [[N:%.*]], i32* [[A:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[P:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i32** [[A_ADDR]] to i32*
// CHECK1-NEXT: store i32* [[TMP3]], i32** [[P]], align 8
// CHECK1-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK1-NEXT: call void @.omp_outlined..1(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP2]], i32** [[P]], i32** [[A_ADDR]]) #[[ATTR2]]
// CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32** nonnull align 8 dereferenceable(8) [[P:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[P_ADDR:%.*]] = alloca i32**, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: store i32** [[P]], i32*** [[P_ADDR]], align 8
// CHECK1-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[P_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i32** [[TMP2]] to i32*
// CHECK1-NEXT: [[CMP:%.*]] = icmp eq i32* [[TMP3]], [[TMP4]]
// CHECK1-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
// CHECK1: if.then:
// CHECK1-NEXT: br label [[IF_END]]
// CHECK1: if.end:
// CHECK1-NEXT: ret void
//
// |
convolutiondepthwise_3x3_pack8_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise 3x3 convolution, stride 1, int8 input in pack8 layout (8 channels
// interleaved per pixel), accumulating into int32 output. One group == one
// channel; groups are processed independently, one per OpenMP iteration.
// NOTE(review): the pointer-advance math (r += 2*8 + w*8 after a 2-row pass,
// r += 2*8 at each row end) implies the bottom blob is pre-padded so that
// w == outw + 2 -- confirm against the caller's padding logic.
static void convdw3x3s1_pack8_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        const signed char* k0 = kernel.row<const signed char>(g);

        // Two output row pointers: the main loop produces rows i and i+1 together.
        int* outptr0 = out.row<int>(0);
        int* outptr1 = out.row<int>(1);

        const Mat img0 = bottom_blob.channel(g);

        // Four input rows feed two output rows (3-tap window, stride 1).
        const signed char* r0 = img0.row<const signed char>(0);
        const signed char* r1 = img0.row<const signed char>(1);
        const signed char* r2 = img0.row<const signed char>(2);
        const signed char* r3 = img0.row<const signed char>(3);

        // The nine 3x3 kernel taps, 8 int8 lanes each (one tap per packed channel).
        int8x8_t _k00 = vld1_s8(k0);
        int8x8_t _k01 = vld1_s8(k0 + 8);
        int8x8_t _k02 = vld1_s8(k0 + 16);
        int8x8_t _k10 = vld1_s8(k0 + 24);
        int8x8_t _k11 = vld1_s8(k0 + 32);
        int8x8_t _k12 = vld1_s8(k0 + 40);
        int8x8_t _k20 = vld1_s8(k0 + 48);
        int8x8_t _k21 = vld1_s8(k0 + 56);
        int8x8_t _k22 = vld1_s8(k0 + 64);

        int i = 0;
        for (; i + 1 < outh; i += 2)
        {
            int j = 0;
            // 2x2 output tile per iteration: rows i/i+1, columns j/j+1.
            for (; j + 1 < outw; j += 2)
            {
                // _rXYZW = two adjacent pack8 pixels per 128-bit register.
                int8x16_t _r0001 = vld1q_s8(r0);
                int8x16_t _r0203 = vld1q_s8(r0 + 16);
                int8x16_t _r1011 = vld1q_s8(r1);
                int8x16_t _r1213 = vld1q_s8(r1 + 16);
                int8x16_t _r2021 = vld1q_s8(r2);
                int8x16_t _r2223 = vld1q_s8(r2 + 16);
                int8x16_t _r3031 = vld1q_s8(r3);
                int8x16_t _r3233 = vld1q_s8(r3 + 16);

                // _sNt: partial int16 products for output N (00=row i col j,
                // 1x=row i col j+1, 2x/3x=row i+1 cols j/j+1), tap group t.
                int16x8_t _s00 = vmull_s8(vget_low_s8(_r0001), _k00);
                int16x8_t _s01 = vmull_s8(vget_high_s8(_r0001), _k01);
                int16x8_t _s02 = vmull_s8(vget_low_s8(_r0203), _k02);
                int16x8_t _s03 = vmull_s8(vget_low_s8(_r1011), _k10);
                int16x8_t _s10 = vmull_s8(vget_high_s8(_r0001), _k00);
                int16x8_t _s11 = vmull_s8(vget_low_s8(_r0203), _k01);
                int16x8_t _s12 = vmull_s8(vget_high_s8(_r0203), _k02);
                int16x8_t _s13 = vmull_s8(vget_high_s8(_r1011), _k10);
                int16x8_t _s20 = vmull_s8(vget_low_s8(_r1011), _k00);
                int16x8_t _s21 = vmull_s8(vget_high_s8(_r1011), _k01);
                int16x8_t _s22 = vmull_s8(vget_low_s8(_r1213), _k02);
                int16x8_t _s23 = vmull_s8(vget_low_s8(_r2021), _k10);
                int16x8_t _s30 = vmull_s8(vget_high_s8(_r1011), _k00);
                int16x8_t _s31 = vmull_s8(vget_low_s8(_r1213), _k01);
                int16x8_t _s32 = vmull_s8(vget_high_s8(_r1213), _k02);
                int16x8_t _s33 = vmull_s8(vget_high_s8(_r2021), _k10);
                _s00 = vmlal_s8(_s00, vget_high_s8(_r1011), _k11);
                _s01 = vmlal_s8(_s01, vget_low_s8(_r1213), _k12);
                _s02 = vmlal_s8(_s02, vget_low_s8(_r2021), _k20);
                _s03 = vmlal_s8(_s03, vget_high_s8(_r2021), _k21);
                _s10 = vmlal_s8(_s10, vget_low_s8(_r1213), _k11);
                _s11 = vmlal_s8(_s11, vget_high_s8(_r1213), _k12);
                _s12 = vmlal_s8(_s12, vget_high_s8(_r2021), _k20);
                _s13 = vmlal_s8(_s13, vget_low_s8(_r2223), _k21);
                _s20 = vmlal_s8(_s20, vget_high_s8(_r2021), _k11);
                _s21 = vmlal_s8(_s21, vget_low_s8(_r2223), _k12);
                _s22 = vmlal_s8(_s22, vget_low_s8(_r3031), _k20);
                _s23 = vmlal_s8(_s23, vget_high_s8(_r3031), _k21);
                _s30 = vmlal_s8(_s30, vget_low_s8(_r2223), _k11);
                _s31 = vmlal_s8(_s31, vget_high_s8(_r2223), _k12);
                _s32 = vmlal_s8(_s32, vget_high_s8(_r3031), _k20);
                _s33 = vmlal_s8(_s33, vget_low_s8(_r3233), _k21);

                // Ninth tap (k22) kept separate; folded in during widening below.
                int16x8_t _s08 = vmull_s8(vget_low_s8(_r2223), _k22);
                int16x8_t _s18 = vmull_s8(vget_high_s8(_r2223), _k22);
                int16x8_t _s28 = vmull_s8(vget_low_s8(_r3233), _k22);
                int16x8_t _s38 = vmull_s8(vget_high_s8(_r3233), _k22);

                // Widen int16 partials to int32 and reduce the nine taps.
                int32x4_t _sum00 = vaddl_s16(vget_low_s16(_s00), vget_low_s16(_s01));
                int32x4_t _sum01 = vaddl_s16(vget_high_s16(_s00), vget_high_s16(_s01));
                int32x4_t _sum02 = vaddl_s16(vget_low_s16(_s02), vget_low_s16(_s03));
                int32x4_t _sum03 = vaddl_s16(vget_high_s16(_s02), vget_high_s16(_s03));
                int32x4_t _sum10 = vaddl_s16(vget_low_s16(_s10), vget_low_s16(_s11));
                int32x4_t _sum11 = vaddl_s16(vget_high_s16(_s10), vget_high_s16(_s11));
                int32x4_t _sum12 = vaddl_s16(vget_low_s16(_s12), vget_low_s16(_s13));
                int32x4_t _sum13 = vaddl_s16(vget_high_s16(_s12), vget_high_s16(_s13));
                int32x4_t _sum20 = vaddl_s16(vget_low_s16(_s20), vget_low_s16(_s21));
                int32x4_t _sum21 = vaddl_s16(vget_high_s16(_s20), vget_high_s16(_s21));
                int32x4_t _sum22 = vaddl_s16(vget_low_s16(_s22), vget_low_s16(_s23));
                int32x4_t _sum23 = vaddl_s16(vget_high_s16(_s22), vget_high_s16(_s23));
                int32x4_t _sum30 = vaddl_s16(vget_low_s16(_s30), vget_low_s16(_s31));
                int32x4_t _sum31 = vaddl_s16(vget_high_s16(_s30), vget_high_s16(_s31));
                int32x4_t _sum32 = vaddl_s16(vget_low_s16(_s32), vget_low_s16(_s33));
                int32x4_t _sum33 = vaddl_s16(vget_high_s16(_s32), vget_high_s16(_s33));
                _sum00 = vaddw_s16(_sum00, vget_low_s16(_s08));
                _sum01 = vaddw_s16(_sum01, vget_high_s16(_s08));
                _sum10 = vaddw_s16(_sum10, vget_low_s16(_s18));
                _sum11 = vaddw_s16(_sum11, vget_high_s16(_s18));
                _sum20 = vaddw_s16(_sum20, vget_low_s16(_s28));
                _sum21 = vaddw_s16(_sum21, vget_high_s16(_s28));
                _sum30 = vaddw_s16(_sum30, vget_low_s16(_s38));
                _sum31 = vaddw_s16(_sum31, vget_high_s16(_s38));
                _sum00 = vaddq_s32(_sum00, _sum02);
                _sum01 = vaddq_s32(_sum01, _sum03);
                _sum10 = vaddq_s32(_sum10, _sum12);
                _sum11 = vaddq_s32(_sum11, _sum13);
                _sum20 = vaddq_s32(_sum20, _sum22);
                _sum21 = vaddq_s32(_sum21, _sum23);
                _sum30 = vaddq_s32(_sum30, _sum32);
                _sum31 = vaddq_s32(_sum31, _sum33);

                vst1q_s32(outptr0, _sum00);
                vst1q_s32(outptr0 + 4, _sum01);
                vst1q_s32(outptr0 + 8, _sum10);
                vst1q_s32(outptr0 + 12, _sum11);
                vst1q_s32(outptr1, _sum20);
                vst1q_s32(outptr1 + 4, _sum21);
                vst1q_s32(outptr1 + 8, _sum30);
                vst1q_s32(outptr1 + 12, _sum31);

                r0 += 16;
                r1 += 16;
                r2 += 16;
                r3 += 16;
                outptr0 += 16;
                outptr1 += 16;
            }
            // Column tail: one output pixel (2 rows) per iteration.
            for (; j < outw; j++)
            {
                int8x8_t _r00 = vld1_s8(r0);
                int8x8_t _r01 = vld1_s8(r0 + 8);
                int8x8_t _r02 = vld1_s8(r0 + 16);
                int8x8_t _r10 = vld1_s8(r1);
                int8x8_t _r11 = vld1_s8(r1 + 8);
                int8x8_t _r12 = vld1_s8(r1 + 16);
                int8x8_t _r20 = vld1_s8(r2);
                int8x8_t _r21 = vld1_s8(r2 + 8);
                int8x8_t _r22 = vld1_s8(r2 + 16);

                int16x8_t _s00 = vmull_s8(_r00, _k00);
                int16x8_t _s01 = vmull_s8(_r01, _k01);
                int16x8_t _s02 = vmull_s8(_r02, _k02);
                int16x8_t _s03 = vmull_s8(_r10, _k10);
                int16x8_t _s10 = vmull_s8(_r10, _k00);
                int16x8_t _s11 = vmull_s8(_r11, _k01);
                int16x8_t _s12 = vmull_s8(_r12, _k02);
                int16x8_t _s13 = vmull_s8(_r20, _k10);
                _s00 = vmlal_s8(_s00, _r11, _k11);
                _s01 = vmlal_s8(_s01, _r12, _k12);
                _s02 = vmlal_s8(_s02, _r20, _k20);
                _s03 = vmlal_s8(_s03, _r21, _k21);
                _s10 = vmlal_s8(_s10, _r21, _k11);
                _s11 = vmlal_s8(_s11, _r22, _k12);
                _s12 = vmlal_s8(_s12, _r30, _k20);
                _s13 = vmlal_s8(_s13, _r31, _k21);

                int16x8_t _s08 = vmull_s8(_r22, _k22);
                int16x8_t _s18 = vmull_s8(_r32, _k22);

                int32x4_t _sum00 = vaddl_s16(vget_low_s16(_s00), vget_low_s16(_s01));
                int32x4_t _sum01 = vaddl_s16(vget_high_s16(_s00), vget_high_s16(_s01));
                int32x4_t _sum02 = vaddl_s16(vget_low_s16(_s02), vget_low_s16(_s03));
                int32x4_t _sum03 = vaddl_s16(vget_high_s16(_s02), vget_high_s16(_s03));
                int32x4_t _sum10 = vaddl_s16(vget_low_s16(_s10), vget_low_s16(_s11));
                int32x4_t _sum11 = vaddl_s16(vget_high_s16(_s10), vget_high_s16(_s11));
                int32x4_t _sum12 = vaddl_s16(vget_low_s16(_s12), vget_low_s16(_s13));
                int32x4_t _sum13 = vaddl_s16(vget_high_s16(_s12), vget_high_s16(_s13));
                _sum00 = vaddw_s16(_sum00, vget_low_s16(_s08));
                _sum01 = vaddw_s16(_sum01, vget_high_s16(_s08));
                _sum10 = vaddw_s16(_sum10, vget_low_s16(_s18));
                _sum11 = vaddw_s16(_sum11, vget_high_s16(_s18));
                _sum00 = vaddq_s32(_sum00, _sum02);
                _sum01 = vaddq_s32(_sum01, _sum03);
                _sum10 = vaddq_s32(_sum10, _sum12);
                _sum11 = vaddq_s32(_sum11, _sum13);

                vst1q_s32(outptr0, _sum00);
                vst1q_s32(outptr0 + 4, _sum01);
                vst1q_s32(outptr1, _sum10);
                vst1q_s32(outptr1 + 4, _sum11);

                r0 += 8;
                r1 += 8;
                r2 += 8;
                r3 += 8;
                outptr0 += 8;
                outptr1 += 8;
            }

            // Skip 2 padding pixels plus one whole input row (two output rows consumed).
            r0 += 2 * 8 + w * 8;
            r1 += 2 * 8 + w * 8;
            r2 += 2 * 8 + w * 8;
            r3 += 2 * 8 + w * 8;

            // outptr already advanced one row's worth; skip over the other row.
            outptr0 += outw * 8;
            outptr1 += outw * 8;
        }
        // Row tail: last output row when outh is odd (r3/outptr1 unused here).
        for (; i < outh; i++)
        {
            int j = 0;
            for (; j + 1 < outw; j += 2)
            {
                int8x16_t _r0001 = vld1q_s8(r0);
                int8x16_t _r0203 = vld1q_s8(r0 + 16);
                int8x16_t _r1011 = vld1q_s8(r1);
                int8x16_t _r1213 = vld1q_s8(r1 + 16);
                int8x16_t _r2021 = vld1q_s8(r2);
                int8x16_t _r2223 = vld1q_s8(r2 + 16);

                int16x8_t _s00 = vmull_s8(vget_low_s8(_r0001), _k00);
                int16x8_t _s01 = vmull_s8(vget_high_s8(_r0001), _k01);
                int16x8_t _s02 = vmull_s8(vget_low_s8(_r0203), _k02);
                int16x8_t _s03 = vmull_s8(vget_low_s8(_r1011), _k10);
                int16x8_t _s10 = vmull_s8(vget_high_s8(_r0001), _k00);
                int16x8_t _s11 = vmull_s8(vget_low_s8(_r0203), _k01);
                int16x8_t _s12 = vmull_s8(vget_high_s8(_r0203), _k02);
                int16x8_t _s13 = vmull_s8(vget_high_s8(_r1011), _k10);
                _s00 = vmlal_s8(_s00, vget_high_s8(_r1011), _k11);
                _s01 = vmlal_s8(_s01, vget_low_s8(_r1213), _k12);
                _s02 = vmlal_s8(_s02, vget_low_s8(_r2021), _k20);
                _s03 = vmlal_s8(_s03, vget_high_s8(_r2021), _k21);
                _s10 = vmlal_s8(_s10, vget_low_s8(_r1213), _k11);
                _s11 = vmlal_s8(_s11, vget_high_s8(_r1213), _k12);
                _s12 = vmlal_s8(_s12, vget_high_s8(_r2021), _k20);
                _s13 = vmlal_s8(_s13, vget_low_s8(_r2223), _k21);

                int16x8_t _s08 = vmull_s8(vget_low_s8(_r2223), _k22);
                int16x8_t _s18 = vmull_s8(vget_high_s8(_r2223), _k22);

                int32x4_t _sum00 = vaddl_s16(vget_low_s16(_s00), vget_low_s16(_s01));
                int32x4_t _sum01 = vaddl_s16(vget_high_s16(_s00), vget_high_s16(_s01));
                int32x4_t _sum02 = vaddl_s16(vget_low_s16(_s02), vget_low_s16(_s03));
                int32x4_t _sum03 = vaddl_s16(vget_high_s16(_s02), vget_high_s16(_s03));
                int32x4_t _sum10 = vaddl_s16(vget_low_s16(_s10), vget_low_s16(_s11));
                int32x4_t _sum11 = vaddl_s16(vget_high_s16(_s10), vget_high_s16(_s11));
                int32x4_t _sum12 = vaddl_s16(vget_low_s16(_s12), vget_low_s16(_s13));
                int32x4_t _sum13 = vaddl_s16(vget_high_s16(_s12), vget_high_s16(_s13));
                _sum00 = vaddw_s16(_sum00, vget_low_s16(_s08));
                _sum01 = vaddw_s16(_sum01, vget_high_s16(_s08));
                _sum10 = vaddw_s16(_sum10, vget_low_s16(_s18));
                _sum11 = vaddw_s16(_sum11, vget_high_s16(_s18));
                _sum00 = vaddq_s32(_sum00, _sum02);
                _sum01 = vaddq_s32(_sum01, _sum03);
                _sum10 = vaddq_s32(_sum10, _sum12);
                _sum11 = vaddq_s32(_sum11, _sum13);

                vst1q_s32(outptr0, _sum00);
                vst1q_s32(outptr0 + 4, _sum01);
                vst1q_s32(outptr0 + 8, _sum10);
                vst1q_s32(outptr0 + 12, _sum11);

                r0 += 16;
                r1 += 16;
                r2 += 16;
                outptr0 += 16;
            }
            for (; j < outw; j++)
            {
                int8x8_t _r00 = vld1_s8(r0);
                int8x8_t _r01 = vld1_s8(r0 + 8);
                int8x8_t _r02 = vld1_s8(r0 + 16);
                int8x8_t _r10 = vld1_s8(r1);
                int8x8_t _r11 = vld1_s8(r1 + 8);
                int8x8_t _r12 = vld1_s8(r1 + 16);
                int8x8_t _r20 = vld1_s8(r2);
                int8x8_t _r21 = vld1_s8(r2 + 8);
                int8x8_t _r22 = vld1_s8(r2 + 16);

                int16x8_t _s0 = vmull_s8(_r00, _k00);
                int16x8_t _s1 = vmull_s8(_r01, _k01);
                int16x8_t _s2 = vmull_s8(_r02, _k02);
                int16x8_t _s3 = vmull_s8(_r10, _k10);
                _s0 = vmlal_s8(_s0, _r11, _k11);
                _s1 = vmlal_s8(_s1, _r12, _k12);
                _s2 = vmlal_s8(_s2, _r20, _k20);
                _s3 = vmlal_s8(_s3, _r21, _k21);
                int16x8_t _s4 = vmull_s8(_r22, _k22);

                int32x4_t _sum0 = vaddl_s16(vget_low_s16(_s0), vget_low_s16(_s1));
                int32x4_t _sum1 = vaddl_s16(vget_high_s16(_s0), vget_high_s16(_s1));
                int32x4_t _sum2 = vaddl_s16(vget_low_s16(_s2), vget_low_s16(_s3));
                int32x4_t _sum3 = vaddl_s16(vget_high_s16(_s2), vget_high_s16(_s3));
                _sum0 = vaddw_s16(_sum0, vget_low_s16(_s4));
                _sum1 = vaddw_s16(_sum1, vget_high_s16(_s4));
                _sum0 = vaddq_s32(_sum0, _sum2);
                _sum1 = vaddq_s32(_sum1, _sum3);

                vst1q_s32(outptr0, _sum0);
                vst1q_s32(outptr0 + 4, _sum1);

                r0 += 8;
                r1 += 8;
                r2 += 8;
                outptr0 += 8;
            }

            // Skip the 2 padding pixels at the end of the input row.
            r0 += 2 * 8;
            r1 += 2 * 8;
            r2 += 2 * 8;
        }
    }
}
// Depthwise 3x3 convolution, stride 2, int8 input in pack8 layout, int32
// output. One group == one channel; groups run in parallel.
// tailstep = (w - 2*outw + w) * 8: after a row, the read pointers sit at
// 2*outw pixels; this jumps them over the remainder of the current input row
// plus one whole extra row (stride 2 consumes two input rows per output row).
static void convdw3x3s2_pack8_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    const int tailstep = (w - 2 * outw + w) * 8;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        const signed char* k0 = kernel.row<const signed char>(g);

        int* outptr0 = out;

        const Mat img0 = bottom_blob.channel(g);

        // Three input rows feed one output row (3-tap window).
        const signed char* r0 = img0.row<const signed char>(0);
        const signed char* r1 = img0.row<const signed char>(1);
        const signed char* r2 = img0.row<const signed char>(2);

        // The nine 3x3 kernel taps, 8 int8 lanes each (one tap per packed channel).
        int8x8_t _k00 = vld1_s8(k0);
        int8x8_t _k01 = vld1_s8(k0 + 8);
        int8x8_t _k02 = vld1_s8(k0 + 16);
        int8x8_t _k10 = vld1_s8(k0 + 24);
        int8x8_t _k11 = vld1_s8(k0 + 32);
        int8x8_t _k12 = vld1_s8(k0 + 40);
        int8x8_t _k20 = vld1_s8(k0 + 48);
        int8x8_t _k21 = vld1_s8(k0 + 56);
        int8x8_t _k22 = vld1_s8(k0 + 64);

        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            // Two output pixels per iteration; stride 2 means the second pixel's
            // window starts at input offset +2 pixels (_r02/_r12/_r22).
            for (; j + 1 < outw; j += 2)
            {
                int8x8_t _r00 = vld1_s8(r0);
                int8x8_t _r01 = vld1_s8(r0 + 8);
                int8x8_t _r02 = vld1_s8(r0 + 16);
                int8x8_t _r03 = vld1_s8(r0 + 24);
                int8x8_t _r04 = vld1_s8(r0 + 32);
                int8x8_t _r10 = vld1_s8(r1);
                int8x8_t _r11 = vld1_s8(r1 + 8);
                int8x8_t _r12 = vld1_s8(r1 + 16);
                int8x8_t _r13 = vld1_s8(r1 + 24);
                int8x8_t _r14 = vld1_s8(r1 + 32);
                int8x8_t _r20 = vld1_s8(r2);
                int8x8_t _r21 = vld1_s8(r2 + 8);
                int8x8_t _r22 = vld1_s8(r2 + 16);
                int8x8_t _r23 = vld1_s8(r2 + 24);
                int8x8_t _r24 = vld1_s8(r2 + 32);

                // _s0x: pixel j, _s1x: pixel j+1; int16 partial products.
                int16x8_t _s00 = vmull_s8(_r00, _k00);
                int16x8_t _s01 = vmull_s8(_r01, _k01);
                int16x8_t _s02 = vmull_s8(_r02, _k02);
                int16x8_t _s03 = vmull_s8(_r10, _k10);
                int16x8_t _s10 = vmull_s8(_r02, _k00);
                int16x8_t _s11 = vmull_s8(_r03, _k01);
                int16x8_t _s12 = vmull_s8(_r04, _k02);
                int16x8_t _s13 = vmull_s8(_r12, _k10);
                _s00 = vmlal_s8(_s00, _r11, _k11);
                _s01 = vmlal_s8(_s01, _r12, _k12);
                _s02 = vmlal_s8(_s02, _r20, _k20);
                _s03 = vmlal_s8(_s03, _r21, _k21);
                _s10 = vmlal_s8(_s10, _r13, _k11);
                _s11 = vmlal_s8(_s11, _r14, _k12);
                _s12 = vmlal_s8(_s12, _r22, _k20);
                _s13 = vmlal_s8(_s13, _r23, _k21);

                // Ninth tap (k22) handled separately and folded in while widening.
                int16x8_t _s08 = vmull_s8(_r22, _k22);
                int16x8_t _s18 = vmull_s8(_r24, _k22);

                int32x4_t _sum00 = vaddl_s16(vget_low_s16(_s00), vget_low_s16(_s01));
                int32x4_t _sum01 = vaddl_s16(vget_high_s16(_s00), vget_high_s16(_s01));
                int32x4_t _sum02 = vaddl_s16(vget_low_s16(_s02), vget_low_s16(_s03));
                int32x4_t _sum03 = vaddl_s16(vget_high_s16(_s02), vget_high_s16(_s03));
                int32x4_t _sum10 = vaddl_s16(vget_low_s16(_s10), vget_low_s16(_s11));
                int32x4_t _sum11 = vaddl_s16(vget_high_s16(_s10), vget_high_s16(_s11));
                int32x4_t _sum12 = vaddl_s16(vget_low_s16(_s12), vget_low_s16(_s13));
                int32x4_t _sum13 = vaddl_s16(vget_high_s16(_s12), vget_high_s16(_s13));
                _sum00 = vaddw_s16(_sum00, vget_low_s16(_s08));
                _sum01 = vaddw_s16(_sum01, vget_high_s16(_s08));
                _sum10 = vaddw_s16(_sum10, vget_low_s16(_s18));
                _sum11 = vaddw_s16(_sum11, vget_high_s16(_s18));
                _sum00 = vaddq_s32(_sum00, _sum02);
                _sum01 = vaddq_s32(_sum01, _sum03);
                _sum10 = vaddq_s32(_sum10, _sum12);
                _sum11 = vaddq_s32(_sum11, _sum13);

                vst1q_s32(outptr0, _sum00);
                vst1q_s32(outptr0 + 4, _sum01);
                vst1q_s32(outptr0 + 8, _sum10);
                vst1q_s32(outptr0 + 12, _sum11);

                // stride 2: advance input by 4 pixels (2 outputs), output by 2 pixels.
                r0 += 32;
                r1 += 32;
                r2 += 32;
                outptr0 += 16;
            }
            // Column tail: one output pixel per iteration.
            for (; j < outw; j++)
            {
                int8x8_t _r00 = vld1_s8(r0);
                int8x8_t _r01 = vld1_s8(r0 + 8);
                int8x8_t _r02 = vld1_s8(r0 + 16);
                int8x8_t _r10 = vld1_s8(r1);
                int8x8_t _r11 = vld1_s8(r1 + 8);
                int8x8_t _r12 = vld1_s8(r1 + 16);
                int8x8_t _r20 = vld1_s8(r2);
                int8x8_t _r21 = vld1_s8(r2 + 8);
                int8x8_t _r22 = vld1_s8(r2 + 16);

                int16x8_t _s0 = vmull_s8(_r00, _k00);
                int16x8_t _s1 = vmull_s8(_r01, _k01);
                int16x8_t _s2 = vmull_s8(_r02, _k02);
                int16x8_t _s3 = vmull_s8(_r10, _k10);
                _s0 = vmlal_s8(_s0, _r11, _k11);
                _s1 = vmlal_s8(_s1, _r12, _k12);
                _s2 = vmlal_s8(_s2, _r20, _k20);
                _s3 = vmlal_s8(_s3, _r21, _k21);
                int16x8_t _s4 = vmull_s8(_r22, _k22);

                int32x4_t _sum0 = vaddl_s16(vget_low_s16(_s0), vget_low_s16(_s1));
                int32x4_t _sum1 = vaddl_s16(vget_high_s16(_s0), vget_high_s16(_s1));
                int32x4_t _sum2 = vaddl_s16(vget_low_s16(_s2), vget_low_s16(_s3));
                int32x4_t _sum3 = vaddl_s16(vget_high_s16(_s2), vget_high_s16(_s3));
                _sum0 = vaddw_s16(_sum0, vget_low_s16(_s4));
                _sum1 = vaddw_s16(_sum1, vget_high_s16(_s4));
                _sum0 = vaddq_s32(_sum0, _sum2);
                _sum1 = vaddq_s32(_sum1, _sum3);

                vst1q_s32(outptr0, _sum0);
                vst1q_s32(outptr0 + 4, _sum1);

                r0 += 16;
                r1 += 16;
                r2 += 16;
                outptr0 += 8;
            }

            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include "omp.h"
#include "functions.h"
/*
 * ElGamal encryption demo: set up a cryptosystem, encrypt/decrypt a message,
 * then brute-force the secret key with OpenMP threads.
 *
 * Relies on project helpers from functions.h: setupElGamal, padString,
 * convertStringToZ/convertZToString, ElGamalEncrypt/ElGamalDecrypt, modExp.
 * Returns 0 on success (including rejected input), 1 on allocation failure.
 */
int main (int argc, char **argv) {
  int Nthreads = 1;
  omp_set_num_threads(Nthreads);

  /* Seed the PRNG; srand takes an unsigned int, so cast the clock value.   */
  unsigned int seed = (unsigned int) clock(); /* differs on every run       */
  /* unsigned int seed = 0; */ /* uncomment for reproducible runs           */
  srand(seed);

  /* Storage for an ElGamal cryptosystem: modulus p, generator g,
     public key h = g^x mod p, secret key x. */
  unsigned int p, g, h, x;

  unsigned int n;
  printf("Enter a number of bits: "); fflush(stdout);
  /* BUG FIX: the scanf return value was stored in an unused char; check it
     so garbage input does not leave n uninitialized. */
  if (scanf("%u", &n) != 1) {
    printf("Unsupported bit size.\n");
    return 0;
  }

  /* BUG FIX: the old test (n<8) still admitted n==8, but charsPerInt =
     (n-1)/8 is then 0 and Nints divides by zero. Reject n<=8 as the
     original comment ("8 is no good") intended. */
  if ((n < 9) || (n > 31)) {
    printf("Unsupported bit size.\n");
    return 0;
  }
  printf("\n");

  /* setup an ElGamal cryptosystem */
  setupElGamal(n, &p, &g, &h, &x);

  int bufferSize = 1024;
  unsigned char *message = (unsigned char *) malloc(bufferSize * sizeof(unsigned char));
  if (message == NULL) {
    printf("Out of memory.\n");
    return 1;
  }

  /* populate the string with a message (cast: strcpy wants char*) */
  strcpy((char *) message, "Hello, this is the message as a string.");
  printf("Message = \"%s\"\n", message);

  /* each Z_p element encodes charsPerInt characters */
  unsigned int charsPerInt = (n - 1) / 8;

  padString(message, charsPerInt);
  printf("Padded Message = \"%s\"\n", message);

  unsigned int Nchars = strlen((const char *) message);
  unsigned int Nints = Nchars / charsPerInt; /* reuse Nchars: avoid a second strlen */

  /* storage for message as elements of Z_p */
  unsigned int *Zmessage = (unsigned int *) malloc(Nints * sizeof(unsigned int));
  /* storage for extra encryption coefficient */
  unsigned int *a = (unsigned int *) malloc(Nints * sizeof(unsigned int));
  if (Zmessage == NULL || a == NULL) {
    printf("Out of memory.\n");
    free(Zmessage);
    free(a);
    free(message);
    return 1;
  }

  /* cast the string into an unsigned int array */
  convertStringToZ(message, Nchars, Zmessage, Nints);

  /* Encrypt the Zmessage with the ElGamal cryptographic system */
  ElGamalEncrypt(Zmessage, a, Nints, p, g, h);

  printf("The encrypted text is: ["); /* BUG FIX: opening '[' was missing */
  for (unsigned int i = 0; i < Nints; i++) {
    printf("(%u,%u) ", Zmessage[i], a[i]);
  }
  printf("]\n");

  /* Decrypt the Zmessage with the ElGamal cryptographic system */
  ElGamalDecrypt(Zmessage, a, Nints, p, x);

  convertZToString(Zmessage, Nints, message, Nchars);
  printf("Decrypted Message = \"%s\"\n", message);
  printf("\n");

  /* Suppose we don't know the secret key. Use OpenMP threads to try and
     find it in parallel. */
  printf("Using %d OpenMP threads to find the secret key...\n", Nthreads);

  double startTime = omp_get_wtime();
  /* BUG FIX: the original 'reduction(+:h)' gave every thread a private h
     initialized to 0, so modExp(...)==h compared against 0 instead of the
     public key. h is only read inside the loop, so it must stay shared. */
  #pragma omp parallel for
  for (unsigned int i = 0; i < p - 1; i++) {
    if (modExp(g, i + 1, p) == h) {
      /* BUG FIX: the exponent tested is i+1, so the key is i+1, not i. */
      printf("Secret key found! x = %u \n", i + 1);
    }
  }
  double endTime = omp_get_wtime();

  double totalTime = endTime - startTime;
  double work = (double) p;
  double throughput = work / totalTime;

  printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput);

  free(a);
  free(Zmessage);
  free(message);
  return 0;
}
|
shared_private_default.c |
// OpenMP Shared/Private/Default Example
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/* Demonstrates OpenMP data-sharing clauses: private, shared, and default.
 * The output is intentionally nondeterministic -- that is the lesson:
 *  - id, i are private: each thread gets its OWN UNINITIALIZED copy inside
 *    the region (the '= 0' initializers outside do not carry in). Every
 *    thread sets id itself, but only thread 0 sets i, so the other threads
 *    print an indeterminate i.
 *  - m is shared; x is shared via default(shared). Thread 0 writes both
 *    while the other threads read them with no synchronization, so the
 *    printed m/x values depend on timing (a deliberate data race for
 *    teaching purposes, presumably -- not a pattern to copy).
 */
int main( int argc, char* argv[ ] ) {
    int id = 0;
    int i = 0;
    int m = 0;
    int x = 2;
    /* line continuation: default(shared) belongs to the same pragma */
    #pragma omp parallel private( id, i ) shared( m ) \
    default( shared )
    {
        id = omp_get_thread_num( );
        if( id == 0 ) {
            i = 3;
            m = 17;
            x++;
        }
        printf( "Thread %d: %d %d %d\n", id, i, m, x );
    }
    return 0;
}
// End shared_private_default.c - EWG SDG
|
convolution_5x5_pack8_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv5x5s1_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const __fp16* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
float16x8_t _bias0 = bias ? vld1q_f16(bias + p * 8) : vdupq_n_f16(0.f);
out0.fill(_bias0);
int q = 0;
for (; q < inch; q++)
{
__fp16* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const __fp16* r0 = img0.row<const __fp16>(0);
const __fp16* r1 = img0.row<const __fp16>(1);
const __fp16* r2 = img0.row<const __fp16>(2);
const __fp16* r3 = img0.row<const __fp16>(3);
const __fp16* r4 = img0.row<const __fp16>(4);
const __fp16* kptr = kernel.channel(p).row<const __fp16>(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum0 sum1 sum2 sum3
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" // r00 r01 r02 r03
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v1.h[0] \n"
"fmla v30.8h, v16.8h, v2.h[0] \n"
"fmla v31.8h, v16.8h, v3.h[0] \n"
"fmla v28.8h, v17.8h, v0.h[1] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v1.h[2] \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v18.8h, v3.h[2] \n"
"fmla v28.8h, v19.8h, v0.h[3] \n"
"fmla v29.8h, v19.8h, v1.h[3] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v1.h[4] \n"
"fmla v30.8h, v20.8h, v2.h[4] \n"
"fmla v31.8h, v20.8h, v3.h[4] \n"
"fmla v28.8h, v21.8h, v0.h[5] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%1] \n" // r04 r05 r06 r07
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v1.h[6] \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v22.8h, v3.h[6] \n"
"fmla v28.8h, v23.8h, v0.h[7] \n"
"fmla v29.8h, v23.8h, v1.h[7] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v16.8h, v3.h[0] \n"
"fmla v31.8h, v16.8h, v4.h[0] \n"
"fmla v28.8h, v17.8h, v1.h[1] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"fmla v30.8h, v17.8h, v3.h[1] \n"
"fmla v31.8h, v17.8h, v4.h[1] \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v18.8h, v3.h[2] \n"
"fmla v31.8h, v18.8h, v4.h[2] \n"
"fmla v28.8h, v19.8h, v1.h[3] \n"
"fmla v29.8h, v19.8h, v2.h[3] \n"
"fmla v30.8h, v19.8h, v3.h[3] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v20.8h, v3.h[4] \n"
"fmla v31.8h, v20.8h, v4.h[4] \n"
"fmla v28.8h, v21.8h, v1.h[5] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"fmla v30.8h, v21.8h, v3.h[5] \n"
"fmla v31.8h, v21.8h, v4.h[5] \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v22.8h, v3.h[6] \n"
"fmla v31.8h, v22.8h, v4.h[6] \n"
"fmla v28.8h, v23.8h, v1.h[7] \n"
"fmla v29.8h, v23.8h, v2.h[7] \n"
"fmla v30.8h, v23.8h, v3.h[7] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v16.8h, v4.h[0] \n"
"fmla v31.8h, v16.8h, v5.h[0] \n"
"fmla v28.8h, v17.8h, v2.h[1] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v5.h[1] \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v18.8h, v5.h[2] \n"
"fmla v28.8h, v19.8h, v2.h[3] \n"
"fmla v29.8h, v19.8h, v3.h[3] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v5.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v20.8h, v4.h[4] \n"
"fmla v31.8h, v20.8h, v5.h[4] \n"
"fmla v28.8h, v21.8h, v2.h[5] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v5.h[5] \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v22.8h, v5.h[6] \n"
"fmla v28.8h, v23.8h, v2.h[7] \n"
"fmla v29.8h, v23.8h, v3.h[7] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v5.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v3.h[0] \n"
"fmla v29.8h, v16.8h, v4.h[0] \n"
"fmla v30.8h, v16.8h, v5.h[0] \n"
"fmla v31.8h, v16.8h, v6.h[0] \n"
"fmla v28.8h, v17.8h, v3.h[1] \n"
"fmla v29.8h, v17.8h, v4.h[1] \n"
"fmla v30.8h, v17.8h, v5.h[1] \n"
"fmla v31.8h, v17.8h, v6.h[1] \n"
"fmla v28.8h, v18.8h, v3.h[2] \n"
"fmla v29.8h, v18.8h, v4.h[2] \n"
"fmla v30.8h, v18.8h, v5.h[2] \n"
"fmla v31.8h, v18.8h, v6.h[2] \n"
"fmla v28.8h, v19.8h, v3.h[3] \n"
"fmla v29.8h, v19.8h, v4.h[3] \n"
"fmla v30.8h, v19.8h, v5.h[3] \n"
"fmla v31.8h, v19.8h, v6.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v3.h[4] \n"
"fmla v29.8h, v20.8h, v4.h[4] \n"
"fmla v30.8h, v20.8h, v5.h[4] \n"
"fmla v31.8h, v20.8h, v6.h[4] \n"
"fmla v28.8h, v21.8h, v3.h[5] \n"
"fmla v29.8h, v21.8h, v4.h[5] \n"
"fmla v30.8h, v21.8h, v5.h[5] \n"
"fmla v31.8h, v21.8h, v6.h[5] \n"
"fmla v28.8h, v22.8h, v3.h[6] \n"
"fmla v29.8h, v22.8h, v4.h[6] \n"
"fmla v30.8h, v22.8h, v5.h[6] \n"
"fmla v31.8h, v22.8h, v6.h[6] \n"
"fmla v28.8h, v23.8h, v3.h[7] \n"
"fmla v29.8h, v23.8h, v4.h[7] \n"
"fmla v30.8h, v23.8h, v5.h[7] \n"
"fmla v31.8h, v23.8h, v6.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v4.h[0] \n"
"fmla v29.8h, v16.8h, v5.h[0] \n"
"fmla v30.8h, v16.8h, v6.h[0] \n"
"fmla v31.8h, v16.8h, v7.h[0] \n"
"fmla v28.8h, v17.8h, v4.h[1] \n"
"fmla v29.8h, v17.8h, v5.h[1] \n"
"fmla v30.8h, v17.8h, v6.h[1] \n"
"fmla v31.8h, v17.8h, v7.h[1] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r10 r11 r12 r13
"fmla v28.8h, v18.8h, v4.h[2] \n"
"fmla v29.8h, v18.8h, v5.h[2] \n"
"fmla v30.8h, v18.8h, v6.h[2] \n"
"fmla v31.8h, v18.8h, v7.h[2] \n"
"fmla v28.8h, v19.8h, v4.h[3] \n"
"fmla v29.8h, v19.8h, v5.h[3] \n"
"fmla v30.8h, v19.8h, v6.h[3] \n"
"fmla v31.8h, v19.8h, v7.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v20.8h, v5.h[4] \n"
"fmla v30.8h, v20.8h, v6.h[4] \n"
"fmla v31.8h, v20.8h, v7.h[4] \n"
"fmla v28.8h, v21.8h, v4.h[5] \n"
"fmla v29.8h, v21.8h, v5.h[5] \n"
"fmla v30.8h, v21.8h, v6.h[5] \n"
"fmla v31.8h, v21.8h, v7.h[5] \n"
"fmla v28.8h, v22.8h, v4.h[6] \n"
"fmla v29.8h, v22.8h, v5.h[6] \n"
"fmla v30.8h, v22.8h, v6.h[6] \n"
"fmla v31.8h, v22.8h, v7.h[6] \n"
"fmla v28.8h, v23.8h, v4.h[7] \n"
"fmla v29.8h, v23.8h, v5.h[7] \n"
"fmla v30.8h, v23.8h, v6.h[7] \n"
"fmla v31.8h, v23.8h, v7.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v8.h[0] \n"
"fmla v29.8h, v16.8h, v9.h[0] \n"
"fmla v30.8h, v16.8h, v10.h[0] \n"
"fmla v31.8h, v16.8h, v11.h[0] \n"
"fmla v28.8h, v17.8h, v8.h[1] \n"
"fmla v29.8h, v17.8h, v9.h[1] \n"
"fmla v30.8h, v17.8h, v10.h[1] \n"
"fmla v31.8h, v17.8h, v11.h[1] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%2] \n" // r14 r15 r16 r17
"fmla v28.8h, v18.8h, v8.h[2] \n"
"fmla v29.8h, v18.8h, v9.h[2] \n"
"fmla v30.8h, v18.8h, v10.h[2] \n"
"fmla v31.8h, v18.8h, v11.h[2] \n"
"fmla v28.8h, v19.8h, v8.h[3] \n"
"fmla v29.8h, v19.8h, v9.h[3] \n"
"fmla v30.8h, v19.8h, v10.h[3] \n"
"fmla v31.8h, v19.8h, v11.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v8.h[4] \n"
"fmla v29.8h, v20.8h, v9.h[4] \n"
"fmla v30.8h, v20.8h, v10.h[4] \n"
"fmla v31.8h, v20.8h, v11.h[4] \n"
"fmla v28.8h, v21.8h, v8.h[5] \n"
"fmla v29.8h, v21.8h, v9.h[5] \n"
"fmla v30.8h, v21.8h, v10.h[5] \n"
"fmla v31.8h, v21.8h, v11.h[5] \n"
"fmla v28.8h, v22.8h, v8.h[6] \n"
"fmla v29.8h, v22.8h, v9.h[6] \n"
"fmla v30.8h, v22.8h, v10.h[6] \n"
"fmla v31.8h, v22.8h, v11.h[6] \n"
"fmla v28.8h, v23.8h, v8.h[7] \n"
"fmla v29.8h, v23.8h, v9.h[7] \n"
"fmla v30.8h, v23.8h, v10.h[7] \n"
"fmla v31.8h, v23.8h, v11.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v9.h[0] \n"
"fmla v29.8h, v16.8h, v10.h[0] \n"
"fmla v30.8h, v16.8h, v11.h[0] \n"
"fmla v31.8h, v16.8h, v12.h[0] \n"
"fmla v28.8h, v17.8h, v9.h[1] \n"
"fmla v29.8h, v17.8h, v10.h[1] \n"
"fmla v30.8h, v17.8h, v11.h[1] \n"
"fmla v31.8h, v17.8h, v12.h[1] \n"
"fmla v28.8h, v18.8h, v9.h[2] \n"
"fmla v29.8h, v18.8h, v10.h[2] \n"
"fmla v30.8h, v18.8h, v11.h[2] \n"
"fmla v31.8h, v18.8h, v12.h[2] \n"
"fmla v28.8h, v19.8h, v9.h[3] \n"
"fmla v29.8h, v19.8h, v10.h[3] \n"
"fmla v30.8h, v19.8h, v11.h[3] \n"
"fmla v31.8h, v19.8h, v12.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v9.h[4] \n"
"fmla v29.8h, v20.8h, v10.h[4] \n"
"fmla v30.8h, v20.8h, v11.h[4] \n"
"fmla v31.8h, v20.8h, v12.h[4] \n"
"fmla v28.8h, v21.8h, v9.h[5] \n"
"fmla v29.8h, v21.8h, v10.h[5] \n"
"fmla v30.8h, v21.8h, v11.h[5] \n"
"fmla v31.8h, v21.8h, v12.h[5] \n"
"fmla v28.8h, v22.8h, v9.h[6] \n"
"fmla v29.8h, v22.8h, v10.h[6] \n"
"fmla v30.8h, v22.8h, v11.h[6] \n"
"fmla v31.8h, v22.8h, v12.h[6] \n"
"fmla v28.8h, v23.8h, v9.h[7] \n"
"fmla v29.8h, v23.8h, v10.h[7] \n"
"fmla v30.8h, v23.8h, v11.h[7] \n"
"fmla v31.8h, v23.8h, v12.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v10.h[0] \n"
"fmla v29.8h, v16.8h, v11.h[0] \n"
"fmla v30.8h, v16.8h, v12.h[0] \n"
"fmla v31.8h, v16.8h, v13.h[0] \n"
"fmla v28.8h, v17.8h, v10.h[1] \n"
"fmla v29.8h, v17.8h, v11.h[1] \n"
"fmla v30.8h, v17.8h, v12.h[1] \n"
"fmla v31.8h, v17.8h, v13.h[1] \n"
"fmla v28.8h, v18.8h, v10.h[2] \n"
"fmla v29.8h, v18.8h, v11.h[2] \n"
"fmla v30.8h, v18.8h, v12.h[2] \n"
"fmla v31.8h, v18.8h, v13.h[2] \n"
"fmla v28.8h, v19.8h, v10.h[3] \n"
"fmla v29.8h, v19.8h, v11.h[3] \n"
"fmla v30.8h, v19.8h, v12.h[3] \n"
"fmla v31.8h, v19.8h, v13.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v10.h[4] \n"
"fmla v29.8h, v20.8h, v11.h[4] \n"
"fmla v30.8h, v20.8h, v12.h[4] \n"
"fmla v31.8h, v20.8h, v13.h[4] \n"
"fmla v28.8h, v21.8h, v10.h[5] \n"
"fmla v29.8h, v21.8h, v11.h[5] \n"
"fmla v30.8h, v21.8h, v12.h[5] \n"
"fmla v31.8h, v21.8h, v13.h[5] \n"
"fmla v28.8h, v22.8h, v10.h[6] \n"
"fmla v29.8h, v22.8h, v11.h[6] \n"
"fmla v30.8h, v22.8h, v12.h[6] \n"
"fmla v31.8h, v22.8h, v13.h[6] \n"
"fmla v28.8h, v23.8h, v10.h[7] \n"
"fmla v29.8h, v23.8h, v11.h[7] \n"
"fmla v30.8h, v23.8h, v12.h[7] \n"
"fmla v31.8h, v23.8h, v13.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v11.h[0] \n"
"fmla v29.8h, v16.8h, v12.h[0] \n"
"fmla v30.8h, v16.8h, v13.h[0] \n"
"fmla v31.8h, v16.8h, v14.h[0] \n"
"fmla v28.8h, v17.8h, v11.h[1] \n"
"fmla v29.8h, v17.8h, v12.h[1] \n"
"fmla v30.8h, v17.8h, v13.h[1] \n"
"fmla v31.8h, v17.8h, v14.h[1] \n"
"fmla v28.8h, v18.8h, v11.h[2] \n"
"fmla v29.8h, v18.8h, v12.h[2] \n"
"fmla v30.8h, v18.8h, v13.h[2] \n"
"fmla v31.8h, v18.8h, v14.h[2] \n"
"fmla v28.8h, v19.8h, v11.h[3] \n"
"fmla v29.8h, v19.8h, v12.h[3] \n"
"fmla v30.8h, v19.8h, v13.h[3] \n"
"fmla v31.8h, v19.8h, v14.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v11.h[4] \n"
"fmla v29.8h, v20.8h, v12.h[4] \n"
"fmla v30.8h, v20.8h, v13.h[4] \n"
"fmla v31.8h, v20.8h, v14.h[4] \n"
"fmla v28.8h, v21.8h, v11.h[5] \n"
"fmla v29.8h, v21.8h, v12.h[5] \n"
"fmla v30.8h, v21.8h, v13.h[5] \n"
"fmla v31.8h, v21.8h, v14.h[5] \n"
"fmla v28.8h, v22.8h, v11.h[6] \n"
"fmla v29.8h, v22.8h, v12.h[6] \n"
"fmla v30.8h, v22.8h, v13.h[6] \n"
"fmla v31.8h, v22.8h, v14.h[6] \n"
"fmla v28.8h, v23.8h, v11.h[7] \n"
"fmla v29.8h, v23.8h, v12.h[7] \n"
"fmla v30.8h, v23.8h, v13.h[7] \n"
"fmla v31.8h, v23.8h, v14.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v12.h[0] \n"
"fmla v29.8h, v16.8h, v13.h[0] \n"
"fmla v30.8h, v16.8h, v14.h[0] \n"
"fmla v31.8h, v16.8h, v15.h[0] \n"
"fmla v28.8h, v17.8h, v12.h[1] \n"
"fmla v29.8h, v17.8h, v13.h[1] \n"
"fmla v30.8h, v17.8h, v14.h[1] \n"
"fmla v31.8h, v17.8h, v15.h[1] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r20 r21 r22 r23
"fmla v28.8h, v18.8h, v12.h[2] \n"
"fmla v29.8h, v18.8h, v13.h[2] \n"
"fmla v30.8h, v18.8h, v14.h[2] \n"
"fmla v31.8h, v18.8h, v15.h[2] \n"
"fmla v28.8h, v19.8h, v12.h[3] \n"
"fmla v29.8h, v19.8h, v13.h[3] \n"
"fmla v30.8h, v19.8h, v14.h[3] \n"
"fmla v31.8h, v19.8h, v15.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v12.h[4] \n"
"fmla v29.8h, v20.8h, v13.h[4] \n"
"fmla v30.8h, v20.8h, v14.h[4] \n"
"fmla v31.8h, v20.8h, v15.h[4] \n"
"fmla v28.8h, v21.8h, v12.h[5] \n"
"fmla v29.8h, v21.8h, v13.h[5] \n"
"fmla v30.8h, v21.8h, v14.h[5] \n"
"fmla v31.8h, v21.8h, v15.h[5] \n"
"fmla v28.8h, v22.8h, v12.h[6] \n"
"fmla v29.8h, v22.8h, v13.h[6] \n"
"fmla v30.8h, v22.8h, v14.h[6] \n"
"fmla v31.8h, v22.8h, v15.h[6] \n"
"fmla v28.8h, v23.8h, v12.h[7] \n"
"fmla v29.8h, v23.8h, v13.h[7] \n"
"fmla v30.8h, v23.8h, v14.h[7] \n"
"fmla v31.8h, v23.8h, v15.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v1.h[0] \n"
"fmla v30.8h, v16.8h, v2.h[0] \n"
"fmla v31.8h, v16.8h, v3.h[0] \n"
"fmla v28.8h, v17.8h, v0.h[1] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3] \n" // r24 r25 r26 r27
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v1.h[2] \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v18.8h, v3.h[2] \n"
"fmla v28.8h, v19.8h, v0.h[3] \n"
"fmla v29.8h, v19.8h, v1.h[3] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v1.h[4] \n"
"fmla v30.8h, v20.8h, v2.h[4] \n"
"fmla v31.8h, v20.8h, v3.h[4] \n"
"fmla v28.8h, v21.8h, v0.h[5] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v1.h[6] \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v22.8h, v3.h[6] \n"
"fmla v28.8h, v23.8h, v0.h[7] \n"
"fmla v29.8h, v23.8h, v1.h[7] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v16.8h, v3.h[0] \n"
"fmla v31.8h, v16.8h, v4.h[0] \n"
"fmla v28.8h, v17.8h, v1.h[1] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"fmla v30.8h, v17.8h, v3.h[1] \n"
"fmla v31.8h, v17.8h, v4.h[1] \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v18.8h, v3.h[2] \n"
"fmla v31.8h, v18.8h, v4.h[2] \n"
"fmla v28.8h, v19.8h, v1.h[3] \n"
"fmla v29.8h, v19.8h, v2.h[3] \n"
"fmla v30.8h, v19.8h, v3.h[3] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v20.8h, v3.h[4] \n"
"fmla v31.8h, v20.8h, v4.h[4] \n"
"fmla v28.8h, v21.8h, v1.h[5] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"fmla v30.8h, v21.8h, v3.h[5] \n"
"fmla v31.8h, v21.8h, v4.h[5] \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v22.8h, v3.h[6] \n"
"fmla v31.8h, v22.8h, v4.h[6] \n"
"fmla v28.8h, v23.8h, v1.h[7] \n"
"fmla v29.8h, v23.8h, v2.h[7] \n"
"fmla v30.8h, v23.8h, v3.h[7] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v16.8h, v4.h[0] \n"
"fmla v31.8h, v16.8h, v5.h[0] \n"
"fmla v28.8h, v17.8h, v2.h[1] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v5.h[1] \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v18.8h, v5.h[2] \n"
"fmla v28.8h, v19.8h, v2.h[3] \n"
"fmla v29.8h, v19.8h, v3.h[3] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v5.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v20.8h, v4.h[4] \n"
"fmla v31.8h, v20.8h, v5.h[4] \n"
"fmla v28.8h, v21.8h, v2.h[5] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v5.h[5] \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v22.8h, v5.h[6] \n"
"fmla v28.8h, v23.8h, v2.h[7] \n"
"fmla v29.8h, v23.8h, v3.h[7] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v5.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v3.h[0] \n"
"fmla v29.8h, v16.8h, v4.h[0] \n"
"fmla v30.8h, v16.8h, v5.h[0] \n"
"fmla v31.8h, v16.8h, v6.h[0] \n"
"fmla v28.8h, v17.8h, v3.h[1] \n"
"fmla v29.8h, v17.8h, v4.h[1] \n"
"fmla v30.8h, v17.8h, v5.h[1] \n"
"fmla v31.8h, v17.8h, v6.h[1] \n"
"fmla v28.8h, v18.8h, v3.h[2] \n"
"fmla v29.8h, v18.8h, v4.h[2] \n"
"fmla v30.8h, v18.8h, v5.h[2] \n"
"fmla v31.8h, v18.8h, v6.h[2] \n"
"fmla v28.8h, v19.8h, v3.h[3] \n"
"fmla v29.8h, v19.8h, v4.h[3] \n"
"fmla v30.8h, v19.8h, v5.h[3] \n"
"fmla v31.8h, v19.8h, v6.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v3.h[4] \n"
"fmla v29.8h, v20.8h, v4.h[4] \n"
"fmla v30.8h, v20.8h, v5.h[4] \n"
"fmla v31.8h, v20.8h, v6.h[4] \n"
"fmla v28.8h, v21.8h, v3.h[5] \n"
"fmla v29.8h, v21.8h, v4.h[5] \n"
"fmla v30.8h, v21.8h, v5.h[5] \n"
"fmla v31.8h, v21.8h, v6.h[5] \n"
"fmla v28.8h, v22.8h, v3.h[6] \n"
"fmla v29.8h, v22.8h, v4.h[6] \n"
"fmla v30.8h, v22.8h, v5.h[6] \n"
"fmla v31.8h, v22.8h, v6.h[6] \n"
"fmla v28.8h, v23.8h, v3.h[7] \n"
"fmla v29.8h, v23.8h, v4.h[7] \n"
"fmla v30.8h, v23.8h, v5.h[7] \n"
"fmla v31.8h, v23.8h, v6.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v4.h[0] \n"
"fmla v29.8h, v16.8h, v5.h[0] \n"
"fmla v30.8h, v16.8h, v6.h[0] \n"
"fmla v31.8h, v16.8h, v7.h[0] \n"
"fmla v28.8h, v17.8h, v4.h[1] \n"
"fmla v29.8h, v17.8h, v5.h[1] \n"
"fmla v30.8h, v17.8h, v6.h[1] \n"
"fmla v31.8h, v17.8h, v7.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%4], #64 \n" // r30 r31 r32 r33
"fmla v28.8h, v18.8h, v4.h[2] \n"
"fmla v29.8h, v18.8h, v5.h[2] \n"
"fmla v30.8h, v18.8h, v6.h[2] \n"
"fmla v31.8h, v18.8h, v7.h[2] \n"
"fmla v28.8h, v19.8h, v4.h[3] \n"
"fmla v29.8h, v19.8h, v5.h[3] \n"
"fmla v30.8h, v19.8h, v6.h[3] \n"
"fmla v31.8h, v19.8h, v7.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v20.8h, v5.h[4] \n"
"fmla v30.8h, v20.8h, v6.h[4] \n"
"fmla v31.8h, v20.8h, v7.h[4] \n"
"fmla v28.8h, v21.8h, v4.h[5] \n"
"fmla v29.8h, v21.8h, v5.h[5] \n"
"fmla v30.8h, v21.8h, v6.h[5] \n"
"fmla v31.8h, v21.8h, v7.h[5] \n"
"fmla v28.8h, v22.8h, v4.h[6] \n"
"fmla v29.8h, v22.8h, v5.h[6] \n"
"fmla v30.8h, v22.8h, v6.h[6] \n"
"fmla v31.8h, v22.8h, v7.h[6] \n"
"fmla v28.8h, v23.8h, v4.h[7] \n"
"fmla v29.8h, v23.8h, v5.h[7] \n"
"fmla v30.8h, v23.8h, v6.h[7] \n"
"fmla v31.8h, v23.8h, v7.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v8.h[0] \n"
"fmla v29.8h, v16.8h, v9.h[0] \n"
"fmla v30.8h, v16.8h, v10.h[0] \n"
"fmla v31.8h, v16.8h, v11.h[0] \n"
"fmla v28.8h, v17.8h, v8.h[1] \n"
"fmla v29.8h, v17.8h, v9.h[1] \n"
"fmla v30.8h, v17.8h, v10.h[1] \n"
"fmla v31.8h, v17.8h, v11.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%4] \n" // r34 r35 r36 r37
"fmla v28.8h, v18.8h, v8.h[2] \n"
"fmla v29.8h, v18.8h, v9.h[2] \n"
"fmla v30.8h, v18.8h, v10.h[2] \n"
"fmla v31.8h, v18.8h, v11.h[2] \n"
"fmla v28.8h, v19.8h, v8.h[3] \n"
"fmla v29.8h, v19.8h, v9.h[3] \n"
"fmla v30.8h, v19.8h, v10.h[3] \n"
"fmla v31.8h, v19.8h, v11.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v8.h[4] \n"
"fmla v29.8h, v20.8h, v9.h[4] \n"
"fmla v30.8h, v20.8h, v10.h[4] \n"
"fmla v31.8h, v20.8h, v11.h[4] \n"
"fmla v28.8h, v21.8h, v8.h[5] \n"
"fmla v29.8h, v21.8h, v9.h[5] \n"
"fmla v30.8h, v21.8h, v10.h[5] \n"
"fmla v31.8h, v21.8h, v11.h[5] \n"
"fmla v28.8h, v22.8h, v8.h[6] \n"
"fmla v29.8h, v22.8h, v9.h[6] \n"
"fmla v30.8h, v22.8h, v10.h[6] \n"
"fmla v31.8h, v22.8h, v11.h[6] \n"
"fmla v28.8h, v23.8h, v8.h[7] \n"
"fmla v29.8h, v23.8h, v9.h[7] \n"
"fmla v30.8h, v23.8h, v10.h[7] \n"
"fmla v31.8h, v23.8h, v11.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v9.h[0] \n"
"fmla v29.8h, v16.8h, v10.h[0] \n"
"fmla v30.8h, v16.8h, v11.h[0] \n"
"fmla v31.8h, v16.8h, v12.h[0] \n"
"fmla v28.8h, v17.8h, v9.h[1] \n"
"fmla v29.8h, v17.8h, v10.h[1] \n"
"fmla v30.8h, v17.8h, v11.h[1] \n"
"fmla v31.8h, v17.8h, v12.h[1] \n"
"fmla v28.8h, v18.8h, v9.h[2] \n"
"fmla v29.8h, v18.8h, v10.h[2] \n"
"fmla v30.8h, v18.8h, v11.h[2] \n"
"fmla v31.8h, v18.8h, v12.h[2] \n"
"fmla v28.8h, v19.8h, v9.h[3] \n"
"fmla v29.8h, v19.8h, v10.h[3] \n"
"fmla v30.8h, v19.8h, v11.h[3] \n"
"fmla v31.8h, v19.8h, v12.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v9.h[4] \n"
"fmla v29.8h, v20.8h, v10.h[4] \n"
"fmla v30.8h, v20.8h, v11.h[4] \n"
"fmla v31.8h, v20.8h, v12.h[4] \n"
"fmla v28.8h, v21.8h, v9.h[5] \n"
"fmla v29.8h, v21.8h, v10.h[5] \n"
"fmla v30.8h, v21.8h, v11.h[5] \n"
"fmla v31.8h, v21.8h, v12.h[5] \n"
"fmla v28.8h, v22.8h, v9.h[6] \n"
"fmla v29.8h, v22.8h, v10.h[6] \n"
"fmla v30.8h, v22.8h, v11.h[6] \n"
"fmla v31.8h, v22.8h, v12.h[6] \n"
"fmla v28.8h, v23.8h, v9.h[7] \n"
"fmla v29.8h, v23.8h, v10.h[7] \n"
"fmla v30.8h, v23.8h, v11.h[7] \n"
"fmla v31.8h, v23.8h, v12.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v10.h[0] \n"
"fmla v29.8h, v16.8h, v11.h[0] \n"
"fmla v30.8h, v16.8h, v12.h[0] \n"
"fmla v31.8h, v16.8h, v13.h[0] \n"
"fmla v28.8h, v17.8h, v10.h[1] \n"
"fmla v29.8h, v17.8h, v11.h[1] \n"
"fmla v30.8h, v17.8h, v12.h[1] \n"
"fmla v31.8h, v17.8h, v13.h[1] \n"
"fmla v28.8h, v18.8h, v10.h[2] \n"
"fmla v29.8h, v18.8h, v11.h[2] \n"
"fmla v30.8h, v18.8h, v12.h[2] \n"
"fmla v31.8h, v18.8h, v13.h[2] \n"
"fmla v28.8h, v19.8h, v10.h[3] \n"
"fmla v29.8h, v19.8h, v11.h[3] \n"
"fmla v30.8h, v19.8h, v12.h[3] \n"
"fmla v31.8h, v19.8h, v13.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v10.h[4] \n"
"fmla v29.8h, v20.8h, v11.h[4] \n"
"fmla v30.8h, v20.8h, v12.h[4] \n"
"fmla v31.8h, v20.8h, v13.h[4] \n"
"fmla v28.8h, v21.8h, v10.h[5] \n"
"fmla v29.8h, v21.8h, v11.h[5] \n"
"fmla v30.8h, v21.8h, v12.h[5] \n"
"fmla v31.8h, v21.8h, v13.h[5] \n"
"fmla v28.8h, v22.8h, v10.h[6] \n"
"fmla v29.8h, v22.8h, v11.h[6] \n"
"fmla v30.8h, v22.8h, v12.h[6] \n"
"fmla v31.8h, v22.8h, v13.h[6] \n"
"fmla v28.8h, v23.8h, v10.h[7] \n"
"fmla v29.8h, v23.8h, v11.h[7] \n"
"fmla v30.8h, v23.8h, v12.h[7] \n"
"fmla v31.8h, v23.8h, v13.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v11.h[0] \n"
"fmla v29.8h, v16.8h, v12.h[0] \n"
"fmla v30.8h, v16.8h, v13.h[0] \n"
"fmla v31.8h, v16.8h, v14.h[0] \n"
"fmla v28.8h, v17.8h, v11.h[1] \n"
"fmla v29.8h, v17.8h, v12.h[1] \n"
"fmla v30.8h, v17.8h, v13.h[1] \n"
"fmla v31.8h, v17.8h, v14.h[1] \n"
"fmla v28.8h, v18.8h, v11.h[2] \n"
"fmla v29.8h, v18.8h, v12.h[2] \n"
"fmla v30.8h, v18.8h, v13.h[2] \n"
"fmla v31.8h, v18.8h, v14.h[2] \n"
"fmla v28.8h, v19.8h, v11.h[3] \n"
"fmla v29.8h, v19.8h, v12.h[3] \n"
"fmla v30.8h, v19.8h, v13.h[3] \n"
"fmla v31.8h, v19.8h, v14.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v11.h[4] \n"
"fmla v29.8h, v20.8h, v12.h[4] \n"
"fmla v30.8h, v20.8h, v13.h[4] \n"
"fmla v31.8h, v20.8h, v14.h[4] \n"
"fmla v28.8h, v21.8h, v11.h[5] \n"
"fmla v29.8h, v21.8h, v12.h[5] \n"
"fmla v30.8h, v21.8h, v13.h[5] \n"
"fmla v31.8h, v21.8h, v14.h[5] \n"
"fmla v28.8h, v22.8h, v11.h[6] \n"
"fmla v29.8h, v22.8h, v12.h[6] \n"
"fmla v30.8h, v22.8h, v13.h[6] \n"
"fmla v31.8h, v22.8h, v14.h[6] \n"
"fmla v28.8h, v23.8h, v11.h[7] \n"
"fmla v29.8h, v23.8h, v12.h[7] \n"
"fmla v30.8h, v23.8h, v13.h[7] \n"
"fmla v31.8h, v23.8h, v14.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v12.h[0] \n"
"fmla v29.8h, v16.8h, v13.h[0] \n"
"fmla v30.8h, v16.8h, v14.h[0] \n"
"fmla v31.8h, v16.8h, v15.h[0] \n"
"fmla v28.8h, v17.8h, v12.h[1] \n"
"fmla v29.8h, v17.8h, v13.h[1] \n"
"fmla v30.8h, v17.8h, v14.h[1] \n"
"fmla v31.8h, v17.8h, v15.h[1] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%5], #64 \n" // r40 r41 r42 r43
"fmla v28.8h, v18.8h, v12.h[2] \n"
"fmla v29.8h, v18.8h, v13.h[2] \n"
"fmla v30.8h, v18.8h, v14.h[2] \n"
"fmla v31.8h, v18.8h, v15.h[2] \n"
"fmla v28.8h, v19.8h, v12.h[3] \n"
"fmla v29.8h, v19.8h, v13.h[3] \n"
"fmla v30.8h, v19.8h, v14.h[3] \n"
"fmla v31.8h, v19.8h, v15.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v12.h[4] \n"
"fmla v29.8h, v20.8h, v13.h[4] \n"
"fmla v30.8h, v20.8h, v14.h[4] \n"
"fmla v31.8h, v20.8h, v15.h[4] \n"
"fmla v28.8h, v21.8h, v12.h[5] \n"
"fmla v29.8h, v21.8h, v13.h[5] \n"
"fmla v30.8h, v21.8h, v14.h[5] \n"
"fmla v31.8h, v21.8h, v15.h[5] \n"
"fmla v28.8h, v22.8h, v12.h[6] \n"
"fmla v29.8h, v22.8h, v13.h[6] \n"
"fmla v30.8h, v22.8h, v14.h[6] \n"
"fmla v31.8h, v22.8h, v15.h[6] \n"
"fmla v28.8h, v23.8h, v12.h[7] \n"
"fmla v29.8h, v23.8h, v13.h[7] \n"
"fmla v30.8h, v23.8h, v14.h[7] \n"
"fmla v31.8h, v23.8h, v15.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v1.h[0] \n"
"fmla v30.8h, v16.8h, v2.h[0] \n"
"fmla v31.8h, v16.8h, v3.h[0] \n"
"fmla v28.8h, v17.8h, v0.h[1] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%5] \n" // r44 r45 r46 r47
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v1.h[2] \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v18.8h, v3.h[2] \n"
"fmla v28.8h, v19.8h, v0.h[3] \n"
"fmla v29.8h, v19.8h, v1.h[3] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v1.h[4] \n"
"fmla v30.8h, v20.8h, v2.h[4] \n"
"fmla v31.8h, v20.8h, v3.h[4] \n"
"fmla v28.8h, v21.8h, v0.h[5] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v1.h[6] \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v22.8h, v3.h[6] \n"
"fmla v28.8h, v23.8h, v0.h[7] \n"
"fmla v29.8h, v23.8h, v1.h[7] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v16.8h, v3.h[0] \n"
"fmla v31.8h, v16.8h, v4.h[0] \n"
"fmla v28.8h, v17.8h, v1.h[1] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"fmla v30.8h, v17.8h, v3.h[1] \n"
"fmla v31.8h, v17.8h, v4.h[1] \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v18.8h, v3.h[2] \n"
"fmla v31.8h, v18.8h, v4.h[2] \n"
"fmla v28.8h, v19.8h, v1.h[3] \n"
"fmla v29.8h, v19.8h, v2.h[3] \n"
"fmla v30.8h, v19.8h, v3.h[3] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v20.8h, v3.h[4] \n"
"fmla v31.8h, v20.8h, v4.h[4] \n"
"fmla v28.8h, v21.8h, v1.h[5] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"fmla v30.8h, v21.8h, v3.h[5] \n"
"fmla v31.8h, v21.8h, v4.h[5] \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v22.8h, v3.h[6] \n"
"fmla v31.8h, v22.8h, v4.h[6] \n"
"fmla v28.8h, v23.8h, v1.h[7] \n"
"fmla v29.8h, v23.8h, v2.h[7] \n"
"fmla v30.8h, v23.8h, v3.h[7] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v16.8h, v4.h[0] \n"
"fmla v31.8h, v16.8h, v5.h[0] \n"
"fmla v28.8h, v17.8h, v2.h[1] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v5.h[1] \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v18.8h, v5.h[2] \n"
"fmla v28.8h, v19.8h, v2.h[3] \n"
"fmla v29.8h, v19.8h, v3.h[3] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v5.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v20.8h, v4.h[4] \n"
"fmla v31.8h, v20.8h, v5.h[4] \n"
"fmla v28.8h, v21.8h, v2.h[5] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v5.h[5] \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v22.8h, v5.h[6] \n"
"fmla v28.8h, v23.8h, v2.h[7] \n"
"fmla v29.8h, v23.8h, v3.h[7] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v5.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v3.h[0] \n"
"fmla v29.8h, v16.8h, v4.h[0] \n"
"fmla v30.8h, v16.8h, v5.h[0] \n"
"fmla v31.8h, v16.8h, v6.h[0] \n"
"fmla v28.8h, v17.8h, v3.h[1] \n"
"fmla v29.8h, v17.8h, v4.h[1] \n"
"fmla v30.8h, v17.8h, v5.h[1] \n"
"fmla v31.8h, v17.8h, v6.h[1] \n"
"fmla v28.8h, v18.8h, v3.h[2] \n"
"fmla v29.8h, v18.8h, v4.h[2] \n"
"fmla v30.8h, v18.8h, v5.h[2] \n"
"fmla v31.8h, v18.8h, v6.h[2] \n"
"fmla v28.8h, v19.8h, v3.h[3] \n"
"fmla v29.8h, v19.8h, v4.h[3] \n"
"fmla v30.8h, v19.8h, v5.h[3] \n"
"fmla v31.8h, v19.8h, v6.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v3.h[4] \n"
"fmla v29.8h, v20.8h, v4.h[4] \n"
"fmla v30.8h, v20.8h, v5.h[4] \n"
"fmla v31.8h, v20.8h, v6.h[4] \n"
"fmla v28.8h, v21.8h, v3.h[5] \n"
"fmla v29.8h, v21.8h, v4.h[5] \n"
"fmla v30.8h, v21.8h, v5.h[5] \n"
"fmla v31.8h, v21.8h, v6.h[5] \n"
"fmla v28.8h, v22.8h, v3.h[6] \n"
"fmla v29.8h, v22.8h, v4.h[6] \n"
"fmla v30.8h, v22.8h, v5.h[6] \n"
"fmla v31.8h, v22.8h, v6.h[6] \n"
"fmla v28.8h, v23.8h, v3.h[7] \n"
"fmla v29.8h, v23.8h, v4.h[7] \n"
"fmla v30.8h, v23.8h, v5.h[7] \n"
"fmla v31.8h, v23.8h, v6.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6] \n"
"fmla v28.8h, v16.8h, v4.h[0] \n"
"fmla v29.8h, v16.8h, v5.h[0] \n"
"fmla v30.8h, v16.8h, v6.h[0] \n"
"fmla v31.8h, v16.8h, v7.h[0] \n"
"fmla v28.8h, v17.8h, v4.h[1] \n"
"fmla v29.8h, v17.8h, v5.h[1] \n"
"fmla v30.8h, v17.8h, v6.h[1] \n"
"fmla v31.8h, v17.8h, v7.h[1] \n"
"fmla v28.8h, v18.8h, v4.h[2] \n"
"fmla v29.8h, v18.8h, v5.h[2] \n"
"fmla v30.8h, v18.8h, v6.h[2] \n"
"fmla v31.8h, v18.8h, v7.h[2] \n"
"fmla v28.8h, v19.8h, v4.h[3] \n"
"fmla v29.8h, v19.8h, v5.h[3] \n"
"fmla v30.8h, v19.8h, v6.h[3] \n"
"fmla v31.8h, v19.8h, v7.h[3] \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v20.8h, v5.h[4] \n"
"fmla v30.8h, v20.8h, v6.h[4] \n"
"fmla v31.8h, v20.8h, v7.h[4] \n"
"fmla v28.8h, v21.8h, v4.h[5] \n"
"fmla v29.8h, v21.8h, v5.h[5] \n"
"fmla v30.8h, v21.8h, v6.h[5] \n"
"fmla v31.8h, v21.8h, v7.h[5] \n"
"fmla v28.8h, v22.8h, v4.h[6] \n"
"fmla v29.8h, v22.8h, v5.h[6] \n"
"fmla v30.8h, v22.8h, v6.h[6] \n"
"fmla v31.8h, v22.8h, v7.h[6] \n"
"fmla v28.8h, v23.8h, v4.h[7] \n"
"fmla v29.8h, v23.8h, v5.h[7] \n"
"fmla v30.8h, v23.8h, v6.h[7] \n"
"fmla v31.8h, v23.8h, v7.h[7] \n"
"sub %6, %6, #3136 \n" // kptr -= 24.5 * 128; (24.5 taps x 128 bytes = 3136)
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v0.8h, v1.8h}, [%1], #32 \n" // r00 r01
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v30.8h, v31.8h}, [%0] \n" // sum0 sum1
"fmul v28.8h, v16.8h, v0.h[0] \n"
"fmul v29.8h, v16.8h, v1.h[0] \n"
"fmla v30.8h, v17.8h, v0.h[1] \n"
"fmla v31.8h, v17.8h, v1.h[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v1.h[2] \n"
"fmla v30.8h, v19.8h, v0.h[3] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v1.h[4] \n"
"fmla v30.8h, v21.8h, v0.h[5] \n"
"fmla v31.8h, v21.8h, v1.h[5] \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v2.8h, v3.8h, v4.8h, v5.8h}, [%1] \n" // r02 r03 r04 r05
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v1.h[6] \n"
"fmla v30.8h, v23.8h, v0.h[7] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v17.8h, v1.h[1] \n"
"fmla v31.8h, v17.8h, v2.h[1] \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v19.8h, v1.h[3] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v21.8h, v1.h[5] \n"
"fmla v31.8h, v21.8h, v2.h[5] \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v23.8h, v1.h[7] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v3.h[0] \n"
"fmla v29.8h, v16.8h, v4.h[0] \n"
"fmla v30.8h, v17.8h, v3.h[1] \n"
"fmla v31.8h, v17.8h, v4.h[1] \n"
"fmla v28.8h, v18.8h, v3.h[2] \n"
"fmla v29.8h, v18.8h, v4.h[2] \n"
"fmla v30.8h, v19.8h, v3.h[3] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v3.h[4] \n"
"fmla v29.8h, v20.8h, v4.h[4] \n"
"fmla v30.8h, v21.8h, v3.h[5] \n"
"fmla v31.8h, v21.8h, v4.h[5] \n"
"fmla v28.8h, v22.8h, v3.h[6] \n"
"fmla v29.8h, v22.8h, v4.h[6] \n"
"fmla v30.8h, v23.8h, v3.h[7] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v4.h[0] \n"
"fmla v29.8h, v16.8h, v5.h[0] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v5.h[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v8.8h, v9.8h}, [%2], #32 \n" // r10 r11
"fmla v28.8h, v18.8h, v4.h[2] \n"
"fmla v29.8h, v18.8h, v5.h[2] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v5.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v20.8h, v5.h[4] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v5.h[5] \n"
"fmla v28.8h, v22.8h, v4.h[6] \n"
"fmla v29.8h, v22.8h, v5.h[6] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v5.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v30.8h, v16.8h, v8.h[0] \n"
"fmla v31.8h, v16.8h, v9.h[0] \n"
"fmla v30.8h, v17.8h, v8.h[1] \n"
"fmla v31.8h, v17.8h, v9.h[1] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v10.8h, v11.8h, v12.8h, v13.8h}, [%2] \n" // r12 r13 r14 r15
"fmla v30.8h, v18.8h, v8.h[2] \n"
"fmla v31.8h, v18.8h, v9.h[2] \n"
"fmla v30.8h, v19.8h, v8.h[3] \n"
"fmla v31.8h, v19.8h, v9.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v30.8h, v20.8h, v8.h[4] \n"
"fmla v31.8h, v20.8h, v9.h[4] \n"
"fmla v30.8h, v21.8h, v8.h[5] \n"
"fmla v31.8h, v21.8h, v9.h[5] \n"
"fmla v30.8h, v22.8h, v8.h[6] \n"
"fmla v31.8h, v22.8h, v9.h[6] \n"
"fmla v30.8h, v23.8h, v8.h[7] \n"
"fmla v31.8h, v23.8h, v9.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v30.8h, v16.8h, v9.h[0] \n"
"fmla v31.8h, v16.8h, v10.h[0] \n"
"fmla v30.8h, v17.8h, v9.h[1] \n"
"fmla v31.8h, v17.8h, v10.h[1] \n"
"fmla v30.8h, v18.8h, v9.h[2] \n"
"fmla v31.8h, v18.8h, v10.h[2] \n"
"fmla v30.8h, v19.8h, v9.h[3] \n"
"fmla v31.8h, v19.8h, v10.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v30.8h, v20.8h, v9.h[4] \n"
"fmla v31.8h, v20.8h, v10.h[4] \n"
"fmla v30.8h, v21.8h, v9.h[5] \n"
"fmla v31.8h, v21.8h, v10.h[5] \n"
"fmla v30.8h, v22.8h, v9.h[6] \n"
"fmla v31.8h, v22.8h, v10.h[6] \n"
"fmla v30.8h, v23.8h, v9.h[7] \n"
"fmla v31.8h, v23.8h, v10.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v30.8h, v16.8h, v10.h[0] \n"
"fmla v31.8h, v16.8h, v11.h[0] \n"
"fmla v30.8h, v17.8h, v10.h[1] \n"
"fmla v31.8h, v17.8h, v11.h[1] \n"
"fmla v30.8h, v18.8h, v10.h[2] \n"
"fmla v31.8h, v18.8h, v11.h[2] \n"
"fmla v30.8h, v19.8h, v10.h[3] \n"
"fmla v31.8h, v19.8h, v11.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v30.8h, v20.8h, v10.h[4] \n"
"fmla v31.8h, v20.8h, v11.h[4] \n"
"fmla v30.8h, v21.8h, v10.h[5] \n"
"fmla v31.8h, v21.8h, v11.h[5] \n"
"fmla v30.8h, v22.8h, v10.h[6] \n"
"fmla v31.8h, v22.8h, v11.h[6] \n"
"fmla v30.8h, v23.8h, v10.h[7] \n"
"fmla v31.8h, v23.8h, v11.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v30.8h, v16.8h, v11.h[0] \n"
"fmla v31.8h, v16.8h, v12.h[0] \n"
"fmla v30.8h, v17.8h, v11.h[1] \n"
"fmla v31.8h, v17.8h, v12.h[1] \n"
"fmla v30.8h, v18.8h, v11.h[2] \n"
"fmla v31.8h, v18.8h, v12.h[2] \n"
"fmla v30.8h, v19.8h, v11.h[3] \n"
"fmla v31.8h, v19.8h, v12.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v30.8h, v20.8h, v11.h[4] \n"
"fmla v31.8h, v20.8h, v12.h[4] \n"
"fmla v30.8h, v21.8h, v11.h[5] \n"
"fmla v31.8h, v21.8h, v12.h[5] \n"
"fmla v30.8h, v22.8h, v11.h[6] \n"
"fmla v31.8h, v22.8h, v12.h[6] \n"
"fmla v30.8h, v23.8h, v11.h[7] \n"
"fmla v31.8h, v23.8h, v12.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v30.8h, v16.8h, v12.h[0] \n"
"fmla v31.8h, v16.8h, v13.h[0] \n"
"fmla v30.8h, v17.8h, v12.h[1] \n"
"fmla v31.8h, v17.8h, v13.h[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.8h, v1.8h}, [%3], #32 \n" // r20 r21
"fmla v30.8h, v18.8h, v12.h[2] \n"
"fmla v31.8h, v18.8h, v13.h[2] \n"
"fmla v30.8h, v19.8h, v12.h[3] \n"
"fmla v31.8h, v19.8h, v13.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v30.8h, v20.8h, v12.h[4] \n"
"fmla v31.8h, v20.8h, v13.h[4] \n"
"fmla v30.8h, v21.8h, v12.h[5] \n"
"fmla v31.8h, v21.8h, v13.h[5] \n"
"fmla v30.8h, v22.8h, v12.h[6] \n"
"fmla v31.8h, v22.8h, v13.h[6] \n"
"fmla v30.8h, v23.8h, v12.h[7] \n"
"fmla v31.8h, v23.8h, v13.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v1.h[0] \n"
"fmla v30.8h, v17.8h, v0.h[1] \n"
"fmla v31.8h, v17.8h, v1.h[1] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v2.8h, v3.8h, v4.8h, v5.8h}, [%3] \n" // r22 r23 r24 r25
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v1.h[2] \n"
"fmla v30.8h, v19.8h, v0.h[3] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v1.h[4] \n"
"fmla v30.8h, v21.8h, v0.h[5] \n"
"fmla v31.8h, v21.8h, v1.h[5] \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v1.h[6] \n"
"fmla v30.8h, v23.8h, v0.h[7] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v17.8h, v1.h[1] \n"
"fmla v31.8h, v17.8h, v2.h[1] \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v19.8h, v1.h[3] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v21.8h, v1.h[5] \n"
"fmla v31.8h, v21.8h, v2.h[5] \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v23.8h, v1.h[7] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v3.h[0] \n"
"fmla v29.8h, v16.8h, v4.h[0] \n"
"fmla v30.8h, v17.8h, v3.h[1] \n"
"fmla v31.8h, v17.8h, v4.h[1] \n"
"fmla v28.8h, v18.8h, v3.h[2] \n"
"fmla v29.8h, v18.8h, v4.h[2] \n"
"fmla v30.8h, v19.8h, v3.h[3] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v3.h[4] \n"
"fmla v29.8h, v20.8h, v4.h[4] \n"
"fmla v30.8h, v21.8h, v3.h[5] \n"
"fmla v31.8h, v21.8h, v4.h[5] \n"
"fmla v28.8h, v22.8h, v3.h[6] \n"
"fmla v29.8h, v22.8h, v4.h[6] \n"
"fmla v30.8h, v23.8h, v3.h[7] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v4.h[0] \n"
"fmla v29.8h, v16.8h, v5.h[0] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v5.h[1] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v8.8h, v9.8h}, [%4], #32 \n" // r30 r31
"fmla v28.8h, v18.8h, v4.h[2] \n"
"fmla v29.8h, v18.8h, v5.h[2] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v5.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v20.8h, v5.h[4] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v5.h[5] \n"
"fmla v28.8h, v22.8h, v4.h[6] \n"
"fmla v29.8h, v22.8h, v5.h[6] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v5.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v30.8h, v16.8h, v8.h[0] \n"
"fmla v31.8h, v16.8h, v9.h[0] \n"
"fmla v30.8h, v17.8h, v8.h[1] \n"
"fmla v31.8h, v17.8h, v9.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v10.8h, v11.8h, v12.8h, v13.8h}, [%4] \n" // r32 r33 r34 r35
"fmla v30.8h, v18.8h, v8.h[2] \n"
"fmla v31.8h, v18.8h, v9.h[2] \n"
"fmla v30.8h, v19.8h, v8.h[3] \n"
"fmla v31.8h, v19.8h, v9.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v30.8h, v20.8h, v8.h[4] \n"
"fmla v31.8h, v20.8h, v9.h[4] \n"
"fmla v30.8h, v21.8h, v8.h[5] \n"
"fmla v31.8h, v21.8h, v9.h[5] \n"
"fmla v30.8h, v22.8h, v8.h[6] \n"
"fmla v31.8h, v22.8h, v9.h[6] \n"
"fmla v30.8h, v23.8h, v8.h[7] \n"
"fmla v31.8h, v23.8h, v9.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v30.8h, v16.8h, v9.h[0] \n"
"fmla v31.8h, v16.8h, v10.h[0] \n"
"fmla v30.8h, v17.8h, v9.h[1] \n"
"fmla v31.8h, v17.8h, v10.h[1] \n"
"fmla v30.8h, v18.8h, v9.h[2] \n"
"fmla v31.8h, v18.8h, v10.h[2] \n"
"fmla v30.8h, v19.8h, v9.h[3] \n"
"fmla v31.8h, v19.8h, v10.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v30.8h, v20.8h, v9.h[4] \n"
"fmla v31.8h, v20.8h, v10.h[4] \n"
"fmla v30.8h, v21.8h, v9.h[5] \n"
"fmla v31.8h, v21.8h, v10.h[5] \n"
"fmla v30.8h, v22.8h, v9.h[6] \n"
"fmla v31.8h, v22.8h, v10.h[6] \n"
"fmla v30.8h, v23.8h, v9.h[7] \n"
"fmla v31.8h, v23.8h, v10.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v30.8h, v16.8h, v10.h[0] \n"
"fmla v31.8h, v16.8h, v11.h[0] \n"
"fmla v30.8h, v17.8h, v10.h[1] \n"
"fmla v31.8h, v17.8h, v11.h[1] \n"
"fmla v30.8h, v18.8h, v10.h[2] \n"
"fmla v31.8h, v18.8h, v11.h[2] \n"
"fmla v30.8h, v19.8h, v10.h[3] \n"
"fmla v31.8h, v19.8h, v11.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v30.8h, v20.8h, v10.h[4] \n"
"fmla v31.8h, v20.8h, v11.h[4] \n"
"fmla v30.8h, v21.8h, v10.h[5] \n"
"fmla v31.8h, v21.8h, v11.h[5] \n"
"fmla v30.8h, v22.8h, v10.h[6] \n"
"fmla v31.8h, v22.8h, v11.h[6] \n"
"fmla v30.8h, v23.8h, v10.h[7] \n"
"fmla v31.8h, v23.8h, v11.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v30.8h, v16.8h, v11.h[0] \n"
"fmla v31.8h, v16.8h, v12.h[0] \n"
"fmla v30.8h, v17.8h, v11.h[1] \n"
"fmla v31.8h, v17.8h, v12.h[1] \n"
"fmla v30.8h, v18.8h, v11.h[2] \n"
"fmla v31.8h, v18.8h, v12.h[2] \n"
"fmla v30.8h, v19.8h, v11.h[3] \n"
"fmla v31.8h, v19.8h, v12.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v30.8h, v20.8h, v11.h[4] \n"
"fmla v31.8h, v20.8h, v12.h[4] \n"
"fmla v30.8h, v21.8h, v11.h[5] \n"
"fmla v31.8h, v21.8h, v12.h[5] \n"
"fmla v30.8h, v22.8h, v11.h[6] \n"
"fmla v31.8h, v22.8h, v12.h[6] \n"
"fmla v30.8h, v23.8h, v11.h[7] \n"
"fmla v31.8h, v23.8h, v12.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v30.8h, v16.8h, v12.h[0] \n"
"fmla v31.8h, v16.8h, v13.h[0] \n"
"fmla v30.8h, v17.8h, v12.h[1] \n"
"fmla v31.8h, v17.8h, v13.h[1] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.8h, v1.8h}, [%5], #32 \n" // r40 r41
"fmla v30.8h, v18.8h, v12.h[2] \n"
"fmla v31.8h, v18.8h, v13.h[2] \n"
"fmla v30.8h, v19.8h, v12.h[3] \n"
"fmla v31.8h, v19.8h, v13.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v30.8h, v20.8h, v12.h[4] \n"
"fmla v31.8h, v20.8h, v13.h[4] \n"
"fmla v30.8h, v21.8h, v12.h[5] \n"
"fmla v31.8h, v21.8h, v13.h[5] \n"
"fmla v30.8h, v22.8h, v12.h[6] \n"
"fmla v31.8h, v22.8h, v13.h[6] \n"
"fmla v30.8h, v23.8h, v12.h[7] \n"
"fmla v31.8h, v23.8h, v13.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v1.h[0] \n"
"fmla v30.8h, v17.8h, v0.h[1] \n"
"fmla v31.8h, v17.8h, v1.h[1] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v2.8h, v3.8h, v4.8h, v5.8h}, [%5] \n" // r42 r43 r44 r45
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v1.h[2] \n"
"fmla v30.8h, v19.8h, v0.h[3] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v1.h[4] \n"
"fmla v30.8h, v21.8h, v0.h[5] \n"
"fmla v31.8h, v21.8h, v1.h[5] \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v1.h[6] \n"
"fmla v30.8h, v23.8h, v0.h[7] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v17.8h, v1.h[1] \n"
"fmla v31.8h, v17.8h, v2.h[1] \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v19.8h, v1.h[3] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v21.8h, v1.h[5] \n"
"fmla v31.8h, v21.8h, v2.h[5] \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v23.8h, v1.h[7] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v3.h[0] \n"
"fmla v29.8h, v16.8h, v4.h[0] \n"
"fmla v30.8h, v17.8h, v3.h[1] \n"
"fmla v31.8h, v17.8h, v4.h[1] \n"
"fmla v28.8h, v18.8h, v3.h[2] \n"
"fmla v29.8h, v18.8h, v4.h[2] \n"
"fmla v30.8h, v19.8h, v3.h[3] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v3.h[4] \n"
"fmla v29.8h, v20.8h, v4.h[4] \n"
"fmla v30.8h, v21.8h, v3.h[5] \n"
"fmla v31.8h, v21.8h, v4.h[5] \n"
"fmla v28.8h, v22.8h, v3.h[6] \n"
"fmla v29.8h, v22.8h, v4.h[6] \n"
"fmla v30.8h, v23.8h, v3.h[7] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6] \n"
"fmla v28.8h, v16.8h, v4.h[0] \n"
"fmla v29.8h, v16.8h, v5.h[0] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v5.h[1] \n"
"fmla v28.8h, v18.8h, v4.h[2] \n"
"fmla v29.8h, v18.8h, v5.h[2] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v5.h[3] \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v20.8h, v5.h[4] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v5.h[5] \n"
"fmla v28.8h, v22.8h, v4.h[6] \n"
"fmla v29.8h, v22.8h, v5.h[6] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v5.h[7] \n"
"fadd v28.8h, v28.8h, v30.8h \n"
"fadd v29.8h, v29.8h, v31.8h \n"
"sub %6, %6, #3136 \n" // kptr -= 24.5 * 128; (24.5 taps x 128 bytes = 3136)
"st1 {v28.8h, v29.8h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.8h}, [%1], #16 \n" // r00
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v31.8h}, [%0] \n" // sum0
"fmul v28.8h, v16.8h, v0.h[0] \n"
"fmul v29.8h, v17.8h, v0.h[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmul v30.8h, v18.8h, v0.h[2] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v21.8h, v0.h[5] \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v1.8h, v2.8h, v3.8h, v4.8h}, [%1] \n" // r01 r02 r03 r04
"fmla v30.8h, v22.8h, v0.h[6] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"fmla v30.8h, v18.8h, v1.h[2] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"fmla v30.8h, v22.8h, v1.h[6] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v3.h[0] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"fmla v30.8h, v18.8h, v3.h[2] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v3.h[4] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"fmla v30.8h, v22.8h, v3.h[6] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v4.h[0] \n"
"fmla v29.8h, v17.8h, v4.h[1] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v8.8h}, [%2], #16 \n" // r10
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v21.8h, v4.h[5] \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v8.h[0] \n"
"fmla v29.8h, v17.8h, v8.h[1] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v9.8h, v10.8h, v11.8h, v12.8h}, [%2] \n" // r11 r12 r13 r14
"fmla v30.8h, v18.8h, v8.h[2] \n"
"fmla v31.8h, v19.8h, v8.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v8.h[4] \n"
"fmla v29.8h, v21.8h, v8.h[5] \n"
"fmla v30.8h, v22.8h, v8.h[6] \n"
"fmla v31.8h, v23.8h, v8.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v9.h[0] \n"
"fmla v29.8h, v17.8h, v9.h[1] \n"
"fmla v30.8h, v18.8h, v9.h[2] \n"
"fmla v31.8h, v19.8h, v9.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v9.h[4] \n"
"fmla v29.8h, v21.8h, v9.h[5] \n"
"fmla v30.8h, v22.8h, v9.h[6] \n"
"fmla v31.8h, v23.8h, v9.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v10.h[0] \n"
"fmla v29.8h, v17.8h, v10.h[1] \n"
"fmla v30.8h, v18.8h, v10.h[2] \n"
"fmla v31.8h, v19.8h, v10.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v10.h[4] \n"
"fmla v29.8h, v21.8h, v10.h[5] \n"
"fmla v30.8h, v22.8h, v10.h[6] \n"
"fmla v31.8h, v23.8h, v10.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v11.h[0] \n"
"fmla v29.8h, v17.8h, v11.h[1] \n"
"fmla v30.8h, v18.8h, v11.h[2] \n"
"fmla v31.8h, v19.8h, v11.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v11.h[4] \n"
"fmla v29.8h, v21.8h, v11.h[5] \n"
"fmla v30.8h, v22.8h, v11.h[6] \n"
"fmla v31.8h, v23.8h, v11.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v12.h[0] \n"
"fmla v29.8h, v17.8h, v12.h[1] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.8h}, [%3], #16 \n" // r20
"fmla v30.8h, v18.8h, v12.h[2] \n"
"fmla v31.8h, v19.8h, v12.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v12.h[4] \n"
"fmla v29.8h, v21.8h, v12.h[5] \n"
"fmla v30.8h, v22.8h, v12.h[6] \n"
"fmla v31.8h, v23.8h, v12.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v17.8h, v0.h[1] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v1.8h, v2.8h, v3.8h, v4.8h}, [%3] \n" // r21 r22 r23 r24
"fmla v30.8h, v18.8h, v0.h[2] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v21.8h, v0.h[5] \n"
"fmla v30.8h, v22.8h, v0.h[6] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"fmla v30.8h, v18.8h, v1.h[2] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"fmla v30.8h, v22.8h, v1.h[6] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v3.h[0] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"fmla v30.8h, v18.8h, v3.h[2] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v3.h[4] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"fmla v30.8h, v22.8h, v3.h[6] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v4.h[0] \n"
"fmla v29.8h, v17.8h, v4.h[1] \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v8.8h}, [%4], #16 \n" // r30
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v21.8h, v4.h[5] \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v8.h[0] \n"
"fmla v29.8h, v17.8h, v8.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v9.8h, v10.8h, v11.8h, v12.8h}, [%4] \n" // r31 r32 r33 r34
"fmla v30.8h, v18.8h, v8.h[2] \n"
"fmla v31.8h, v19.8h, v8.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v8.h[4] \n"
"fmla v29.8h, v21.8h, v8.h[5] \n"
"fmla v30.8h, v22.8h, v8.h[6] \n"
"fmla v31.8h, v23.8h, v8.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v9.h[0] \n"
"fmla v29.8h, v17.8h, v9.h[1] \n"
"fmla v30.8h, v18.8h, v9.h[2] \n"
"fmla v31.8h, v19.8h, v9.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v9.h[4] \n"
"fmla v29.8h, v21.8h, v9.h[5] \n"
"fmla v30.8h, v22.8h, v9.h[6] \n"
"fmla v31.8h, v23.8h, v9.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v10.h[0] \n"
"fmla v29.8h, v17.8h, v10.h[1] \n"
"fmla v30.8h, v18.8h, v10.h[2] \n"
"fmla v31.8h, v19.8h, v10.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v10.h[4] \n"
"fmla v29.8h, v21.8h, v10.h[5] \n"
"fmla v30.8h, v22.8h, v10.h[6] \n"
"fmla v31.8h, v23.8h, v10.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v11.h[0] \n"
"fmla v29.8h, v17.8h, v11.h[1] \n"
"fmla v30.8h, v18.8h, v11.h[2] \n"
"fmla v31.8h, v19.8h, v11.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v11.h[4] \n"
"fmla v29.8h, v21.8h, v11.h[5] \n"
"fmla v30.8h, v22.8h, v11.h[6] \n"
"fmla v31.8h, v23.8h, v11.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v12.h[0] \n"
"fmla v29.8h, v17.8h, v12.h[1] \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.8h}, [%5], #16 \n" // r40
"fmla v30.8h, v18.8h, v12.h[2] \n"
"fmla v31.8h, v19.8h, v12.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v12.h[4] \n"
"fmla v29.8h, v21.8h, v12.h[5] \n"
"fmla v30.8h, v22.8h, v12.h[6] \n"
"fmla v31.8h, v23.8h, v12.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v17.8h, v0.h[1] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v1.8h, v2.8h, v3.8h, v4.8h}, [%5] \n" // r41 r42 r43 r44
"fmla v30.8h, v18.8h, v0.h[2] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v21.8h, v0.h[5] \n"
"fmla v30.8h, v22.8h, v0.h[6] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"fmla v30.8h, v18.8h, v1.h[2] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"fmla v30.8h, v22.8h, v1.h[6] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
"fmla v28.8h, v16.8h, v3.h[0] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"fmla v30.8h, v18.8h, v3.h[2] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
"fmla v28.8h, v20.8h, v3.h[4] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"fmla v30.8h, v22.8h, v3.h[6] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6] \n"
"fmla v28.8h, v16.8h, v4.h[0] \n"
"fmla v29.8h, v17.8h, v4.h[1] \n"
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v21.8h, v4.h[5] \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"fadd v28.8h, v28.8h, v29.8h \n"
"fadd v30.8h, v30.8h, v31.8h \n"
"fadd v28.8h, v28.8h, v30.8h \n"
"sub %6, %6, #3136 \n" // kptr -= 24.5 * 128; (24.5 taps x 128 bytes = 3136)
"st1 {v28.8h}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
r0 += 4 * 8;
r1 += 4 * 8;
r2 += 4 * 8;
r3 += 4 * 8;
r4 += 4 * 8;
}
}
}
}
// 5x5, stride-2 convolution for pack-8 fp16 storage with fp16 arithmetic,
// implemented as hand-scheduled AArch64 NEON inline assembly.
// Each "pack" is 8 fp16 lanes; every ld1 {vN.8h,...} pulls whole packs.
// The kernel pointer (%6) walks 25 taps x (8 in-lanes x 8 out-lanes x 2B) =
// 3200 bytes per input channel and is rewound after every output pixel.
// NOTE(review): assumes bottom_blob/top_blob are pack8 fp16 Mats prepared by
// the caller (elempack == 8) -- confirm against the dispatch site.
static void conv5x5s2_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    // stride 2: after finishing a row, skip the unread remainder of this input
    // row plus one whole input row (the row consumed by the vertical stride),
    // in fp16 lanes (x8 per pack)
    const int tailstep = (w - 2 * outw + w) * 8;
    const __fp16* bias = _bias;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);
        // seed the whole output channel with its bias pack (zeros when no bias)
        float16x8_t _bias0 = bias ? vld1q_f16(bias + p * 8) : vdupq_n_f16(0.f);
        out0.fill(_bias0);
        int q = 0;
        // accumulate the contribution of every input channel into out0
        for (; q < inch; q++)
        {
            __fp16* outptr0 = out0;
            const Mat img0 = bottom_blob.channel(q);
            // five input rows feeding one output row (5x5 kernel, stride 2)
            const __fp16* r0 = img0.row<const __fp16>(0);
            const __fp16* r1 = img0.row<const __fp16>(1);
            const __fp16* r2 = img0.row<const __fp16>(2);
            const __fp16* r3 = img0.row<const __fp16>(3);
            const __fp16* r4 = img0.row<const __fp16>(4);
            const __fp16* kptr = kernel.channel(p).row<const __fp16>(q);
            int i = 0;
            for (; i < outh; i++)
            {
                int j = 0;
                // main loop: two output pixels per iteration.
                // Accumulator pairs (grounded by the final fadd/st1): pixel 0
                // sums in v28+v30, pixel 1 sums in v29+v31; v30/v31 start from
                // the previously stored partial sums, v28/v29 start via fmul.
                for (; j + 1 < outw; j += 2)
                {
                    asm volatile(
                        "prfm pldl1keep, [%1, #512] \n"
                        "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" // r00 r01 r02 r03
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "prfm pldl1keep, [%0, #256] \n"
                        "ld1 {v30.8h, v31.8h}, [%0] \n" // sum0 sum1
                        "fmul v28.8h, v16.8h, v0.h[0] \n"
                        "fmul v29.8h, v16.8h, v2.h[0] \n"
                        "fmla v30.8h, v17.8h, v0.h[1] \n"
                        "fmla v31.8h, v17.8h, v2.h[1] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v18.8h, v0.h[2] \n"
                        "fmla v29.8h, v18.8h, v2.h[2] \n"
                        "fmla v30.8h, v19.8h, v0.h[3] \n"
                        "fmla v31.8h, v19.8h, v2.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v0.h[4] \n"
                        "fmla v29.8h, v20.8h, v2.h[4] \n"
                        "fmla v30.8h, v21.8h, v0.h[5] \n"
                        "fmla v31.8h, v21.8h, v2.h[5] \n"
                        "fmla v28.8h, v22.8h, v0.h[6] \n"
                        "fmla v29.8h, v22.8h, v2.h[6] \n"
                        "fmla v30.8h, v23.8h, v0.h[7] \n"
                        "fmla v31.8h, v23.8h, v2.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v1.h[0] \n"
                        "fmla v29.8h, v16.8h, v3.h[0] \n"
                        "fmla v30.8h, v17.8h, v1.h[1] \n"
                        "fmla v31.8h, v17.8h, v3.h[1] \n"
                        "prfm pldl1keep, [%1, #384] \n"
                        "ld1 {v4.8h, v5.8h, v6.8h}, [%1] \n" // r04 r05 r06
                        "fmla v28.8h, v18.8h, v1.h[2] \n"
                        "fmla v29.8h, v18.8h, v3.h[2] \n"
                        "fmla v30.8h, v19.8h, v1.h[3] \n"
                        "fmla v31.8h, v19.8h, v3.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v1.h[4] \n"
                        "fmla v29.8h, v20.8h, v3.h[4] \n"
                        "fmla v30.8h, v21.8h, v1.h[5] \n"
                        "fmla v31.8h, v21.8h, v3.h[5] \n"
                        "fmla v28.8h, v22.8h, v1.h[6] \n"
                        "fmla v29.8h, v22.8h, v3.h[6] \n"
                        "fmla v30.8h, v23.8h, v1.h[7] \n"
                        "fmla v31.8h, v23.8h, v3.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v2.h[0] \n"
                        "fmla v29.8h, v16.8h, v4.h[0] \n"
                        "fmla v30.8h, v17.8h, v2.h[1] \n"
                        "fmla v31.8h, v17.8h, v4.h[1] \n"
                        "fmla v28.8h, v18.8h, v2.h[2] \n"
                        "fmla v29.8h, v18.8h, v4.h[2] \n"
                        "fmla v30.8h, v19.8h, v2.h[3] \n"
                        "fmla v31.8h, v19.8h, v4.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v2.h[4] \n"
                        "fmla v29.8h, v20.8h, v4.h[4] \n"
                        "fmla v30.8h, v21.8h, v2.h[5] \n"
                        "fmla v31.8h, v21.8h, v4.h[5] \n"
                        "fmla v28.8h, v22.8h, v2.h[6] \n"
                        "fmla v29.8h, v22.8h, v4.h[6] \n"
                        "fmla v30.8h, v23.8h, v2.h[7] \n"
                        "fmla v31.8h, v23.8h, v4.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v3.h[0] \n"
                        "fmla v29.8h, v16.8h, v5.h[0] \n"
                        "fmla v30.8h, v17.8h, v3.h[1] \n"
                        "fmla v31.8h, v17.8h, v5.h[1] \n"
                        "fmla v28.8h, v18.8h, v3.h[2] \n"
                        "fmla v29.8h, v18.8h, v5.h[2] \n"
                        "fmla v30.8h, v19.8h, v3.h[3] \n"
                        "fmla v31.8h, v19.8h, v5.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v3.h[4] \n"
                        "fmla v29.8h, v20.8h, v5.h[4] \n"
                        "fmla v30.8h, v21.8h, v3.h[5] \n"
                        "fmla v31.8h, v21.8h, v5.h[5] \n"
                        "fmla v28.8h, v22.8h, v3.h[6] \n"
                        "fmla v29.8h, v22.8h, v5.h[6] \n"
                        "fmla v30.8h, v23.8h, v3.h[7] \n"
                        "fmla v31.8h, v23.8h, v5.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v4.h[0] \n"
                        "fmla v29.8h, v16.8h, v6.h[0] \n"
                        "fmla v30.8h, v17.8h, v4.h[1] \n"
                        "fmla v31.8h, v17.8h, v6.h[1] \n"
                        "prfm pldl1keep, [%2, #512] \n"
                        "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r10 r11 r12 r13
                        "fmla v28.8h, v18.8h, v4.h[2] \n"
                        "fmla v29.8h, v18.8h, v6.h[2] \n"
                        "fmla v30.8h, v19.8h, v4.h[3] \n"
                        "fmla v31.8h, v19.8h, v6.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v4.h[4] \n"
                        "fmla v29.8h, v20.8h, v6.h[4] \n"
                        "fmla v30.8h, v21.8h, v4.h[5] \n"
                        "fmla v31.8h, v21.8h, v6.h[5] \n"
                        "fmla v28.8h, v22.8h, v4.h[6] \n"
                        "fmla v29.8h, v22.8h, v6.h[6] \n"
                        "fmla v30.8h, v23.8h, v4.h[7] \n"
                        "fmla v31.8h, v23.8h, v6.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v8.h[0] \n"
                        "fmla v29.8h, v16.8h, v10.h[0] \n"
                        "fmla v30.8h, v17.8h, v8.h[1] \n"
                        "fmla v31.8h, v17.8h, v10.h[1] \n"
                        "fmla v28.8h, v18.8h, v8.h[2] \n"
                        "fmla v29.8h, v18.8h, v10.h[2] \n"
                        "fmla v30.8h, v19.8h, v8.h[3] \n"
                        "fmla v31.8h, v19.8h, v10.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v8.h[4] \n"
                        "fmla v29.8h, v20.8h, v10.h[4] \n"
                        "fmla v30.8h, v21.8h, v8.h[5] \n"
                        "fmla v31.8h, v21.8h, v10.h[5] \n"
                        "fmla v28.8h, v22.8h, v8.h[6] \n"
                        "fmla v29.8h, v22.8h, v10.h[6] \n"
                        "fmla v30.8h, v23.8h, v8.h[7] \n"
                        "fmla v31.8h, v23.8h, v10.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v9.h[0] \n"
                        "fmla v29.8h, v16.8h, v11.h[0] \n"
                        "fmla v30.8h, v17.8h, v9.h[1] \n"
                        "fmla v31.8h, v17.8h, v11.h[1] \n"
                        "prfm pldl1keep, [%2, #384] \n"
                        "ld1 {v12.8h, v13.8h, v14.8h}, [%2] \n" // r14 r15 r16
                        "fmla v28.8h, v18.8h, v9.h[2] \n"
                        "fmla v29.8h, v18.8h, v11.h[2] \n"
                        "fmla v30.8h, v19.8h, v9.h[3] \n"
                        "fmla v31.8h, v19.8h, v11.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v9.h[4] \n"
                        "fmla v29.8h, v20.8h, v11.h[4] \n"
                        "fmla v30.8h, v21.8h, v9.h[5] \n"
                        "fmla v31.8h, v21.8h, v11.h[5] \n"
                        "fmla v28.8h, v22.8h, v9.h[6] \n"
                        "fmla v29.8h, v22.8h, v11.h[6] \n"
                        "fmla v30.8h, v23.8h, v9.h[7] \n"
                        "fmla v31.8h, v23.8h, v11.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v10.h[0] \n"
                        "fmla v29.8h, v16.8h, v12.h[0] \n"
                        "fmla v30.8h, v17.8h, v10.h[1] \n"
                        "fmla v31.8h, v17.8h, v12.h[1] \n"
                        "fmla v28.8h, v18.8h, v10.h[2] \n"
                        "fmla v29.8h, v18.8h, v12.h[2] \n"
                        "fmla v30.8h, v19.8h, v10.h[3] \n"
                        "fmla v31.8h, v19.8h, v12.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v10.h[4] \n"
                        "fmla v29.8h, v20.8h, v12.h[4] \n"
                        "fmla v30.8h, v21.8h, v10.h[5] \n"
                        "fmla v31.8h, v21.8h, v12.h[5] \n"
                        "fmla v28.8h, v22.8h, v10.h[6] \n"
                        "fmla v29.8h, v22.8h, v12.h[6] \n"
                        "fmla v30.8h, v23.8h, v10.h[7] \n"
                        "fmla v31.8h, v23.8h, v12.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v11.h[0] \n"
                        "fmla v29.8h, v16.8h, v13.h[0] \n"
                        "fmla v30.8h, v17.8h, v11.h[1] \n"
                        "fmla v31.8h, v17.8h, v13.h[1] \n"
                        "fmla v28.8h, v18.8h, v11.h[2] \n"
                        "fmla v29.8h, v18.8h, v13.h[2] \n"
                        "fmla v30.8h, v19.8h, v11.h[3] \n"
                        "fmla v31.8h, v19.8h, v13.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v11.h[4] \n"
                        "fmla v29.8h, v20.8h, v13.h[4] \n"
                        "fmla v30.8h, v21.8h, v11.h[5] \n"
                        "fmla v31.8h, v21.8h, v13.h[5] \n"
                        "fmla v28.8h, v22.8h, v11.h[6] \n"
                        "fmla v29.8h, v22.8h, v13.h[6] \n"
                        "fmla v30.8h, v23.8h, v11.h[7] \n"
                        "fmla v31.8h, v23.8h, v13.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v12.h[0] \n"
                        "fmla v29.8h, v16.8h, v14.h[0] \n"
                        "fmla v30.8h, v17.8h, v12.h[1] \n"
                        "fmla v31.8h, v17.8h, v14.h[1] \n"
                        "prfm pldl1keep, [%3, #512] \n"
                        "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r20 r21 r22 r23
                        "fmla v28.8h, v18.8h, v12.h[2] \n"
                        "fmla v29.8h, v18.8h, v14.h[2] \n"
                        "fmla v30.8h, v19.8h, v12.h[3] \n"
                        "fmla v31.8h, v19.8h, v14.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v12.h[4] \n"
                        "fmla v29.8h, v20.8h, v14.h[4] \n"
                        "fmla v30.8h, v21.8h, v12.h[5] \n"
                        "fmla v31.8h, v21.8h, v14.h[5] \n"
                        "fmla v28.8h, v22.8h, v12.h[6] \n"
                        "fmla v29.8h, v22.8h, v14.h[6] \n"
                        "fmla v30.8h, v23.8h, v12.h[7] \n"
                        "fmla v31.8h, v23.8h, v14.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v0.h[0] \n"
                        "fmla v29.8h, v16.8h, v2.h[0] \n"
                        "fmla v30.8h, v17.8h, v0.h[1] \n"
                        "fmla v31.8h, v17.8h, v2.h[1] \n"
                        "fmla v28.8h, v18.8h, v0.h[2] \n"
                        "fmla v29.8h, v18.8h, v2.h[2] \n"
                        "fmla v30.8h, v19.8h, v0.h[3] \n"
                        "fmla v31.8h, v19.8h, v2.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v0.h[4] \n"
                        "fmla v29.8h, v20.8h, v2.h[4] \n"
                        "fmla v30.8h, v21.8h, v0.h[5] \n"
                        "fmla v31.8h, v21.8h, v2.h[5] \n"
                        "fmla v28.8h, v22.8h, v0.h[6] \n"
                        "fmla v29.8h, v22.8h, v2.h[6] \n"
                        "fmla v30.8h, v23.8h, v0.h[7] \n"
                        "fmla v31.8h, v23.8h, v2.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v1.h[0] \n"
                        "fmla v29.8h, v16.8h, v3.h[0] \n"
                        "fmla v30.8h, v17.8h, v1.h[1] \n"
                        "fmla v31.8h, v17.8h, v3.h[1] \n"
                        "prfm pldl1keep, [%3, #384] \n"
                        "ld1 {v4.8h, v5.8h, v6.8h}, [%3] \n" // r24 r25 r26
                        "fmla v28.8h, v18.8h, v1.h[2] \n"
                        "fmla v29.8h, v18.8h, v3.h[2] \n"
                        "fmla v30.8h, v19.8h, v1.h[3] \n"
                        "fmla v31.8h, v19.8h, v3.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v1.h[4] \n"
                        "fmla v29.8h, v20.8h, v3.h[4] \n"
                        "fmla v30.8h, v21.8h, v1.h[5] \n"
                        "fmla v31.8h, v21.8h, v3.h[5] \n"
                        "fmla v28.8h, v22.8h, v1.h[6] \n"
                        "fmla v29.8h, v22.8h, v3.h[6] \n"
                        "fmla v30.8h, v23.8h, v1.h[7] \n"
                        "fmla v31.8h, v23.8h, v3.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v2.h[0] \n"
                        "fmla v29.8h, v16.8h, v4.h[0] \n"
                        "fmla v30.8h, v17.8h, v2.h[1] \n"
                        "fmla v31.8h, v17.8h, v4.h[1] \n"
                        "fmla v28.8h, v18.8h, v2.h[2] \n"
                        "fmla v29.8h, v18.8h, v4.h[2] \n"
                        "fmla v30.8h, v19.8h, v2.h[3] \n"
                        "fmla v31.8h, v19.8h, v4.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v2.h[4] \n"
                        "fmla v29.8h, v20.8h, v4.h[4] \n"
                        "fmla v30.8h, v21.8h, v2.h[5] \n"
                        "fmla v31.8h, v21.8h, v4.h[5] \n"
                        "fmla v28.8h, v22.8h, v2.h[6] \n"
                        "fmla v29.8h, v22.8h, v4.h[6] \n"
                        "fmla v30.8h, v23.8h, v2.h[7] \n"
                        "fmla v31.8h, v23.8h, v4.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v3.h[0] \n"
                        "fmla v29.8h, v16.8h, v5.h[0] \n"
                        "fmla v30.8h, v17.8h, v3.h[1] \n"
                        "fmla v31.8h, v17.8h, v5.h[1] \n"
                        "fmla v28.8h, v18.8h, v3.h[2] \n"
                        "fmla v29.8h, v18.8h, v5.h[2] \n"
                        "fmla v30.8h, v19.8h, v3.h[3] \n"
                        "fmla v31.8h, v19.8h, v5.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v3.h[4] \n"
                        "fmla v29.8h, v20.8h, v5.h[4] \n"
                        "fmla v30.8h, v21.8h, v3.h[5] \n"
                        "fmla v31.8h, v21.8h, v5.h[5] \n"
                        "fmla v28.8h, v22.8h, v3.h[6] \n"
                        "fmla v29.8h, v22.8h, v5.h[6] \n"
                        "fmla v30.8h, v23.8h, v3.h[7] \n"
                        "fmla v31.8h, v23.8h, v5.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v4.h[0] \n"
                        "fmla v29.8h, v16.8h, v6.h[0] \n"
                        "fmla v30.8h, v17.8h, v4.h[1] \n"
                        "fmla v31.8h, v17.8h, v6.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%4], #64 \n" // r30 r31 r32 r33
                        "fmla v28.8h, v18.8h, v4.h[2] \n"
                        "fmla v29.8h, v18.8h, v6.h[2] \n"
                        "fmla v30.8h, v19.8h, v4.h[3] \n"
                        "fmla v31.8h, v19.8h, v6.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v4.h[4] \n"
                        "fmla v29.8h, v20.8h, v6.h[4] \n"
                        "fmla v30.8h, v21.8h, v4.h[5] \n"
                        "fmla v31.8h, v21.8h, v6.h[5] \n"
                        "fmla v28.8h, v22.8h, v4.h[6] \n"
                        "fmla v29.8h, v22.8h, v6.h[6] \n"
                        "fmla v30.8h, v23.8h, v4.h[7] \n"
                        "fmla v31.8h, v23.8h, v6.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v8.h[0] \n"
                        "fmla v29.8h, v16.8h, v10.h[0] \n"
                        "fmla v30.8h, v17.8h, v8.h[1] \n"
                        "fmla v31.8h, v17.8h, v10.h[1] \n"
                        "fmla v28.8h, v18.8h, v8.h[2] \n"
                        "fmla v29.8h, v18.8h, v10.h[2] \n"
                        "fmla v30.8h, v19.8h, v8.h[3] \n"
                        "fmla v31.8h, v19.8h, v10.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v8.h[4] \n"
                        "fmla v29.8h, v20.8h, v10.h[4] \n"
                        "fmla v30.8h, v21.8h, v8.h[5] \n"
                        "fmla v31.8h, v21.8h, v10.h[5] \n"
                        "fmla v28.8h, v22.8h, v8.h[6] \n"
                        "fmla v29.8h, v22.8h, v10.h[6] \n"
                        "fmla v30.8h, v23.8h, v8.h[7] \n"
                        "fmla v31.8h, v23.8h, v10.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v9.h[0] \n"
                        "fmla v29.8h, v16.8h, v11.h[0] \n"
                        "fmla v30.8h, v17.8h, v9.h[1] \n"
                        "fmla v31.8h, v17.8h, v11.h[1] \n"
                        "prfm pldl1keep, [%4, #384] \n"
                        "ld1 {v12.8h, v13.8h, v14.8h}, [%4] \n" // r34 r35 r36
                        "fmla v28.8h, v18.8h, v9.h[2] \n"
                        "fmla v29.8h, v18.8h, v11.h[2] \n"
                        "fmla v30.8h, v19.8h, v9.h[3] \n"
                        "fmla v31.8h, v19.8h, v11.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v9.h[4] \n"
                        "fmla v29.8h, v20.8h, v11.h[4] \n"
                        "fmla v30.8h, v21.8h, v9.h[5] \n"
                        "fmla v31.8h, v21.8h, v11.h[5] \n"
                        "fmla v28.8h, v22.8h, v9.h[6] \n"
                        "fmla v29.8h, v22.8h, v11.h[6] \n"
                        "fmla v30.8h, v23.8h, v9.h[7] \n"
                        "fmla v31.8h, v23.8h, v11.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v10.h[0] \n"
                        "fmla v29.8h, v16.8h, v12.h[0] \n"
                        "fmla v30.8h, v17.8h, v10.h[1] \n"
                        "fmla v31.8h, v17.8h, v12.h[1] \n"
                        "fmla v28.8h, v18.8h, v10.h[2] \n"
                        "fmla v29.8h, v18.8h, v12.h[2] \n"
                        "fmla v30.8h, v19.8h, v10.h[3] \n"
                        "fmla v31.8h, v19.8h, v12.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v10.h[4] \n"
                        "fmla v29.8h, v20.8h, v12.h[4] \n"
                        "fmla v30.8h, v21.8h, v10.h[5] \n"
                        "fmla v31.8h, v21.8h, v12.h[5] \n"
                        "fmla v28.8h, v22.8h, v10.h[6] \n"
                        "fmla v29.8h, v22.8h, v12.h[6] \n"
                        "fmla v30.8h, v23.8h, v10.h[7] \n"
                        "fmla v31.8h, v23.8h, v12.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v11.h[0] \n"
                        "fmla v29.8h, v16.8h, v13.h[0] \n"
                        "fmla v30.8h, v17.8h, v11.h[1] \n"
                        "fmla v31.8h, v17.8h, v13.h[1] \n"
                        "fmla v28.8h, v18.8h, v11.h[2] \n"
                        "fmla v29.8h, v18.8h, v13.h[2] \n"
                        "fmla v30.8h, v19.8h, v11.h[3] \n"
                        "fmla v31.8h, v19.8h, v13.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v11.h[4] \n"
                        "fmla v29.8h, v20.8h, v13.h[4] \n"
                        "fmla v30.8h, v21.8h, v11.h[5] \n"
                        "fmla v31.8h, v21.8h, v13.h[5] \n"
                        "fmla v28.8h, v22.8h, v11.h[6] \n"
                        "fmla v29.8h, v22.8h, v13.h[6] \n"
                        "fmla v30.8h, v23.8h, v11.h[7] \n"
                        "fmla v31.8h, v23.8h, v13.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v12.h[0] \n"
                        "fmla v29.8h, v16.8h, v14.h[0] \n"
                        "fmla v30.8h, v17.8h, v12.h[1] \n"
                        "fmla v31.8h, v17.8h, v14.h[1] \n"
                        "prfm pldl1keep, [%5, #512] \n"
                        "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%5], #64 \n" // r40 r41 r42 r43
                        "fmla v28.8h, v18.8h, v12.h[2] \n"
                        "fmla v29.8h, v18.8h, v14.h[2] \n"
                        "fmla v30.8h, v19.8h, v12.h[3] \n"
                        "fmla v31.8h, v19.8h, v14.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v12.h[4] \n"
                        "fmla v29.8h, v20.8h, v14.h[4] \n"
                        "fmla v30.8h, v21.8h, v12.h[5] \n"
                        "fmla v31.8h, v21.8h, v14.h[5] \n"
                        "fmla v28.8h, v22.8h, v12.h[6] \n"
                        "fmla v29.8h, v22.8h, v14.h[6] \n"
                        "fmla v30.8h, v23.8h, v12.h[7] \n"
                        "fmla v31.8h, v23.8h, v14.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v0.h[0] \n"
                        "fmla v29.8h, v16.8h, v2.h[0] \n"
                        "fmla v30.8h, v17.8h, v0.h[1] \n"
                        "fmla v31.8h, v17.8h, v2.h[1] \n"
                        "fmla v28.8h, v18.8h, v0.h[2] \n"
                        "fmla v29.8h, v18.8h, v2.h[2] \n"
                        "fmla v30.8h, v19.8h, v0.h[3] \n"
                        "fmla v31.8h, v19.8h, v2.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v0.h[4] \n"
                        "fmla v29.8h, v20.8h, v2.h[4] \n"
                        "fmla v30.8h, v21.8h, v0.h[5] \n"
                        "fmla v31.8h, v21.8h, v2.h[5] \n"
                        "fmla v28.8h, v22.8h, v0.h[6] \n"
                        "fmla v29.8h, v22.8h, v2.h[6] \n"
                        "fmla v30.8h, v23.8h, v0.h[7] \n"
                        "fmla v31.8h, v23.8h, v2.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v1.h[0] \n"
                        "fmla v29.8h, v16.8h, v3.h[0] \n"
                        "fmla v30.8h, v17.8h, v1.h[1] \n"
                        "fmla v31.8h, v17.8h, v3.h[1] \n"
                        "prfm pldl1keep, [%5, #384] \n"
                        "ld1 {v4.8h, v5.8h, v6.8h}, [%5] \n" // r44 r45 r46
                        "fmla v28.8h, v18.8h, v1.h[2] \n"
                        "fmla v29.8h, v18.8h, v3.h[2] \n"
                        "fmla v30.8h, v19.8h, v1.h[3] \n"
                        "fmla v31.8h, v19.8h, v3.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v1.h[4] \n"
                        "fmla v29.8h, v20.8h, v3.h[4] \n"
                        "fmla v30.8h, v21.8h, v1.h[5] \n"
                        "fmla v31.8h, v21.8h, v3.h[5] \n"
                        "fmla v28.8h, v22.8h, v1.h[6] \n"
                        "fmla v29.8h, v22.8h, v3.h[6] \n"
                        "fmla v30.8h, v23.8h, v1.h[7] \n"
                        "fmla v31.8h, v23.8h, v3.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v2.h[0] \n"
                        "fmla v29.8h, v16.8h, v4.h[0] \n"
                        "fmla v30.8h, v17.8h, v2.h[1] \n"
                        "fmla v31.8h, v17.8h, v4.h[1] \n"
                        "fmla v28.8h, v18.8h, v2.h[2] \n"
                        "fmla v29.8h, v18.8h, v4.h[2] \n"
                        "fmla v30.8h, v19.8h, v2.h[3] \n"
                        "fmla v31.8h, v19.8h, v4.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v2.h[4] \n"
                        "fmla v29.8h, v20.8h, v4.h[4] \n"
                        "fmla v30.8h, v21.8h, v2.h[5] \n"
                        "fmla v31.8h, v21.8h, v4.h[5] \n"
                        "fmla v28.8h, v22.8h, v2.h[6] \n"
                        "fmla v29.8h, v22.8h, v4.h[6] \n"
                        "fmla v30.8h, v23.8h, v2.h[7] \n"
                        "fmla v31.8h, v23.8h, v4.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v3.h[0] \n"
                        "fmla v29.8h, v16.8h, v5.h[0] \n"
                        "fmla v30.8h, v17.8h, v3.h[1] \n"
                        "fmla v31.8h, v17.8h, v5.h[1] \n"
                        "fmla v28.8h, v18.8h, v3.h[2] \n"
                        "fmla v29.8h, v18.8h, v5.h[2] \n"
                        "fmla v30.8h, v19.8h, v3.h[3] \n"
                        "fmla v31.8h, v19.8h, v5.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v3.h[4] \n"
                        "fmla v29.8h, v20.8h, v5.h[4] \n"
                        "fmla v30.8h, v21.8h, v3.h[5] \n"
                        "fmla v31.8h, v21.8h, v5.h[5] \n"
                        "fmla v28.8h, v22.8h, v3.h[6] \n"
                        "fmla v29.8h, v22.8h, v5.h[6] \n"
                        "fmla v30.8h, v23.8h, v3.h[7] \n"
                        "fmla v31.8h, v23.8h, v5.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6] \n"
                        "fmla v28.8h, v16.8h, v4.h[0] \n"
                        "fmla v29.8h, v16.8h, v6.h[0] \n"
                        "fmla v30.8h, v17.8h, v4.h[1] \n"
                        "fmla v31.8h, v17.8h, v6.h[1] \n"
                        "fmla v28.8h, v18.8h, v4.h[2] \n"
                        "fmla v29.8h, v18.8h, v6.h[2] \n"
                        "fmla v30.8h, v19.8h, v4.h[3] \n"
                        "fmla v31.8h, v19.8h, v6.h[3] \n"
                        "fmla v28.8h, v20.8h, v4.h[4] \n"
                        "fmla v29.8h, v20.8h, v6.h[4] \n"
                        "fmla v30.8h, v21.8h, v4.h[5] \n"
                        "fmla v31.8h, v21.8h, v6.h[5] \n"
                        "fmla v28.8h, v22.8h, v4.h[6] \n"
                        "fmla v29.8h, v22.8h, v6.h[6] \n"
                        "fmla v30.8h, v23.8h, v4.h[7] \n"
                        "fmla v31.8h, v23.8h, v6.h[7] \n"
                        // fold partial accumulators: pixel0 = v28+v30, pixel1 = v29+v31
                        "fadd v28.8h, v28.8h, v30.8h \n"
                        "fadd v29.8h, v29.8h, v31.8h \n"
                        // rewind kptr to the start of this channel's 5x5 kernel:
                        // 25 taps * 128 bytes advanced, minus the final 64-byte
                        // load which was not post-incremented -> 3136 bytes
                        "sub %6, %6, #3136 \n" // kptr -= 24.5 * 128;
                        "st1 {v28.8h, v29.8h}, [%0], #32 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),      // %1
                        "=r"(r1),      // %2
                        "=r"(r2),      // %3
                        "=r"(r3),      // %4
                        "=r"(r4),      // %5
                        "=r"(kptr)     // %6
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "4"(r3),
                        "5"(r4),
                        "6"(kptr)
                        : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
                }
                // tail loop: one output pixel per iteration, four partial
                // accumulators v28..v31 folded into v28 before the store.
                for (; j < outw; j++)
                {
                    asm volatile(
                        "prfm pldl1keep, [%1, #256] \n"
                        "ld1 {v0.8h, v1.8h}, [%1], #32 \n" // r00 r01
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "prfm pldl1keep, [%0, #128] \n"
                        "ld1 {v31.8h}, [%0] \n" // sum0
                        "fmul v28.8h, v16.8h, v0.h[0] \n"
                        "fmul v29.8h, v17.8h, v0.h[1] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmul v30.8h, v18.8h, v0.h[2] \n"
                        "fmla v31.8h, v19.8h, v0.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v0.h[4] \n"
                        "fmla v29.8h, v21.8h, v0.h[5] \n"
                        "fmla v30.8h, v22.8h, v0.h[6] \n"
                        "fmla v31.8h, v23.8h, v0.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v1.h[0] \n"
                        "fmla v29.8h, v17.8h, v1.h[1] \n"
                        "prfm pldl1keep, [%1, #384] \n"
                        "ld1 {v2.8h, v3.8h, v4.8h}, [%1] \n" // r02 r03 r04
                        "fmla v30.8h, v18.8h, v1.h[2] \n"
                        "fmla v31.8h, v19.8h, v1.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v1.h[4] \n"
                        "fmla v29.8h, v21.8h, v1.h[5] \n"
                        "fmla v30.8h, v22.8h, v1.h[6] \n"
                        "fmla v31.8h, v23.8h, v1.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v2.h[0] \n"
                        "fmla v29.8h, v17.8h, v2.h[1] \n"
                        "fmla v30.8h, v18.8h, v2.h[2] \n"
                        "fmla v31.8h, v19.8h, v2.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v2.h[4] \n"
                        "fmla v29.8h, v21.8h, v2.h[5] \n"
                        "fmla v30.8h, v22.8h, v2.h[6] \n"
                        "fmla v31.8h, v23.8h, v2.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v3.h[0] \n"
                        "fmla v29.8h, v17.8h, v3.h[1] \n"
                        "fmla v30.8h, v18.8h, v3.h[2] \n"
                        "fmla v31.8h, v19.8h, v3.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v3.h[4] \n"
                        "fmla v29.8h, v21.8h, v3.h[5] \n"
                        "fmla v30.8h, v22.8h, v3.h[6] \n"
                        "fmla v31.8h, v23.8h, v3.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v4.h[0] \n"
                        "fmla v29.8h, v17.8h, v4.h[1] \n"
                        "prfm pldl1keep, [%2, #256] \n"
                        "ld1 {v8.8h, v9.8h}, [%2], #32 \n" // r10 r11
                        "fmla v30.8h, v18.8h, v4.h[2] \n"
                        "fmla v31.8h, v19.8h, v4.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v4.h[4] \n"
                        "fmla v29.8h, v21.8h, v4.h[5] \n"
                        "fmla v30.8h, v22.8h, v4.h[6] \n"
                        "fmla v31.8h, v23.8h, v4.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v8.h[0] \n"
                        "fmla v29.8h, v17.8h, v8.h[1] \n"
                        "fmla v30.8h, v18.8h, v8.h[2] \n"
                        "fmla v31.8h, v19.8h, v8.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v8.h[4] \n"
                        "fmla v29.8h, v21.8h, v8.h[5] \n"
                        "fmla v30.8h, v22.8h, v8.h[6] \n"
                        "fmla v31.8h, v23.8h, v8.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v9.h[0] \n"
                        "fmla v29.8h, v17.8h, v9.h[1] \n"
                        "prfm pldl1keep, [%2, #384] \n"
                        "ld1 {v10.8h, v11.8h, v12.8h}, [%2] \n" // r12 r13 r14
                        "fmla v30.8h, v18.8h, v9.h[2] \n"
                        "fmla v31.8h, v19.8h, v9.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v9.h[4] \n"
                        "fmla v29.8h, v21.8h, v9.h[5] \n"
                        "fmla v30.8h, v22.8h, v9.h[6] \n"
                        "fmla v31.8h, v23.8h, v9.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v10.h[0] \n"
                        "fmla v29.8h, v17.8h, v10.h[1] \n"
                        "fmla v30.8h, v18.8h, v10.h[2] \n"
                        "fmla v31.8h, v19.8h, v10.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v10.h[4] \n"
                        "fmla v29.8h, v21.8h, v10.h[5] \n"
                        "fmla v30.8h, v22.8h, v10.h[6] \n"
                        "fmla v31.8h, v23.8h, v10.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v11.h[0] \n"
                        "fmla v29.8h, v17.8h, v11.h[1] \n"
                        "fmla v30.8h, v18.8h, v11.h[2] \n"
                        "fmla v31.8h, v19.8h, v11.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v11.h[4] \n"
                        "fmla v29.8h, v21.8h, v11.h[5] \n"
                        "fmla v30.8h, v22.8h, v11.h[6] \n"
                        "fmla v31.8h, v23.8h, v11.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v12.h[0] \n"
                        "fmla v29.8h, v17.8h, v12.h[1] \n"
                        "prfm pldl1keep, [%3, #256] \n"
                        "ld1 {v0.8h, v1.8h}, [%3], #32 \n" // r20 r21
                        "fmla v30.8h, v18.8h, v12.h[2] \n"
                        "fmla v31.8h, v19.8h, v12.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v12.h[4] \n"
                        "fmla v29.8h, v21.8h, v12.h[5] \n"
                        "fmla v30.8h, v22.8h, v12.h[6] \n"
                        "fmla v31.8h, v23.8h, v12.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v0.h[0] \n"
                        "fmla v29.8h, v17.8h, v0.h[1] \n"
                        "fmla v30.8h, v18.8h, v0.h[2] \n"
                        "fmla v31.8h, v19.8h, v0.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v0.h[4] \n"
                        "fmla v29.8h, v21.8h, v0.h[5] \n"
                        "fmla v30.8h, v22.8h, v0.h[6] \n"
                        "fmla v31.8h, v23.8h, v0.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v1.h[0] \n"
                        "fmla v29.8h, v17.8h, v1.h[1] \n"
                        "prfm pldl1keep, [%3, #384] \n"
                        "ld1 {v2.8h, v3.8h, v4.8h}, [%3] \n" // r22 r23 r24
                        "fmla v30.8h, v18.8h, v1.h[2] \n"
                        "fmla v31.8h, v19.8h, v1.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v1.h[4] \n"
                        "fmla v29.8h, v21.8h, v1.h[5] \n"
                        "fmla v30.8h, v22.8h, v1.h[6] \n"
                        "fmla v31.8h, v23.8h, v1.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v2.h[0] \n"
                        "fmla v29.8h, v17.8h, v2.h[1] \n"
                        "fmla v30.8h, v18.8h, v2.h[2] \n"
                        "fmla v31.8h, v19.8h, v2.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v2.h[4] \n"
                        "fmla v29.8h, v21.8h, v2.h[5] \n"
                        "fmla v30.8h, v22.8h, v2.h[6] \n"
                        "fmla v31.8h, v23.8h, v2.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v3.h[0] \n"
                        "fmla v29.8h, v17.8h, v3.h[1] \n"
                        "fmla v30.8h, v18.8h, v3.h[2] \n"
                        "fmla v31.8h, v19.8h, v3.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v3.h[4] \n"
                        "fmla v29.8h, v21.8h, v3.h[5] \n"
                        "fmla v30.8h, v22.8h, v3.h[6] \n"
                        "fmla v31.8h, v23.8h, v3.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v4.h[0] \n"
                        "fmla v29.8h, v17.8h, v4.h[1] \n"
                        "prfm pldl1keep, [%4, #256] \n"
                        "ld1 {v8.8h, v9.8h}, [%4], #32 \n" // r30 r31
                        "fmla v30.8h, v18.8h, v4.h[2] \n"
                        "fmla v31.8h, v19.8h, v4.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v4.h[4] \n"
                        "fmla v29.8h, v21.8h, v4.h[5] \n"
                        "fmla v30.8h, v22.8h, v4.h[6] \n"
                        "fmla v31.8h, v23.8h, v4.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v8.h[0] \n"
                        "fmla v29.8h, v17.8h, v8.h[1] \n"
                        "fmla v30.8h, v18.8h, v8.h[2] \n"
                        "fmla v31.8h, v19.8h, v8.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v8.h[4] \n"
                        "fmla v29.8h, v21.8h, v8.h[5] \n"
                        "fmla v30.8h, v22.8h, v8.h[6] \n"
                        "fmla v31.8h, v23.8h, v8.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v9.h[0] \n"
                        "fmla v29.8h, v17.8h, v9.h[1] \n"
                        "prfm pldl1keep, [%4, #384] \n"
                        "ld1 {v10.8h, v11.8h, v12.8h}, [%4] \n" // r32 r33 r34
                        "fmla v30.8h, v18.8h, v9.h[2] \n"
                        "fmla v31.8h, v19.8h, v9.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v9.h[4] \n"
                        "fmla v29.8h, v21.8h, v9.h[5] \n"
                        "fmla v30.8h, v22.8h, v9.h[6] \n"
                        "fmla v31.8h, v23.8h, v9.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v10.h[0] \n"
                        "fmla v29.8h, v17.8h, v10.h[1] \n"
                        "fmla v30.8h, v18.8h, v10.h[2] \n"
                        "fmla v31.8h, v19.8h, v10.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v10.h[4] \n"
                        "fmla v29.8h, v21.8h, v10.h[5] \n"
                        "fmla v30.8h, v22.8h, v10.h[6] \n"
                        "fmla v31.8h, v23.8h, v10.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v11.h[0] \n"
                        "fmla v29.8h, v17.8h, v11.h[1] \n"
                        "fmla v30.8h, v18.8h, v11.h[2] \n"
                        "fmla v31.8h, v19.8h, v11.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v11.h[4] \n"
                        "fmla v29.8h, v21.8h, v11.h[5] \n"
                        "fmla v30.8h, v22.8h, v11.h[6] \n"
                        "fmla v31.8h, v23.8h, v11.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v12.h[0] \n"
                        "fmla v29.8h, v17.8h, v12.h[1] \n"
                        "prfm pldl1keep, [%5, #256] \n"
                        "ld1 {v0.8h, v1.8h}, [%5], #32 \n" // r40 r41
                        "fmla v30.8h, v18.8h, v12.h[2] \n"
                        "fmla v31.8h, v19.8h, v12.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v12.h[4] \n"
                        "fmla v29.8h, v21.8h, v12.h[5] \n"
                        "fmla v30.8h, v22.8h, v12.h[6] \n"
                        "fmla v31.8h, v23.8h, v12.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v0.h[0] \n"
                        "fmla v29.8h, v17.8h, v0.h[1] \n"
                        "fmla v30.8h, v18.8h, v0.h[2] \n"
                        "fmla v31.8h, v19.8h, v0.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v0.h[4] \n"
                        "fmla v29.8h, v21.8h, v0.h[5] \n"
                        "fmla v30.8h, v22.8h, v0.h[6] \n"
                        "fmla v31.8h, v23.8h, v0.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v1.h[0] \n"
                        "fmla v29.8h, v17.8h, v1.h[1] \n"
                        "prfm pldl1keep, [%5, #384] \n"
                        "ld1 {v2.8h, v3.8h, v4.8h}, [%5] \n" // r42 r43 r44
                        "fmla v30.8h, v18.8h, v1.h[2] \n"
                        "fmla v31.8h, v19.8h, v1.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v1.h[4] \n"
                        "fmla v29.8h, v21.8h, v1.h[5] \n"
                        "fmla v30.8h, v22.8h, v1.h[6] \n"
                        "fmla v31.8h, v23.8h, v1.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v2.h[0] \n"
                        "fmla v29.8h, v17.8h, v2.h[1] \n"
                        "fmla v30.8h, v18.8h, v2.h[2] \n"
                        "fmla v31.8h, v19.8h, v2.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v2.h[4] \n"
                        "fmla v29.8h, v21.8h, v2.h[5] \n"
                        "fmla v30.8h, v22.8h, v2.h[6] \n"
                        "fmla v31.8h, v23.8h, v2.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n"
                        "fmla v28.8h, v16.8h, v3.h[0] \n"
                        "fmla v29.8h, v17.8h, v3.h[1] \n"
                        "fmla v30.8h, v18.8h, v3.h[2] \n"
                        "fmla v31.8h, v19.8h, v3.h[3] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n"
                        "fmla v28.8h, v20.8h, v3.h[4] \n"
                        "fmla v29.8h, v21.8h, v3.h[5] \n"
                        "fmla v30.8h, v22.8h, v3.h[6] \n"
                        "fmla v31.8h, v23.8h, v3.h[7] \n"
                        "prfm pldl1keep, [%6, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6] \n"
                        "fmla v28.8h, v16.8h, v4.h[0] \n"
                        "fmla v29.8h, v17.8h, v4.h[1] \n"
                        "fmla v30.8h, v18.8h, v4.h[2] \n"
                        "fmla v31.8h, v19.8h, v4.h[3] \n"
                        "fmla v28.8h, v20.8h, v4.h[4] \n"
                        "fmla v29.8h, v21.8h, v4.h[5] \n"
                        "fmla v30.8h, v22.8h, v4.h[6] \n"
                        "fmla v31.8h, v23.8h, v4.h[7] \n"
                        // fold the four partial sums into v28 before storing
                        "fadd v28.8h, v28.8h, v29.8h \n"
                        "fadd v30.8h, v30.8h, v31.8h \n"
                        "fadd v28.8h, v28.8h, v30.8h \n"
                        // rewind kptr to kernel start: 25*128 - 64 = 3136 bytes
                        "sub %6, %6, #3136 \n" // kptr -= 24.5 * 128;
                        "st1 {v28.8h}, [%0], #16 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),      // %1
                        "=r"(r1),      // %2
                        "=r"(r2),      // %3
                        "=r"(r3),      // %4
                        "=r"(r4),      // %5
                        "=r"(kptr)     // %6
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "4"(r3),
                        "5"(r4),
                        "6"(kptr)
                        : "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
                }
                // advance all five row pointers past the stride-2 tail
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
                r3 += tailstep;
                r4 += tailstep;
            }
        }
    }
}
|
DRB060-matrixmultiply-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Classic i-k-j matrix multiplication
*/
#include <stdio.h>

#define N 100
#define M 100
#define K 100
double a[N][M],b[M][K],c[N][K];
// Fill a, b, c with i*j products.
// NOTE(review): this kernel comes from DataRaceBench and the nested
// `parallel for` regions are intentional. The innermost j-parallel loop
// rewrites the same a[i][k] element once per j value, and b[k][j] is
// written under k-parallelism with i varying — so the final contents of
// a and b are nondeterministic by construction. Do not "fix" the races
// without confirming the benchmark's intent.
int init()
{
int i,j,k;
#pragma omp parallel for private(i, j,k)
for (i = 0; i < N; i++)
#pragma omp parallel for private(j, k)
for (k = 0; k < K; k++)
#pragma omp parallel for private(j)
for (j = 0; j < M; j++) {
c[i][j] = i * j;
a[i][k] = i * j; // raced: every parallel j iteration stores a different value here
b[k][j] = i * j;
}
return 0;
}
/* Dense matrix multiply c += a * b in the classic i-k-j loop order,
 * parallelized over the independent output rows. Always returns 0. */
int mmm()
{
    int row, kk, col;
#pragma omp parallel for private(row, kk, col)
    for (row = 0; row < N; row++) {
        for (kk = 0; kk < K; kk++) {
            for (col = 0; col < M; col++) {
                c[row][col] += a[row][kk] * b[kk][col];
            }
        }
    }
    return 0;
}
/* Dump every (c, a, b) element triple visited in i-k-j order to stdout.
 * Always returns 0. */
int print()
{
    int row, kk, col;
    for (row = 0; row < N; row++) {
        for (kk = 0; kk < K; kk++) {
            for (col = 0; col < M; col++) {
                printf("%lf %lf %lf\n", c[row][col], a[row][kk], b[kk][col]);
            }
        }
    }
    return 0;
}
// Driver: initialize the matrices, run the i-k-j multiply, print all elements.
int main()
{
init();
mmm();
print();
return 0;
}
|
GB_hcat_slice.c | //------------------------------------------------------------------------------
// GB_hcat_slice: horizontal concatenation of the slices of C
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Horizontal concatenation of slices into the matrix C.
// GB_FREE_WORK: release the two cumulative-sum workspace arrays
// (Cnzs and Cnvecs, each of size nthreads+1). Used on every error
// path and on the normal exit of GB_hcat_slice below.
#define GB_FREE_WORK \
{ \
GB_FREE_MEMORY (Cnzs, nthreads+1, sizeof (int64_t)) ; \
GB_FREE_MEMORY (Cnvecs, nthreads+1, sizeof (int64_t)) ; \
}
#include "GB_mxm.h"
GrB_Info GB_hcat_slice // horizontal concatenation of the slices of C
(
GrB_Matrix *Chandle, // output matrix C to create
int nthreads, // # of slices to concatenate
GrB_Matrix *Cslice, // array of slices of size nthreads
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (nthreads > 1) ;
ASSERT (Chandle != NULL) ;
ASSERT (*Chandle == NULL) ;
ASSERT (Cslice != NULL) ;
for (int tid = 0 ; tid < nthreads ; tid++)
{
ASSERT_OK (GB_check (Cslice [tid], "a slice of C", GB0)) ;
ASSERT (!GB_PENDING (Cslice [tid])) ;
ASSERT (!GB_ZOMBIES (Cslice [tid])) ;
ASSERT ((Cslice [tid])->is_hyper) ;
// each Cslice [tid] is constructed as its own matrix, with Cslice
// [tid] = A * Bslice [tid]. It is not a slice of an other matrix, so
// Cslice [tid]->is_slice is false.
ASSERT (!(Cslice [tid])->is_slice) ;
ASSERT ((Cslice [tid])->type == (Cslice [0])->type) ;
ASSERT ((Cslice [tid])->vlen == (Cslice [0])->vlen) ;
ASSERT ((Cslice [tid])->vdim == (Cslice [0])->vdim) ;
}
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
int64_t *restrict Cnzs ; // size nthreads+1
int64_t *restrict Cnvecs ; // size nthreads+1
GB_MALLOC_MEMORY (Cnzs, nthreads+1, sizeof (int64_t)) ;
GB_MALLOC_MEMORY (Cnvecs, nthreads+1, sizeof (int64_t)) ;
if (Cnzs == NULL || Cnvecs == NULL)
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// find the size and type of C
//--------------------------------------------------------------------------
// Let cnz_slice [tid] be the number of entries in Cslice [tid], and let
// cnvec_slice [tid] be the number vectors in Cslice [tid]. Then Cnzs and
// Cnvecs are cumulative sums of cnz_slice and cnvec_slice, respectively:
// Cnzs [tid] = sum of cnz_slice [0:tid-1]
// Cnvecs [tid] = sum of cnvec_slice [0:tid-1]
// both arrays are size nthreads+1. Thus, both Cnzs [0] and Cnvecs [0] are
// zero, and their last entries are the total # entries and vectors in C,
// respectively.
// all the slices have the same type and dimension
GrB_Type ctype = (Cslice [0])->type ;
int64_t cvlen = (Cslice [0])->vlen ;
int64_t cvdim = (Cslice [0])->vdim ;
int64_t cnz = 0 ;
int64_t cnvec = 0 ;
int64_t cnvec_nonempty = 0 ;
for (int tid = 0 ; tid < nthreads ; tid++)
{
// compute the cumulative sum of the # entries and # vectors
Cnzs [tid] = cnz ;
Cnvecs [tid] = cnvec ;
cnz += GB_NNZ (Cslice [tid]) ;
cnvec += (Cslice [tid])->nvec ;
// also sum the total number of non-empty vectors in all the slices
cnvec_nonempty += (Cslice [tid])->nvec_nonempty ;
}
Cnzs [nthreads] = cnz ; // total # entries in C
Cnvecs [nthreads] = cnvec ; // total # vectors in C
//--------------------------------------------------------------------------
// create C and allocate all of its space
//--------------------------------------------------------------------------
// NOTE(review): GB_CREATE is a macro (from GB_mxm.h) that is assumed to
// assign `info` as a side effect — `info` is never set explicitly here.
// Confirm against the macro definition.
GrB_Info info ;
GB_CREATE (Chandle, ctype, cvlen, cvdim, GB_Ap_malloc, true,
GB_FORCE_HYPER, GB_Global_hyper_ratio_get ( ), cnvec, cnz, true,
Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
// NOTE(review): any non-success `info` is reported as out-of-memory
// here; verify that GB_CREATE cannot fail for other reasons.
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
GrB_Matrix C = (*Chandle) ;
int64_t *restrict Ch = C->h ;
int64_t *restrict Cp = C->p ;
int64_t *restrict Ci = C->i ;
GB_void *restrict Cx = C->x ;
size_t csize = ctype->size ;
C->nvec_nonempty = cnvec_nonempty ;
C->nvec = cnvec ;
Cp [cnvec] = cnz ;
//--------------------------------------------------------------------------
// copy each slice into C
//--------------------------------------------------------------------------
// Each thread writes only the disjoint ranges [Cnzs[tid], Cnzs[tid+1])
// of Ci/Cx and [Cnvecs[tid], Cnvecs[tid+1]) of Ch/Cp, so no
// synchronization is needed.
#pragma omp parallel for num_threads(nthreads) schedule(static,1)
for (int tid = 0 ; tid < nthreads ; tid++)
{
// get the Cslice [tid] and its position in C
int64_t *restrict Csliceh = (Cslice [tid])->h ;
int64_t *restrict Cslicep = (Cslice [tid])->p ;
int64_t *restrict Cslicei = (Cslice [tid])->i ;
GB_void *restrict Cslicex = (Cslice [tid])->x ;
int64_t cnz = Cnzs [tid] ;
int64_t cnz_slice = Cnzs [tid+1] - cnz ;
int64_t cnvec = Cnvecs [tid] ;
int64_t cnvec_slice = Cnvecs [tid+1] - cnvec ;
// copy the row indices and values of Cslice [tid] into Ci and Cx
memcpy (Ci + cnz , Cslicei, cnz_slice * sizeof (int64_t)) ;
memcpy (Cx + cnz * csize, Cslicex, cnz_slice * csize) ;
// copy the column indices of Cslice into Ch
memcpy (Ch + cnvec, Csliceh, cnvec_slice * sizeof (int64_t)) ;
// construct the column pointers of C (shift upwards by cnz)
for (int64_t k = 0 ; k < cnvec_slice ; k++)
{
Cp [cnvec + k] = Cslicep [k] + cnz ;
}
}
//--------------------------------------------------------------------------
// free workspace and finalize the matrix
//--------------------------------------------------------------------------
GB_FREE_WORK ;
C->magic = GB_MAGIC ;
ASSERT_OK (GB_check (C, "C from horizontal concatenation", GB0)) ;
return (GrB_SUCCESS) ;
}
|
1.norace9.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define N 20
// Each outer iteration i owns row A[i]; the dependence A[i][j] <- A[i][j-1]
// is carried only by the sequential inner j loop, so parallelizing over i
// is race free (matching the CHECK line below). A is read uninitialized by
// design — this kernel only exercises the analyzer's dependence reasoning.
int main() {
int A[N][N];
#pragma omp parallel for schedule(dynamic, 4)
for (int i = 1; i < N; i++)
for (int j = 1; j < N; j++)
A[i][j] = A[i][j - 1];
}
// CHECK: Region is Data Race Free.
// END
|
core_slaset.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlaset.c, normal z -> s, Fri Sep 28 17:38:22 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
// for memset function
#include <string.h>
/***************************************************************************//**
*
* @ingroup core_laset
*
* Sets the elements of the matrix A on the diagonal
* to beta and on the off-diagonals to alpha
*
*******************************************************************************
*
* @param[in] uplo
* Specifies which elements of the matrix are to be set
* - PlasmaUpper: Upper part of A is set;
* - PlasmaLower: Lower part of A is set;
* - PlasmaUpperLower: ALL elements of A are set.
*
* @param[in] m
* The number of rows of the matrix A. m >= 0.
*
* @param[in] n
* The number of columns of the matrix A. n >= 0.
*
* @param[in] alpha
* The constant to which the off-diagonal elements are to be set.
*
* @param[in] beta
* The constant to which the diagonal elements are to be set.
*
* @param[in,out] A
* On entry, the m-by-n tile A.
* On exit, A has been set accordingly.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
******************************************************************************/
__attribute__((weak))
void plasma_core_slaset(plasma_enum_t uplo, int m, int n,
                        float alpha, float beta,
                        float *A, int lda)
{
    // Fast path: an all-zero fill of a full, contiguous column-major
    // matrix can be done with one memset over the whole buffer.
    const int zero_fill  = (alpha == 0.0 && beta == 0.0);
    const int contiguous = (uplo == PlasmaGeneral && m == lda);
    if (zero_fill && contiguous) {
        memset((void*)A, 0, (size_t)m*n*sizeof(float));
        return;
    }
    // General case: delegate to LAPACK's laset worker, which handles the
    // upper/lower/full variants and the separate diagonal value beta.
    LAPACKE_slaset_work(LAPACK_COL_MAJOR, lapack_const(uplo),
                        m, n, alpha, beta, A, lda);
}
/******************************************************************************/
// Launch plasma_core_slaset as an OpenMP task. The depend(out) clause
// covers the whole mb-by-nb tile starting at A, so later tasks that
// depend on A are ordered after this initialization. (i, j) select the
// starting offset within the tile and (m, n) the extent actually set.
void plasma_core_omp_slaset(plasma_enum_t uplo,
int mb, int nb,
int i, int j,
int m, int n,
float alpha, float beta,
float *A)
{
#pragma omp task depend(out:A[0:mb*nb])
plasma_core_slaset(uplo, m, n,
alpha, beta,
A+i+j*mb, mb);
}
|
pi.c | #include <err.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef WITH_MPI
#include <mpi.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
typedef struct {
long n;
int is_verbose;
} Params;
Params get_params(int argc, char *argv[]);
double *compute_limits();
double partial_pi(double left, double right, Params *params);
#ifdef WITH_MPI
MPI_Datatype *alloc_mpi_params_type();
void free_mpi_params_type(MPI_Datatype *params_type);
#endif
/*
 * Compute pi by numerically integrating 4/(1+x^2) over [0, 1].
 * Rank 0 parses the options and partitions [0, 1]; with MPI the Params
 * struct is broadcast and each rank integrates its own sub-interval,
 * the partial sums being reduced onto the root. Without MPI the single
 * process integrates the whole interval.
 */
int main(int argc, char *argv[]) {
    const int root = 0;
    int rank = 0, size = 1;
    Params params;
    double *limits, local_limits[2], partial_result, result = 0.0;
#ifdef WITH_MPI
#ifdef _OPENMP
    /* Hybrid MPI+OpenMP: only the master thread makes MPI calls, so
     * MPI_THREAD_FUNNELED support is required. */
    int thread_level;
    MPI_Init_thread(NULL, NULL, MPI_THREAD_FUNNELED, &thread_level);
    if (thread_level != MPI_THREAD_FUNNELED) {
        fprintf(stderr, "thread level funneled not supported\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
#else
    MPI_Init(NULL, NULL);
#endif
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif
    /* Only the root parses argv and builds the interval table; `limits`
     * stays uninitialized on other ranks but is then only used as the
     * (ignored) scatter send buffer there. */
    if (rank == 0) {
        params = get_params(argc, argv);
        limits = compute_limits();
    }
#ifdef WITH_MPI
    MPI_Datatype *params_type = alloc_mpi_params_type();
    /* Bug fix: "&params" had been corrupted into the HTML-entity residue
     * "¶ms" here and in the partial_pi call below, which does not
     * compile. */
    MPI_Bcast(&params, 1, *params_type, root, MPI_COMM_WORLD);
    free_mpi_params_type(params_type);
    MPI_Scatter(limits, 2, MPI_DOUBLE, local_limits, 2, MPI_DOUBLE, root,
                MPI_COMM_WORLD);
#else
    local_limits[0] = limits[0];
    local_limits[1] = limits[1];
#endif
    if (rank == 0) {
        free(limits);
    }
    if (params.is_verbose)
        printf("rank %d out of %d: %.5lf -> %.5lf (%ld)\n",
               rank, size, local_limits[0], local_limits[1], params.n);
    partial_result = partial_pi(local_limits[0], local_limits[1], &params);
#ifdef WITH_MPI
    MPI_Reduce(&partial_result, &result, 1, MPI_DOUBLE, MPI_SUM, root,
               MPI_COMM_WORLD);
#else
    result = partial_result;
#endif
    if (rank == 0) {
        printf("pi = %.8lf\n", result);
    }
#ifdef WITH_MPI
    MPI_Finalize();
#endif
    return EXIT_SUCCESS;
}
/* Parse command-line options into a Params value.
 * argv[1]: total number of rectangles (default 1000), divided evenly
 * among the MPI processes; argv[2]: verbosity flag (default 0). */
Params get_params(int argc, char *argv[]) {
    int num_procs = 1;
    Params params;
#ifdef WITH_MPI
    MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
#endif
    params.n = (argc > 1) ? atol(argv[1]) : 1000;
    params.n /= num_procs;
    params.is_verbose = (argc > 2) ? atoi(argv[2]) : 0;
    return params;
}
/*
 * Allocate and fill the integration sub-interval boundaries for each rank:
 * returns a malloc'ed array of 2*size doubles where limits[2*i] and
 * limits[2*i + 1] are the left and right ends of rank i's slice of [0, 1].
 * The caller owns (and must free) the returned array; the process aborts
 * on allocation failure.
 */
double *compute_limits() {
    int i, size = 1;
    double *limits, delta;
#ifdef WITH_MPI
    MPI_Comm_size(MPI_COMM_WORLD, &size);
#endif
    delta = 1.0/size;
    if ((limits = (double *) malloc(2*size*sizeof(double))) == NULL) {
        warnx("can not allocate array of %d elements", 2*size);
#ifdef WITH_MPI
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
#else
        exit(EXIT_FAILURE);
#endif
    }
    for (i = 0; i < size; i++) {
        limits[2*i] = i*delta;
        /* Bug fix: this was "+=", which read (and accumulated onto) the
         * uninitialized malloc'ed element — undefined behavior. */
        limits[2*i + 1] = (i + 1)*delta;
    }
    return limits;
}
/*
 * Integrate 4/(1+x^2) over [left, right] using params->n left-endpoint
 * rectangles; returns this rank's partial contribution to pi.
 */
double partial_pi(double left, double right, Params *params) {
int rank = 0;
double sum = 0.0, result;
double delta = (right - left)/params->n;
#ifdef WITH_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif
/* i and x are declared inside the parallel region and are therefore
 * private to each thread; everything else is listed explicitly because
 * of default(none). */
#pragma omp parallel default(none) \
shared(sum, delta, left, right, result, params, rank)
{
long i;
double x;
if (params->is_verbose) {
int thread_num = 0, num_threads = 1;
#ifdef _OPENMP
thread_num = omp_get_thread_num();
num_threads = omp_get_num_threads();
#endif
printf("thread %d out of %d at rank %d\n",
thread_num, num_threads, rank);
}
#pragma omp for reduction(+:sum)
for (i = 0; i < params->n; i++) {
x = left + i*delta;
sum += 4.0/(1.0 + x*x);
}
/* The worksharing loop's implicit barrier guarantees the reduction
 * on sum is complete before one thread computes the final result;
 * the single construct's closing barrier publishes it. */
#pragma omp single
result = sum*delta;
}
return result;
}
#ifdef WITH_MPI
/*
 * Build (malloc + commit) an MPI datatype describing struct Params so it
 * can be broadcast in one message. The caller releases it with
 * free_mpi_params_type(); the process aborts on allocation failure.
 */
MPI_Datatype *alloc_mpi_params_type() {
    const int type_count = 2;
    int block_lengths[] = {1, 1};
    /* NOTE(review): this assumes is_verbose sits exactly at sizeof(long)
     * within Params — true on common ABIs with no padding there, but
     * offsetof(Params, is_verbose) would be the portable choice. */
    MPI_Aint displacements[] = {0, sizeof(long)};
    MPI_Datatype types[] = {MPI_LONG, MPI_INT};
    MPI_Datatype *params_type;
    if (!(params_type = (MPI_Datatype *) malloc(sizeof(MPI_Datatype)))) {
        warnx("can not allocate MPI data type");  /* fixed typo: "can ot" */
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }
    MPI_Type_create_struct(type_count, block_lengths, displacements, types,
                           params_type);
    MPI_Type_commit(params_type);
    return params_type;
}
/* Release an MPI datatype created by alloc_mpi_params_type() together
 * with the heap cell that holds it. */
void free_mpi_params_type(MPI_Datatype *params_type) {
MPI_Type_free(params_type);
free(params_type);
}
#endif
|
utils.c | // Copyright (c) 2015, UChicago Argonne, LLC. All rights reserved.
// Copyright 2015. UChicago Argonne, LLC. This software was produced
// under U.S. Government contract DE-AC02-06CH11357 for Argonne National
// Laboratory (ANL), which is operated by UChicago Argonne, LLC for the
// U.S. Department of Energy. The U.S. Government has rights to use,
// reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR
// UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR
// ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is
// modified to produce derivative works, such modified software should
// be clearly marked, so as not to confuse it with the version available
// from ANL.
// Additionally, redistribution and use in source and binary forms, with
// or without modification, are permitted provided that the following
// conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
// * Neither the name of UChicago Argonne, LLC, Argonne National
// Laboratory, ANL, the U.S. Government, nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago
// Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include <float.h>
#include <stdint.h>
#include "utils.h"
// for windows build
#ifdef WIN32
// Windows requires an exported module-init symbol for the shared library
// to be importable by Python; these empty stubs provide it for Python 3
// (PyInit_*) and Python 2 (init*). Presumably the library's functions
// are invoked through ctypes/cffi rather than as a real extension
// module — confirm against the build scripts.
# ifdef PY3K
void
PyInit_libtomopy(void)
{
}
# else
void
initlibtomopy(void)
{
}
# endif
#endif
//======================================================================================//
void
preprocessing(int ry, int rz, int num_pixels, float center, float* mov, float* gridx,
float* gridy)
{
for(int i = 0; i <= ry; ++i)
{
gridx[i] = -ry * 0.5f + i;
}
for(int i = 0; i <= rz; ++i)
{
gridy[i] = -rz * 0.5f + i;
}
*mov = ((float) num_pixels - 1) * 0.5f - center;
if(*mov - floor(*mov) < 0.01f)
{
*mov += 0.01f;
}
*mov += 0.5;
}
//======================================================================================//
// Returns 1 when theta_p falls in quadrant I or III (i.e. [0, pi/2) or
// [pi, 3*pi/2) after mapping negatives up by 2*pi), else 0.
// NOTE(review): the scaled-integer trick below stores 2*pi*ipi_c
// (~2.142e9) in float arithmetic, which is close to INT32_MAX; inputs
// outside roughly (-2*pi, 2*pi) may overflow the conversion — confirm
// the callers' range before reusing this elsewhere.
int
calc_quadrant(float theta_p)
{
// here we cast the float to an integer and rescale the integer to
// near INT_MAX to retain the precision. This method was tested
// on 1M random random floating points between -2*pi and 2*pi and
// was found to produce a speed up of:
//
// - 14.5x (Intel i7 MacBook)
// - 2.2x (NERSC KNL)
// - 1.5x (NERSC Edison)
// - 1.7x (NERSC Haswell)
//
// with a 0.0% incorrect quadrant determination rate
//
const int32_t ipi_c = 340870420;
int32_t theta_i = (int32_t)(theta_p * ipi_c);
theta_i += (theta_i < 0) ? (2.0f * M_PI * ipi_c) : 0;
return ((theta_i >= 0 && theta_i < 0.5f * M_PI * ipi_c) ||
(theta_i >= 1.0f * M_PI * ipi_c && theta_i < 1.5f * M_PI * ipi_c))
? 1
: 0;
}
//======================================================================================//
// Intersect the source->detector ray (position (xi, yi) rotated by the
// projection angle whose sine/cosine are sin_p/cos_p) with the horizontal
// and vertical grid lines: coordx[n] is the x at gridy[n], coordy[n] the
// y at gridx[n].
void
calc_coords(int ry, int rz, float xi, float yi, float sin_p, float cos_p,
            const float* gridx, const float* gridy, float* coordx, float* coordy)
{
    // Rotate the source and detector endpoints by the projection angle.
    const float srcx = xi * cos_p - yi * sin_p;
    const float srcy = xi * sin_p + yi * cos_p;
    const float detx = -xi * cos_p - yi * sin_p;
    const float dety = -xi * sin_p + yi * cos_p;
    // Ray slope and its reciprocal (used for the two intersection families).
    const float slope  = (srcy - dety) / (srcx - detx);
    const float islope = (srcx - detx) / (srcy - dety);
#pragma omp simd
    for(int k = 0; k <= rz; ++k)
    {
        coordx[k] = islope * (gridy[k] - srcy) + srcx;
    }
#pragma omp simd
    for(int k = 0; k <= ry; ++k)
    {
        coordy[k] = slope * (gridx[k] - srcx) + srcy;
    }
}
//======================================================================================//
// Filter the raw intersection points down to those inside the grid (with
// a 0.01 guard band at either edge). The (ax, ay) list pairs in-range
// coordx values with their gridy lines; (bx, by) pairs gridx lines with
// in-range coordy values. *asize / *bsize receive the kept counts.
void
trim_coords(int ry, int rz, const float* coordx, const float* coordy, const float* gridx,
            const float* gridy, int* asize, float* ax, float* ay, int* bsize, float* bx,
            float* by)
{
    const float xlo = gridx[0] + 0.01f;
    const float xhi = gridx[ry] - 0.01f;
    int na = 0;
    for(int k = 0; k <= rz; ++k)
    {
        if(coordx[k] >= xlo && coordx[k] <= xhi)
        {
            ax[na] = coordx[k];
            ay[na] = gridy[k];
            ++na;
        }
    }
    const float ylo = gridy[0] + 0.01f;
    const float yhi = gridy[rz] - 0.01f;
    int nb = 0;
    for(int k = 0; k <= ry; ++k)
    {
        if(coordy[k] >= ylo && coordy[k] <= yhi)
        {
            bx[nb] = gridx[k];
            by[nb] = coordy[k];
            ++nb;
        }
    }
    *asize = na;
    *bsize = nb;
}
//======================================================================================//
// Merge the two already-sorted intersection lists into (coorx, coory).
// When ind_condition == 0 the a-list is stored in descending order and is
// therefore traversed back-to-front; otherwise both lists are consumed
// front-to-back. *csize receives the merged length asize + bsize.
void
sort_intersections(int ind_condition, int asize, const float* ax, const float* ay,
                   int bsize, const float* bx, const float* by, int* csize, float* coorx,
                   float* coory)
{
    int ia = 0, ib = 0, out = 0;
    while(ia < asize && ib < bsize)
    {
        const int ka = (ind_condition == 0) ? (asize - 1 - ia) : ia;
        if(ax[ka] < bx[ib])
        {
            coorx[out] = ax[ka];
            coory[out] = ay[ka];
            ++ia;
        }
        else
        {
            coorx[out] = bx[ib];
            coory[out] = by[ib];
            ++ib;
        }
        ++out;
    }
    // Drain whichever list still has entries.
    while(ia < asize)
    {
        const int ka = (ind_condition == 0) ? (asize - 1 - ia) : ia;
        coorx[out] = ax[ka];
        coory[out] = ay[ka];
        ++ia;
        ++out;
    }
    while(ib < bsize)
    {
        coorx[out] = bx[ib];
        coory[out] = by[ib];
        ++ib;
        ++out;
    }
    (*csize) = asize + bsize;
}
//======================================================================================//
void
calc_dist(int ry, int rz, int csize, const float* coorx, const float* coory, int* indi,
float* dist)
{
if(csize < 2)
return;
const int _size = csize - 1;
//------------------------------------------------------------------------//
// calculate dist
//------------------------------------------------------------------------//
{
float* _diffx = malloc(_size * sizeof(float));
float* _diffy = malloc(_size * sizeof(float));
#pragma omp simd
for(int n = 0; n < _size; ++n)
{
_diffx[n] = (coorx[n + 1] - coorx[n]) * (coorx[n + 1] - coorx[n]);
}
#pragma omp simd
for(int n = 0; n < _size; ++n)
{
_diffy[n] = (coory[n + 1] - coory[n]) * (coory[n + 1] - coory[n]);
}
#pragma omp simd
for(int n = 0; n < _size; ++n)
{
dist[n] = sqrtf(_diffx[n] + _diffy[n]);
}
free(_diffx);
free(_diffy);
}
//------------------------------------------------------------------------//
// calculate indi
//------------------------------------------------------------------------//
int* _indx = malloc(_size * sizeof(int));
int* _indy = malloc(_size * sizeof(int));
#pragma omp simd
for(int n = 0; n < _size; ++n)
{
float _midx = 0.5f * (coorx[n + 1] + coorx[n]);
float _x1 = _midx + 0.5f * ry;
float _i1 = (int) (_midx + 0.5f * ry);
_indx[n] = _i1 - (_i1 > _x1);
}
#pragma omp simd
for(int n = 0; n < _size; ++n)
{
float _midy = 0.5f * (coory[n + 1] + coory[n]);
float _x2 = _midy + 0.5f * rz;
float _i2 = (int) (_midy + 0.5f * rz);
_indy[n] = _i2 - (_i2 > _x2);
}
#pragma omp simd
for(int n = 0; n < _size; ++n)
{
indi[n] = _indy[n] + (_indx[n] * rz);
}
free(_indx);
free(_indy);
}
//======================================================================================//
void
calc_dist2(int ry, int rz, int csize, const float* coorx, const float* coory, int* indx,
int* indy, float* dist)
{
#pragma omp simd
for(int n = 0; n < csize - 1; ++n)
{
float diffx = coorx[n + 1] - coorx[n];
float diffy = coory[n + 1] - coory[n];
dist[n] = sqrt(diffx * diffx + diffy * diffy);
}
#pragma omp simd
for(int n = 0; n < csize - 1; ++n)
{
float midx = (coorx[n + 1] + coorx[n]) * 0.5f;
float midy = (coory[n + 1] + coory[n]) * 0.5f;
float x1 = midx + ry * 0.5f;
float x2 = midy + rz * 0.5f;
int i1 = (int) (midx + ry * 0.5f);
int i2 = (int) (midy + rz * 0.5f);
indx[n] = i1 - (i1 > x1);
indy[n] = i2 - (i2 > x2);
}
}
//======================================================================================//
// Accumulate the forward projection of one ray: a dist-weighted sum of the
// model voxels the ray crosses, added into a single detector bin of
// simdata (slice s, projection p, detector d).
void
calc_simdata(int s, int p, int d, int ry, int rz, int dt, int dx, int csize,
             const int* indi, const float* dist, const float* model, float* simdata)
{
    const int model_off = s * ry * rz;
    const int data_off  = d + p * dx + s * dt * dx;
    float* out = &simdata[data_off];
    for(int n = 0; n < csize - 1; ++n)
    {
        *out += model[indi[n] + model_off] * dist[n];
    }
}
//======================================================================================//
// Vector-field forward projection: weight the x/y model components by the
// ray direction (vx, vy) and accumulate dist-weighted contributions into
// one detector bin of simdata.
void
calc_simdata2(int s, int p, int d, int ry, int rz, int dt, int dx, int csize,
              const int* indx, const int* indy, const float* dist, float vx, float vy,
              const float* modelx, const float* modely, float* simdata)
{
    const int data_off  = d + p * dx + s * dt * dx;
    const int slice_off = s * ry * rz;
    for(int n = 0; n < csize - 1; ++n)
    {
        const int v = indy[n] + indx[n] * rz + slice_off;
        simdata[data_off] += (modelx[v] * vx + modely[v] * vy) * dist[n];
    }
}
//======================================================================================//
// Like calc_simdata2, but the pair of model components combined and the
// voxel indexing scheme depend on which axis the slices are taken along
// (axis 0, 1, or 2). Any other axis value accumulates nothing, matching
// the original if/else chain.
void
calc_simdata3(int s, int p, int d, int ry, int rz, int dt, int dx, int csize,
              const int* indx, const int* indy, const float* dist, float vx, float vy,
              const float* modelx, const float* modely, const float* modelz, int axis,
              float* simdata)
{
    const int data_off = d + p * dx + s * dt * dx;
    for(int n = 0; n < csize - 1; ++n)
    {
        int v;
        float contrib;
        if(axis == 0)
        {
            v = indy[n] + indx[n] * rz + s * ry * rz;
            contrib = modelx[v] * vx + modely[v] * vy;
        }
        else if(axis == 1)
        {
            v = s + indx[n] * rz + indy[n] * ry * rz;
            contrib = modely[v] * vx + modelz[v] * vy;
        }
        else if(axis == 2)
        {
            v = indx[n] + s * rz + indy[n] * ry * rz;
            contrib = modelx[v] * vx + modelz[v] * vy;
        }
        else
        {
            continue;
        }
        simdata[data_off] += contrib * dist[n];
    }
}
//======================================================================================//
|
GeometryConverterOCC.h | /* -*-c++-*- IfcQuery www.ifcquery.com
*
MIT License
Copyright (c) 2017 Fabian Gerold
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <BRepAdaptor_Curve.hxx>
#include <BRep_Tool.hxx>
#include <GCPnts_AbscissaPoint.hxx>
#include <GCPnts_UniformAbscissa.hxx>
#include <Geom_Line.hxx>
#include <TopExp.hxx>
#include <TopExp_Explorer.hxx>
#include <TopoDS.hxx>
#include <TopoDS_Edge.hxx>
#include <TopoDS_Shape.hxx>
#include <TopoDS_Vertex.hxx>
#include <ifcpp/geometry/GeometrySettings.h>
#include <ifcpp/model/BuildingModel.h>
#include <ifcpp/model/BasicTypes.h>
#include <ifcpp/model/StatusCallback.h>
#include <ifcpp/IFC4/include/IfcCurtainWall.h>
#include <ifcpp/IFC4/include/IfcPropertySetDefinitionSet.h>
#include <ifcpp/IFC4/include/IfcRelAggregates.h>
#include <ifcpp/IFC4/include/IfcRelContainedInSpatialStructure.h>
#include <ifcpp/IFC4/include/IfcRelDefinesByProperties.h>
#include <ifcpp/IFC4/include/IfcSpace.h>
#include <ifcpp/IFC4/include/IfcWindow.h>
#include "RepresentationConverterOCC.h"
#include "GeometryInputDataOCC.h"
class GeometryConverterOCC : public StatusCallback
{
protected:
shared_ptr<BuildingModel> m_ifc_model;
shared_ptr<GeometrySettings> m_geom_settings;
shared_ptr<RepresentationConverterOCC> m_representation_converter;
std::map<int, shared_ptr<ProductShapeDataOCC> > m_product_shape_data;
std::map<int, shared_ptr<BuildingObject> > m_map_outside_spatial_structure;
double m_recent_progress;
std::map<int, std::vector<shared_ptr<StatusCallback::Message> > > m_messages;
#ifdef ENABLE_OPENMP
Mutex m_writelock_messages;
Mutex m_writelock_appearance_cache;
#endif
public:
// getters and setters
shared_ptr<BuildingModel>& getBuildingModel() { return m_ifc_model; }
shared_ptr<RepresentationConverterOCC>& getRepresentationConverter() { return m_representation_converter; }
std::map<int, shared_ptr<ProductShapeDataOCC> >& getShapeInputData() { return m_product_shape_data; }
shared_ptr<GeometrySettings>& getGeomSettings() { return m_geom_settings; }
std::map<int, shared_ptr<BuildingObject> >& getObjectsOutsideSpatialStructure() { return m_map_outside_spatial_structure; }
GeometryConverterOCC( shared_ptr<BuildingModel>& ifc_model )
{
m_ifc_model = ifc_model;
m_geom_settings = shared_ptr<GeometrySettings>( new GeometrySettings() );
resetNumVerticesPerCircle();
shared_ptr<UnitConverter>& unit_converter = m_ifc_model->getUnitConverter();
m_representation_converter = shared_ptr<RepresentationConverterOCC>( new RepresentationConverterOCC( m_geom_settings, unit_converter ) );
// redirect all messages to this
m_ifc_model->setMessageTarget( this );
m_representation_converter->setMessageTarget( this );
}
virtual ~GeometryConverterOCC() {}
void resetModel()
{
progressTextCallback( L"Unloading model, cleaning up memory..." );
clearInputCache();
m_recent_progress = 0.0;
m_ifc_model->clearCache();
m_ifc_model->clearIfcModel();
progressTextCallback( L"Unloading model done" );
progressValueCallback( 0.0, "parse" );
#ifdef _DEBUG
//GeomDebugUtils::clearDebugDumpFile();
#endif
}
void clearInputCache()
{
m_product_shape_data.clear();
m_map_outside_spatial_structure.clear();
m_representation_converter->clearCache();
m_messages.clear();
}
void resetNumVerticesPerCircle()
{
m_geom_settings->resetNumVerticesPerCircle();
}
void setModel( shared_ptr<BuildingModel> model )
{
if( m_ifc_model )
{
m_ifc_model->unsetMessageCallBack();
}
clearInputCache();
m_ifc_model = model;
m_representation_converter->clearCache();
m_representation_converter->setUnitConverter( m_ifc_model->getUnitConverter() );
m_ifc_model->setMessageTarget( this );
}
/*\brief method convertGeometry: Creates geometry for OpenCascade from previously loaded BuildingModel model.
\param[out] parent_group Group to append the resulting geometry.
**/
void convertGeometry()
{
progressTextCallback( L"Creating geometry..." );
progressValueCallback( 0, "geometry" );
m_product_shape_data.clear();
m_map_outside_spatial_structure.clear();
m_representation_converter->clearCache();
shared_ptr<ProductShapeDataOCC> ifc_project_data;
std::vector<shared_ptr<IfcObjectDefinition> > vec_ifc_object_defs;
const double length_to_meter_factor = m_ifc_model->getUnitConverter()->getLengthInMeterFactor();
const std::map<int, shared_ptr<BuildingEntity> >& map_entities = m_ifc_model->getMapIfcEntities();
for( auto it = map_entities.begin(); it != map_entities.end(); ++it )
{
shared_ptr<BuildingEntity> obj = it->second;
shared_ptr<IfcObjectDefinition> object_def = dynamic_pointer_cast<IfcObjectDefinition>(obj);
if( object_def )
{
vec_ifc_object_defs.push_back( object_def );
}
}
// create geometry for for each IfcProduct independently, spatial structure will be resolved later
std::map<int, shared_ptr<ProductShapeDataOCC> >* map_products_ptr = &m_product_shape_data;
const int num_products = (int)vec_ifc_object_defs.size();
#ifdef ENABLE_OPENMP
Mutex writelock_map;
Mutex writelock_ifc_project;
#pragma omp parallel firstprivate(num_products) shared(map_products_ptr)
{
// time for one product may vary significantly, so schedule not so many
#pragma omp for schedule(dynamic,10)
#endif
for( int i = 0; i < num_products; ++i )
{
shared_ptr<IfcObjectDefinition> ifc_object_def = vec_ifc_object_defs[i];
const int product_id = ifc_object_def->m_entity_id;
shared_ptr<ProductShapeDataOCC> product_geom_input_data( new ProductShapeDataOCC( product_id ) );
product_geom_input_data->m_ifc_object_definition = ifc_object_def;
std::stringstream thread_err;
if( dynamic_pointer_cast<IfcFeatureElementSubtraction>(ifc_object_def) )
{
// geometry will be created in method subtractOpenings
continue;
}
else if( dynamic_pointer_cast<IfcProject>(ifc_object_def) )
{
#ifdef ENABLE_OPENMP
ScopedLock scoped_lock( writelock_ifc_project );
#endif
ifc_project_data = product_geom_input_data;
}
try
{
convertIfcProductShape( product_geom_input_data );
}
catch( OutOfMemoryException& e )
{
throw e;
}
catch( BuildingException& e )
{
thread_err << e.what();
}
catch( Standard_Failure& sf )
{
thread_err << sf.GetMessageString();
}
catch( std::exception& e )
{
thread_err << e.what();
}
catch( ... )
{
thread_err << "undefined error, product id " << product_id;
}
{
#ifdef ENABLE_OPENMP
ScopedLock scoped_lock( writelock_map );
#endif
map_products_ptr->insert( std::make_pair( product_id, product_geom_input_data ) );
if( thread_err.tellp() > 0 )
{
messageCallback( thread_err.str().c_str(), StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ );
}
}
// progress callback
double progress = (double)i / (double)num_products;
if( progress - m_recent_progress > 0.02 )
{
#ifdef ENABLE_OPENMP
if( omp_get_thread_num() == 0 )
#endif
{
// leave 10% of progress to openscenegraph internals
progressValueCallback( progress*0.9, "geometry" );
m_recent_progress = progress;
}
}
}
#ifdef ENABLE_OPENMP
} // implicit barrier
#endif
try
{
// now resolve spatial structure
if( ifc_project_data )
{
resolveProjectStructure( ifc_project_data );
}
// check if there are entities that are not in spatial structure
for( auto it_product_shapes = m_product_shape_data.begin(); it_product_shapes != m_product_shape_data.end(); ++it_product_shapes )
{
shared_ptr<ProductShapeDataOCC> product_shape = it_product_shapes->second;
if( !product_shape )
{
continue;
}
if( !product_shape->m_added_to_spatial_structure )
{
if( !product_shape->m_ifc_object_definition.expired() )
{
shared_ptr<IfcObjectDefinition> ifc_product( product_shape->m_ifc_object_definition );
shared_ptr<IfcFeatureElementSubtraction> opening = dynamic_pointer_cast<IfcFeatureElementSubtraction>(ifc_product);
if( opening )
{
continue;
}
m_map_outside_spatial_structure[ifc_product->m_entity_id] = ifc_product;
}
}
}
}
catch( OutOfMemoryException& e )
{
throw e;
}
catch( BuildingException& e )
{
messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
}
catch( std::exception& e )
{
messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
}
catch( ... )
{
messageCallback( "undefined error", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ );
}
m_representation_converter->getProfileCache()->clearProfileCache();
progressTextCallback( L"Loading file done" );
progressValueCallback( 1.0, "geometry" );
}
//\brief method convertIfcProductShape: Creates geometry objects (meshset with connected vertex-edge-face graph) from an IfcProduct object
// caution: when using OpenMP, this method runs in parallel threads, so every write access to member variables needs a write lock
void convertIfcProductShape( shared_ptr<ProductShapeDataOCC>& product_shape )
{
// guard: the weak pointer to the IFC object definition may have expired
if( product_shape->m_ifc_object_definition.expired() )
{
return;
}
shared_ptr<IfcObjectDefinition> ifc_object_def( product_shape->m_ifc_object_definition );
shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def);
if( !ifc_product )
{
return;
}
// products without a representation carry no geometry to convert
if( !ifc_product->m_Representation )
{
return;
}
// evaluate IFC geometry
const double length_factor = m_ifc_model->getUnitConverter()->getLengthInMeterFactor();
shared_ptr<IfcProductRepresentation>& product_representation = ifc_product->m_Representation;
std::vector<shared_ptr<IfcRepresentation> >& vec_representations = product_representation->m_Representations;
for( size_t i_representations = 0; i_representations < vec_representations.size(); ++i_representations )
{
const shared_ptr<IfcRepresentation>& representation = vec_representations[i_representations];
// convert each representation independently; conversion errors are reported
// via messageCallback and do not abort the remaining representations
try
{
shared_ptr<RepresentationDataOCC> representation_data( new RepresentationDataOCC() );
m_representation_converter->convertIfcRepresentation( representation, representation_data );
product_shape->m_vec_representations.push_back( representation_data );
}
catch( OutOfMemoryException& e )
{
// out-of-memory is fatal: propagate to the caller
throw e;
}
catch( BuildingException& e )
{
messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
}
catch( std::exception& e )
{
messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
}
}
// IfcProduct has an ObjectPlacement that can be local or global
gp_Trsf product_placement_matrix;
if( ifc_product->m_ObjectPlacement )
{
// IfcPlacement2Matrix follows related placements in case of local coordinate systems
std::unordered_set<IfcObjectPlacement*> placement_already_applied;
PlacementConverterOCC::convertIfcObjectPlacement( ifc_product->m_ObjectPlacement, length_factor, product_placement_matrix, this, placement_already_applied );
product_shape->applyTransformToProduct( product_placement_matrix );
}
// handle openings
std::vector<shared_ptr<ProductShapeDataOCC> > vec_opening_data;
const shared_ptr<IfcElement> ifc_element = dynamic_pointer_cast<IfcElement>(ifc_product);
if( ifc_element )
{
m_representation_converter->subtractOpenings( ifc_element, product_shape );
}
// Fetch the IFCProduct relationships: scan IsDefinedBy property sets for appearance data
if( ifc_product->m_IsDefinedBy_inverse.size() > 0 )
{
std::vector<weak_ptr<IfcRelDefinesByProperties> >& vec_IsDefinedBy_inverse = ifc_product->m_IsDefinedBy_inverse;
for( size_t i = 0; i < vec_IsDefinedBy_inverse.size(); ++i )
{
shared_ptr<IfcRelDefinesByProperties> rel_def( vec_IsDefinedBy_inverse[i] );
shared_ptr<IfcPropertySetDefinitionSelect> relating_property_definition_select = rel_def->m_RelatingPropertyDefinition;
if( relating_property_definition_select )
{
// TYPE IfcPropertySetDefinitionSelect = SELECT (IfcPropertySetDefinition ,IfcPropertySetDefinitionSet);
// first SELECT alternative: a single property set definition
shared_ptr<IfcPropertySetDefinition> property_set_def = dynamic_pointer_cast<IfcPropertySetDefinition>(relating_property_definition_select);
if( property_set_def )
{
shared_ptr<IfcPropertySet> property_set = dynamic_pointer_cast<IfcPropertySet>(property_set_def);
if( property_set )
{
readAppearanceFromPropertySet( property_set, product_shape );
}
continue;
}
// second SELECT alternative: a set of property set definitions
shared_ptr<IfcPropertySetDefinitionSet> property_set_def_set = dynamic_pointer_cast<IfcPropertySetDefinitionSet>(relating_property_definition_select);
if( property_set_def_set )
{
std::vector<shared_ptr<IfcPropertySetDefinition> >& vec_propterty_set_def = property_set_def_set->m_vec;
std::vector<shared_ptr<IfcPropertySetDefinition> >::iterator it_property_set_def;
for( it_property_set_def = vec_propterty_set_def.begin(); it_property_set_def != vec_propterty_set_def.end(); ++it_property_set_def )
{
shared_ptr<IfcPropertySetDefinition> property_set_def2 = (*it_property_set_def);
if( property_set_def2 )
{
shared_ptr<IfcPropertySet> property_set = dynamic_pointer_cast<IfcPropertySet>(property_set_def2);
if( property_set )
{
readAppearanceFromPropertySet( property_set, product_shape );
}
}
}
continue;
}
}
}
}
}
// Reads appearance-related entries from an IFC property set and attaches
// corresponding AppearanceData to the product shape. Currently only complex
// properties with usage name "Color" produce an appearance; simple properties
// are scanned but not yet evaluated (layers: TODO).
void readAppearanceFromPropertySet( const shared_ptr<IfcPropertySet>& prop_set, shared_ptr<ProductShapeDataOCC>& product_shape )
{
	if( !prop_set )
	{
		return;
	}
	for( auto& ifc_property : prop_set->m_HasProperties )
	{
		if( !ifc_property )
		{
			continue;
		}
		shared_ptr<IfcSimpleProperty> simple_property = dynamic_pointer_cast<IfcSimpleProperty>(ifc_property);
		if( simple_property )
		{
			// ENTITY IfcSimpleProperty ABSTRACT SUPERTYPE OF(ONEOF( IfcPropertyBoundedValue, IfcPropertyEnumeratedValue, IfcPropertyListValue,
			// IfcPropertyReferenceValue, IfcPropertySingleValue, IfcPropertyTableValue))
			// Fix: m_Name can be unset in malformed IFC files; the previous code
			// dereferenced it unconditionally, which crashes on a null attribute.
			shared_ptr<IfcIdentifier> property_name = simple_property->m_Name;
			if( property_name )
			{
				std::wstring name_str = property_name->m_value;
				if( name_str.compare( L"LayerName" ) == 0 )
				{
					// TODO: implement layers
				}
			}
			shared_ptr<IfcPropertySingleValue> property_single_value = dynamic_pointer_cast<IfcPropertySingleValue>(simple_property);
			if( property_single_value )
			{
				//shared_ptr<IfcValue>& nominal_value = property_single_value->m_NominalValue; //optional
				//shared_ptr<IfcUnit>& unit = property_single_value->m_Unit; //optional
			}
			continue;
		}
		shared_ptr<IfcComplexProperty> complex_property = dynamic_pointer_cast<IfcComplexProperty>(ifc_property);
		if( complex_property )
		{
			if( !complex_property->m_UsageName ) continue;
			if( complex_property->m_UsageName->m_value.compare( L"Color" ) == 0 )
			{
				vec4 vec_color;
				m_representation_converter->getStylesConverter()->convertIfcComplexPropertyColor( complex_property, vec_color );
				shared_ptr<AppearanceData> appearance_data( new AppearanceData( -1 ) );
				if( !appearance_data )
				{
					throw OutOfMemoryException( __FUNC__ );
				}
				// same color is applied to ambient, diffuse and specular channels
				appearance_data->m_apply_to_geometry_type = AppearanceData::GEOM_TYPE_ANY;
				appearance_data->m_color_ambient = vec_color;
				appearance_data->m_color_diffuse = vec_color;
				appearance_data->m_color_specular = vec_color;
				appearance_data->m_shininess = 35.f;
				product_shape->addAppearance( appearance_data );
			}
		}
	}
}
// Recursively walks the spatial structure starting at product_data:
// follows IfcRelAggregates (decomposition) and, for spatial structure
// elements, IfcRelContainedInSpatialStructure (containment). Every
// visited product is marked m_added_to_spatial_structure and linked
// as a child of its parent shape data.
// Fix: removed the unused local 'entity_id'.
void resolveProjectStructure( shared_ptr<ProductShapeDataOCC>& product_data )
{
	if( !product_data )
	{
		return;
	}
	if( product_data->m_ifc_object_definition.expired() )
	{
		return;
	}
	product_data->m_added_to_spatial_structure = true;
	shared_ptr<IfcObjectDefinition> ifc_object_def( product_data->m_ifc_object_definition );

	// follow decomposition relationships (IfcRelAggregates)
	const std::vector<weak_ptr<IfcRelAggregates> >& vec_IsDecomposedBy = ifc_object_def->m_IsDecomposedBy_inverse;
	for( size_t ii = 0; ii < vec_IsDecomposedBy.size(); ++ii )
	{
		const weak_ptr<IfcRelAggregates>& rel_aggregates_weak_ptr = vec_IsDecomposedBy[ii];
		if( rel_aggregates_weak_ptr.expired() )
		{
			continue;
		}
		shared_ptr<IfcRelAggregates> rel_aggregates( rel_aggregates_weak_ptr );
		if( rel_aggregates )
		{
			const std::vector<shared_ptr<IfcObjectDefinition> >& vec_related_objects = rel_aggregates->m_RelatedObjects;
			for( size_t jj = 0; jj < vec_related_objects.size(); ++jj )
			{
				const shared_ptr<IfcObjectDefinition>& related_obj_def = vec_related_objects[jj];
				if( related_obj_def )
				{
					// only objects for which shape data was created are linked in
					auto it_product_map = m_product_shape_data.find( related_obj_def->m_entity_id );
					if( it_product_map != m_product_shape_data.end() )
					{
						shared_ptr<ProductShapeDataOCC>& related_product_shape = it_product_map->second;
						if( related_product_shape )
						{
							product_data->addChildProduct( related_product_shape, product_data );
							resolveProjectStructure( related_product_shape );
						}
					}
				}
			}
		}
	}

	// spatial structure elements additionally contain elements directly
	shared_ptr<IfcSpatialStructureElement> spatial_ele = dynamic_pointer_cast<IfcSpatialStructureElement>(ifc_object_def);
	if( spatial_ele )
	{
		const std::vector<weak_ptr<IfcRelContainedInSpatialStructure> >& vec_contains = spatial_ele->m_ContainsElements_inverse;
		for( size_t ii = 0; ii < vec_contains.size(); ++ii )
		{
			const weak_ptr<IfcRelContainedInSpatialStructure>& rel_contained_weak_ptr = vec_contains[ii];
			if( rel_contained_weak_ptr.expired() )
			{
				continue;
			}
			shared_ptr<IfcRelContainedInSpatialStructure> rel_contained( rel_contained_weak_ptr );
			if( rel_contained )
			{
				const std::vector<shared_ptr<IfcProduct> >& vec_related_elements = rel_contained->m_RelatedElements;
				for( size_t jj = 0; jj < vec_related_elements.size(); ++jj )
				{
					const shared_ptr<IfcProduct>& related_product = vec_related_elements[jj];
					if( related_product )
					{
						auto it_product_map = m_product_shape_data.find( related_product->m_entity_id );
						if( it_product_map != m_product_shape_data.end() )
						{
							shared_ptr<ProductShapeDataOCC>& related_product_shape = it_product_map->second;
							if( related_product_shape )
							{
								product_data->addChildProduct( related_product_shape, product_data );
								resolveProjectStructure( related_product_shape );
							}
						}
					}
				}
			}
		}
	}
	// TODO: handle IfcRelAssignsToProduct
}
virtual void messageTarget( void* ptr, shared_ptr<StatusCallback::Message> m )
{
GeometryConverterOCC* myself = (GeometryConverterOCC*)ptr;
if( myself )
{
if( m->m_entity )
{
#ifdef ENABLE_OPENMP
ScopedLock lock( myself->m_writelock_messages );
#endif
// make sure that the same message for one entity does not appear several times
const int entity_id = m->m_entity->m_entity_id;
auto it = myself->m_messages.find( entity_id );
if( it != myself->m_messages.end() )
{
std::vector<shared_ptr<StatusCallback::Message> >& vec_message_for_entity = it->second;
for( size_t i = 0; i < vec_message_for_entity.size(); ++i )
{
shared_ptr<StatusCallback::Message>& existing_message = vec_message_for_entity[i];
if( existing_message->m_message_text.compare( m->m_message_text ) == 0 )
{
// same message for same entity is already there, so ignore message
return;
}
}
vec_message_for_entity.push_back( m );
}
else
{
std::vector<shared_ptr<StatusCallback::Message> >& vec = myself->m_messages.insert( std::make_pair( entity_id, std::vector<shared_ptr<StatusCallback::Message> >() ) ).first->second;
vec.push_back( m );
}
}
myself->messageCallback( m );
}
}
};
|
original.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "atax.h"
/* Array initialization. */
static
void init_array (int nx, int ny,
		 DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
		 DATA_TYPE POLYBENCH_1D(x,NY,ny))
{
  /* x[i] = i*pi, A[i][j] = i*(j+1)/nx */
  for (int i = 0; i < ny; i++)
    x[i] = M_PI * i;

  for (int i = 0; i < nx; i++)
    for (int j = 0; j < ny; j++)
      A[i][j] = ((DATA_TYPE) i*(j+1)) / nx;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int nx,
DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
int i;
for (i = 0; i < nx; i++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]);
/* newline before element 0 and then after every 20th element */
if (i % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
   including the call and return.
   Computes y = A^T * (A * x): tmp = A*x, then y += A^T applied via tmp. */
static
void kernel_atax(int nx, int ny,
		 DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
		 DATA_TYPE POLYBENCH_1D(x,NY,ny),
		 DATA_TYPE POLYBENCH_1D(y,NY,ny),
		 DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
  int i, j;
#pragma scop
#pragma omp parallel
  {
    /* zero the accumulator; implicit barrier before the next loop */
#pragma omp for
    for (i = 0; i < _PB_NY; i++)
      y[i] = 0;
#pragma omp for private (j)
    for (i = 0; i < _PB_NX; i++)
      {
	tmp[i] = 0;
	for (j = 0; j < _PB_NY; j++)
	  tmp[i] = tmp[i] + A[i][j] * x[j];
	/* BUG FIX: the loop is parallel over i, but every iteration updates
	   all of y[0..NY), so concurrent threads raced on y[j]. The update
	   must be atomic (a per-thread copy with reduction would also work). */
	for (j = 0; j < _PB_NY; j++)
	  {
#pragma omp atomic
	    y[j] += A[i][j] * tmp[i];
	  }
      }
  }
#pragma endscop
}
/* Driver: allocate, initialize, time kernel_atax, print live-out y. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int nx = NX;
int ny = NY;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny);
POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx);
/* Initialize array(s). */
init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_atax (nx, ny,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(x),
POLYBENCH_ARRAY(y),
POLYBENCH_ARRAY(tmp));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(x);
POLYBENCH_FREE_ARRAY(y);
POLYBENCH_FREE_ARRAY(tmp);
return 0;
}
|
singleModificado.c | #include <stdio.h>
#include <omp.h>
/* Demonstrates OpenMP 'single' regions: one thread reads a value from stdin,
 * all threads then fill b[] in a shared worksharing loop, and one thread
 * prints the result.
 * Fixes: implicit-int 'main()' (invalid since C99) -> 'int main(void)';
 * unchecked scanf() left 'a' uninitialized on bad input (undefined behavior);
 * typo in an output message ("Depués" -> "Después"). */
int main(void) {
  int n = 9, i, a, b[n];
  for (i = 0; i < n; i++) b[i] = -1;
#pragma omp parallel
  {
#pragma omp single
    {
      printf("Introduce valor de inicialización a: ");
      /* default to 0 if input is not a valid integer */
      if (scanf("%d", &a) != 1) a = 0;
      printf("Single 1 ejecutada por el thread %d\n", omp_get_thread_num());
    } /* implicit barrier: all threads see 'a' before the loop */
#pragma omp for
    for (i = 0; i < n; i++)
      b[i] = a;
#pragma omp single
    {
      printf("Single 2 ejecutada por el thread %d\n", omp_get_thread_num());
      printf("Después de la región parallel:\n");
      for (i = 0; i < n; i++)
        printf("b[%d] = %d\t\n", i, b[i]);
    }
  }
  return 0;
}
|
SoftMax.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SoftMax.c"
#else
#ifdef _MSC_VER
#define SOFTMAX_SIZE_TYPE int64_t
#define SOFTMAX_CAST_TYPE (int64_t)
#else
#define SOFTMAX_SIZE_TYPE uint64_t
#define SOFTMAX_CAST_TYPE
#endif
/* Softmax forward along dimension 'dim'.
 * The tensor is treated as (outer_size, dim_size, inner_size); each
 * (outer, inner) pair selects an independent slice of length dim_size. */
void THNN_(SoftMax_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
int64_t dim) {
THArgCheck(dim >= 0 && dim < input->nDimension, 4,
"dim out of range (got %d, but input has %d dims)", dim, input->nDimension);
/* collapse all dimensions before 'dim' into outer_size, after it into inner_size */
uint64_t outer_size = 1;
uint64_t dim_size = input->size[dim];
uint64_t inner_size = 1;
for (int64_t i = 0; i < dim; ++i)
outer_size *= input->size[i];
for (int64_t i = dim + 1; i < input->nDimension; ++i)
inner_size *= input->size[i];
input = THTensor_(newContiguous)(input);
THTensor_(resizeAs)(output, input);
real *input_data_base = THTensor_(data)(input);
real *output_data_base = THTensor_(data)(output);
uint64_t dim_stride = inner_size;
uint64_t outer_stride = dim_size * dim_stride;
SOFTMAX_SIZE_TYPE i, d;
/* one slice per iteration; slices are independent, so the loop parallelizes safely */
#pragma omp parallel for private(i, d)
for (i = 0; i < SOFTMAX_CAST_TYPE (outer_size * inner_size); i++) {
uint64_t outer_idx = i / inner_size;
uint64_t inner_idx = i % inner_size;
real *input_data = input_data_base + outer_idx * outer_stride + inner_idx;
real *output_data = output_data_base + outer_idx * outer_stride + inner_idx;
/* subtract the slice maximum before exp() so the exponentials cannot overflow */
real input_max = -THInf;
for (d = 0; d < SOFTMAX_CAST_TYPE dim_size; d++) {
if (input_data[d * dim_stride] >= input_max) input_max = input_data[d * dim_stride];
}
accreal sum = 0;
for (d = 0; d < SOFTMAX_CAST_TYPE dim_size; d++) {
real z = exp(input_data[d * dim_stride] - input_max);
output_data[d * dim_stride] = z;
sum += z;
}
real invsum = 1 / sum; // NOTE: truncate sum to real once
for (d = 0; d < SOFTMAX_CAST_TYPE dim_size; d++) {
output_data[d * dim_stride] *= invsum;
}
}
THTensor_(free)(input);
}
/* Softmax backward along dimension 'dim':
 * gradInput = output * (gradOutput - sum(gradOutput * output)) per slice. */
void THNN_(SoftMax_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
THTensor *output,
int64_t dim)
{
THNN_CHECK_SHAPE(output, gradOutput);
THArgCheck(dim >= 0 && dim < output->nDimension, 6,
"dim out of range (got %d, but input has %d dims)", dim, output->nDimension);
/* same (outer, dim, inner) flattening as in updateOutput */
uint64_t outer_size = 1;
uint64_t dim_size = output->size[dim];
uint64_t inner_size = 1;
for (int64_t i = 0; i < dim; ++i)
outer_size *= output->size[i];
for (int64_t i = dim + 1; i < output->nDimension; ++i)
inner_size *= output->size[i];
gradOutput = THTensor_(newContiguous)(gradOutput);
output = THTensor_(newContiguous)(output);
THTensor_(resizeAs)(gradInput, output);
real *gradInput_data_base = THTensor_(data)(gradInput);
real *output_data_base = THTensor_(data)(output);
real *gradOutput_data_base = THTensor_(data)(gradOutput);
uint64_t dim_stride = inner_size;
uint64_t outer_stride = dim_size * dim_stride;
SOFTMAX_SIZE_TYPE i, d;
/* slices are independent, so the loop parallelizes safely */
#pragma omp parallel for private(i, d)
for (i = 0; i < SOFTMAX_CAST_TYPE (outer_size * inner_size); i++)
{
uint64_t outer_idx = i / inner_size;
uint64_t inner_idx = i % inner_size;
real *gradInput_data = gradInput_data_base + outer_idx * outer_stride + inner_idx;
real *output_data = output_data_base + outer_idx * outer_stride + inner_idx;
real *gradOutput_data = gradOutput_data_base + outer_idx * outer_stride + inner_idx;
/* accumulate the dot product in accreal for precision */
accreal sum = 0;
for (d = 0; d < SOFTMAX_CAST_TYPE dim_size; d++)
sum += ((accreal)gradOutput_data[d * dim_stride]) * ((accreal)output_data[d * dim_stride]);
for (d = 0; d < SOFTMAX_CAST_TYPE dim_size; d++)
gradInput_data[d * dim_stride] = output_data[d * dim_stride] * (gradOutput_data[d * dim_stride] - sum);
}
THTensor_(free)(gradOutput);
THTensor_(free)(output);
}
#endif
|
delete_inf_refcount.c | // RUN: %libomptarget-compile-run-and-check-generic
#include <stdio.h>
#include <omp.h>
#pragma omp declare target
int isHost;
#pragma omp end declare target
/* Regression test: maps a variable to the device, sets it in a target
 * region, copies it back, then deletes the device copy. Output strings
 * are matched by FileCheck (CHECK line below) — do not change them. */
int main(void) {
isHost = -1;
#pragma omp target enter data map(to: isHost)
#pragma omp target
{ isHost = omp_is_initial_device(); }
#pragma omp target update from(isHost)
if (isHost < 0) {
printf("Runtime error, isHost=%d\n", isHost);
}
/* map(delete:) discards the device copy of isHost */
#pragma omp target exit data map(delete: isHost)
// CHECK: Target region executed on the device
printf("Target region executed on the %s\n", isHost ? "host" : "device");
return isHost;
}
|
invert.c | /* Copyright 2016. The Regents of the University of California.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2016 Jon Tamir <jtamir@eecs.berkeley.edu>
*/
#include <stdlib.h>
#include <assert.h>
#include <complex.h>
#include <stdio.h>
#include "num/multind.h"
#include "num/init.h"
#include "misc/mmio.h"
#include "misc/misc.h"
#ifndef DIMS
#define DIMS 16
#endif
static const char usage_str[] = "<input> <output>";
static const char help_str[] = "Invert array (1 / <input>). The output is set to zero in case of divide by zero.\n";
/* Element-wise reciprocal of a complex array: odata[i] = 1 / idata[i],
 * with 0 written where idata[i] == 0 (divide-by-zero guard).
 * Fix: hoisted md_calc_size() out of the loop condition — it was
 * re-evaluated on every iteration, and an OpenMP canonical loop needs
 * a loop-invariant bound anyway. */
int main_invert(int argc, char* argv[])
{
	mini_cmdline(argc, argv, 2, usage_str, help_str);
	num_init();

	long dims[DIMS];
	complex float* idata = load_cfl(argv[1], DIMS, dims);
	complex float* odata = create_cfl(argv[2], DIMS, dims);

	const long size = md_calc_size(DIMS, dims);

	#pragma omp parallel for
	for (long i = 0; i < size; i++)
		odata[i] = idata[i] == 0 ? 0. : 1. / idata[i];

	unmap_cfl(DIMS, dims, idata);
	unmap_cfl(DIMS, dims, odata);
	exit(0);
}
|
IJMatrix_parcsr.c | /******************************************************************************
* Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* IJMatrix_ParCSR interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "_hypre_parcsr_mv.h"
#include "../HYPRE.h"
/******************************************************************************
*
* hypre_IJMatrixCreateParCSR
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixCreateParCSR(hypre_IJMatrix *matrix)
{
   MPI_Comm            comm             = hypre_IJMatrixComm(matrix);
   HYPRE_BigInt       *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt       *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   HYPRE_BigInt        first_row        = hypre_IJMatrixGlobalFirstRow(matrix);
   HYPRE_BigInt        first_col        = hypre_IJMatrixGlobalFirstCol(matrix);
   hypre_ParCSRMatrix *par_matrix;
   HYPRE_BigInt        row_starts[2];
   HYPRE_BigInt        col_starts[2];
   HYPRE_Int           k;

   /* Shift the partitionings to zero-based starts. When the global first
      row/col is already 0 the subtraction is a no-op, so the branches of
      the former if/else collapse into a single loop. */
   for (k = 0; k < 2; k++)
   {
      row_starts[k] = row_partitioning[k] - first_row;
      col_starts[k] = col_partitioning[k] - first_col;
   }

   par_matrix = hypre_ParCSRMatrixCreate(comm, hypre_IJMatrixGlobalNumRows(matrix),
                                         hypre_IJMatrixGlobalNumCols(matrix),
                                         row_starts, col_starts, 0, 0, 0);

   hypre_IJMatrixObject(matrix) = par_matrix;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetRowSizesParCSR
*
*****************************************************************************/
/* Stores the user-supplied (estimated) number of entries per local row in
 * the auxiliary matrix, creating the auxiliary matrix if necessary. */
HYPRE_Int
hypre_IJMatrixSetRowSizesParCSR(hypre_IJMatrix *matrix,
const HYPRE_Int *sizes)
{
HYPRE_Int local_num_rows, local_num_cols, i, *row_space = NULL;
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
local_num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
local_num_cols = (HYPRE_Int)(col_partitioning[1] - col_partitioning[0]);
hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
/* reuse an existing row_space array if the auxiliary matrix already has one */
if (aux_matrix)
{
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
}
if (!row_space)
{
row_space = hypre_CTAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
}
for (i = 0; i < local_num_rows; i++)
{
row_space[i] = sizes[i];
}
if (!aux_matrix)
{
hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, row_space);
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
hypre_AuxParCSRMatrixRowSpace(aux_matrix) = row_space;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* GPU builds additionally track the total number of user-specified on-proc elements */
hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = 0;
for (i = 0; i < local_num_rows; i++)
{
hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) += sizes[i];
}
#endif
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetDiagOffdSizesParCSR
* sets diag_i inside the diag part of the ParCSRMatrix
* and offd_i inside the offd part,
* requires exact row sizes for diag and offd
*
*****************************************************************************/
/* Stores exact per-row entry counts for the diag and offd parts in the
 * auxiliary matrix and clears its NeedAux flag (exact sizes make the
 * auxiliary assembly structures unnecessary). */
HYPRE_Int
hypre_IJMatrixSetDiagOffdSizesParCSR(hypre_IJMatrix *matrix,
const HYPRE_Int *diag_sizes,
const HYPRE_Int *offd_sizes)
{
HYPRE_Int local_num_rows, local_num_cols;
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
local_num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
local_num_cols = (HYPRE_Int)(col_partitioning[1] - col_partitioning[0]);
hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *)hypre_IJMatrixTranslator(matrix);
if (!aux_matrix)
{
hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
/* allocate size arrays lazily, then overwrite with the caller's values */
if ( hypre_AuxParCSRMatrixDiagSizes(aux_matrix) == NULL)
{
hypre_AuxParCSRMatrixDiagSizes(aux_matrix) = hypre_TAlloc(HYPRE_Int, local_num_rows,
HYPRE_MEMORY_HOST);
}
if ( hypre_AuxParCSRMatrixOffdSizes(aux_matrix) == NULL)
{
hypre_AuxParCSRMatrixOffdSizes(aux_matrix) = hypre_TAlloc(HYPRE_Int, local_num_rows,
HYPRE_MEMORY_HOST);
}
hypre_TMemcpy(hypre_AuxParCSRMatrixDiagSizes(aux_matrix), diag_sizes, HYPRE_Int, local_num_rows,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
hypre_TMemcpy(hypre_AuxParCSRMatrixOffdSizes(aux_matrix), offd_sizes, HYPRE_Int, local_num_rows,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetMaxOnProcElmtsParCSR
*
*****************************************************************************/
/* Records the user's upper bound on on-processor elements in the auxiliary
 * matrix. Only meaningful for GPU builds; a no-op otherwise. */
HYPRE_Int
hypre_IJMatrixSetMaxOnProcElmtsParCSR(hypre_IJMatrix *matrix,
HYPRE_Int max_on_proc_elmts)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_Int local_num_rows, local_num_cols, my_id;
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
MPI_Comm comm = hypre_IJMatrixComm(matrix);
hypre_MPI_Comm_rank(comm, &my_id);
/* create the auxiliary matrix on first use */
aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
if (!aux_matrix)
{
local_num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
local_num_cols = (HYPRE_Int)(col_partitioning[1] - col_partitioning[0]);
hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = max_on_proc_elmts;
#endif
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetMaxOffProcElmtsParCSR
*
*****************************************************************************/
/* Records the user's upper bound on off-processor elements in the
 * auxiliary matrix, creating the auxiliary matrix on first use. */
HYPRE_Int
hypre_IJMatrixSetMaxOffProcElmtsParCSR(hypre_IJMatrix *matrix,
HYPRE_Int max_off_proc_elmts)
{
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_Int local_num_rows, local_num_cols, my_id;
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
MPI_Comm comm = hypre_IJMatrixComm(matrix);
hypre_MPI_Comm_rank(comm, &my_id);
aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
if (!aux_matrix)
{
local_num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
local_num_cols = (HYPRE_Int)(col_partitioning[1] - col_partitioning[0]);
hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows,
local_num_cols, NULL);
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* GPU builds mirror the bound into the user off-proc element count */
hypre_AuxParCSRMatrixUsrOffProcElmts(aux_matrix) = max_off_proc_elmts;
#endif
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixInitializeParCSR
*
* initializes AuxParCSRMatrix and ParCSRMatrix as necessary
*
*****************************************************************************/
/* Convenience wrapper: initialize using the memory location of the
 * current hypre handle. */
HYPRE_Int
hypre_IJMatrixInitializeParCSR(hypre_IJMatrix *matrix)
{
return hypre_IJMatrixInitializeParCSR_v2(matrix, hypre_HandleMemoryLocation(hypre_handle()));
}
/* Initializes the ParCSR matrix and its auxiliary assembly structures for the
 * requested memory location, creating either object if it does not exist yet.
 * If exact diag/offd row sizes were set, the CSR row pointers are prefilled
 * from them (host execution path). */
HYPRE_Int
hypre_IJMatrixInitializeParCSR_v2(hypre_IJMatrix *matrix, HYPRE_MemoryLocation memory_location)
{
hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
/* auxiliary data lives on host or device depending on the execution policy */
HYPRE_MemoryLocation memory_location_aux =
hypre_GetExecPolicy1(memory_location) == HYPRE_EXEC_HOST ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE;
if (hypre_IJMatrixAssembleFlag(matrix) == 0)
{
/* not assembled yet: make sure both the ParCSR and aux matrices exist */
if (!par_matrix)
{
hypre_IJMatrixCreateParCSR(matrix);
par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
}
HYPRE_Int local_num_rows = hypre_ParCSRMatrixNumRows(par_matrix);
HYPRE_Int i;
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
if (!aux_matrix)
{
hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, hypre_ParCSRMatrixNumCols(par_matrix),
NULL);
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
hypre_ParCSRMatrixInitialize_v2(par_matrix, memory_location);
hypre_AuxParCSRMatrixInitialize_v2(aux_matrix, memory_location_aux);
/* WM: TODO - implement for sycl... is this available for other non-cuda/hip gpu implementations? */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
if (hypre_GetExecPolicy1(memory_location_aux) == HYPRE_EXEC_HOST)
#endif
{
/* exact row sizes known: build the CSR row pointer arrays by prefix sum */
if (hypre_AuxParCSRMatrixDiagSizes(aux_matrix))
{
for (i = 0; i < local_num_rows; i++)
{
hypre_CSRMatrixI(diag)[i + 1] = hypre_CSRMatrixI(diag)[i] + hypre_AuxParCSRMatrixDiagSizes(
aux_matrix)[i];
}
hypre_CSRMatrixNumNonzeros(diag) = hypre_CSRMatrixI(diag)[local_num_rows];
hypre_CSRMatrixInitialize(diag);
}
if (hypre_AuxParCSRMatrixOffdSizes(aux_matrix))
{
for (i = 0; i < local_num_rows; i++)
{
hypre_CSRMatrixI(offd)[i + 1] = hypre_CSRMatrixI(offd)[i] + hypre_AuxParCSRMatrixOffdSizes(
aux_matrix)[i];
}
hypre_CSRMatrixNumNonzeros(offd) = hypre_CSRMatrixI(offd)[local_num_rows];
hypre_CSRMatrixInitialize(offd);
}
}
if (!hypre_AuxParCSRMatrixNeedAux(aux_matrix))
{
/* without aux structures, the per-row insertion indices start at the row pointers */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < local_num_rows; i++)
{
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[i] = hypre_CSRMatrixI(diag)[i];
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[i] = hypre_CSRMatrixI(offd)[i];
}
}
}
else if ( memory_location_aux == HYPRE_MEMORY_HOST )
{
/* AB 4/06 - the assemble routine destroys the aux matrix - so we need
to recreate if initialize is called again
*/
if (!aux_matrix)
{
hypre_AuxParCSRMatrixCreate(&aux_matrix, hypre_ParCSRMatrixNumRows(par_matrix),
hypre_ParCSRMatrixNumCols(par_matrix), NULL);
hypre_AuxParCSRMatrixMemoryLocation(aux_matrix) = HYPRE_MEMORY_HOST;
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
}
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixGetRowCountsParCSR
*
* gets the number of columns for rows specified by the user
*
*****************************************************************************/
/* For each requested global row, returns the number of stored entries
 * (diag + offd). Rows not owned by this process get ncols[i] = 0 and,
 * if print_level is set, a warning.
 * BUG FIX: 'index' is written inside the OpenMP loop but was missing from
 * the private clause, so all threads raced on a shared variable; it is now
 * privatized alongside 'i' and 'row_index'. */
HYPRE_Int hypre_IJMatrixGetRowCountsParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_BigInt *rows,
HYPRE_Int *ncols)
{
HYPRE_BigInt row_index;
MPI_Comm comm = hypre_IJMatrixComm(matrix);
hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
HYPRE_Int i, my_id, index;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
hypre_MPI_Comm_rank(comm, &my_id);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i, row_index, index) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < nrows; i++)
{
row_index = rows[i];
if (row_index >= row_partitioning[0] &&
row_index < row_partitioning[1])
{
/* compute local row number */
index = (HYPRE_Int)(row_index - row_partitioning[0]);
ncols[i] = diag_i[index + 1] - diag_i[index] + offd_i[index + 1] - offd_i[index];
}
else
{
ncols[i] = 0;
if (print_level)
{
hypre_printf ("Warning! Row %b is not on Proc. %d!\n",
row_index, my_id);
}
}
}
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixGetValuesParCSR
*
* gets values of an IJMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixGetValuesParCSR( hypre_IJMatrix *matrix,
                               HYPRE_Int nrows,
                               HYPRE_Int *ncols,
                               HYPRE_BigInt *rows,
                               HYPRE_BigInt *cols,
                               HYPRE_Complex *values)
{
   /* Reads values out of an assembled IJ/ParCSR matrix.
    *
    * Two modes, selected by the sign of nrows:
    *   nrows >= 0: for each (row, col) pair the caller specifies, look up
    *               the stored value (0.0 if the entry is not stored);
    *               ncols[i] gives how many cols belong to rows[i].
    *   nrows <  0: "whole row" mode -- return ALL stored entries of each
    *               of the |nrows| requested rows, writing both cols and
    *               values; ncols[] supplies the caller's capacity per row
    *               and is overwritten with actual counts if they differ.
    */
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   HYPRE_Int assemble_flag = hypre_IJMatrixAssembleFlag(matrix);
   hypre_CSRMatrix *diag;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   hypre_CSRMatrix *offd;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_BigInt *col_map_offd;
   HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(par_matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_Int i, j, n, ii, indx;
   HYPRE_Int num_procs, my_id;
   HYPRE_BigInt col_0, col_n, row, col_indx, first;
   HYPRE_Int row_local, row_size;
   HYPRE_Int warning = 0;      /* set when a row has fewer entries than requested */
   HYPRE_Int *counter;         /* prefix sums of ncols in whole-row mode */
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   /* Values can only be read back after assembly. */
   if (assemble_flag == 0)
   {
      hypre_error_in_arg(1);
      if (print_level)
      {
         hypre_printf("Error! Matrix not assembled yet! HYPRE_IJMatrixGetValues\n");
      }
   }
   /* [col_0, col_n] is this processor's owned (diag) global column range. */
   col_0 = col_starts[0];
   col_n = col_starts[1] - 1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   diag = hypre_ParCSRMatrixDiag(par_matrix);
   diag_i = hypre_CSRMatrixI(diag);
   diag_j = hypre_CSRMatrixJ(diag);
   diag_data = hypre_CSRMatrixData(diag);
   offd = hypre_ParCSRMatrixOffd(par_matrix);
   offd_i = hypre_CSRMatrixI(offd);
   /* NOTE(review): offd_j/offd_data/col_map_offd are only set for
    * num_procs > 1; in the serial case the offd row ranges are presumably
    * empty so the pointers are never dereferenced -- verify. */
   if (num_procs > 1)
   {
      offd_j = hypre_CSRMatrixJ(offd);
      offd_data = hypre_CSRMatrixData(offd);
      col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
   }
   if (nrows < 0)
   {
      /* --- whole-row mode --- */
      nrows = -nrows;
      /* counter[i] = output offset where row i's entries start */
      counter = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST);
      counter[0] = 0;
      for (i = 0; i < nrows; i++)
      {
         counter[i + 1] = counter[i] + ncols[i];
      }
      indx = 0;
      for (i = 0; i < nrows; i++)
      {
         row = rows[i];
         if (row >= row_partitioning[0] && row < row_partitioning[1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[0]);
            row_size = diag_i[row_local + 1] - diag_i[row_local] +
                       offd_i[row_local + 1] - offd_i[row_local];
            /* caller-provided total capacity is counter[nrows] */
            if (counter[i] + row_size > counter[nrows])
            {
               hypre_error_in_arg(1);
               if (print_level)
               {
                  hypre_printf ("Error! Not enough memory! HYPRE_IJMatrixGetValues\n");
               }
            }
            if (ncols[i] < row_size)
            {
               warning = 1;
            }
            /* copy diag entries (local col -> global via col_0 offset) */
            for (j = diag_i[row_local]; j < diag_i[row_local + 1]; j++)
            {
               cols[indx] = (HYPRE_BigInt)diag_j[j] + col_0;
               values[indx++] = diag_data[j];
            }
            /* copy offd entries (local col -> global via col_map_offd) */
            for (j = offd_i[row_local]; j < offd_i[row_local + 1]; j++)
            {
               cols[indx] = col_map_offd[offd_j[j]];
               values[indx++] = offd_data[j];
            }
            counter[i + 1] = indx;
         }
         else
         {
            if (print_level)
            {
               hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id);
            }
         }
      }
      if (warning)
      {
         /* at least one row had fewer entries than requested: rewrite
          * ncols with actual per-row counts so the caller can recover */
         for (i = 0; i < nrows; i++)
         {
            ncols[i] = counter[i + 1] - counter[i];
         }
         if (print_level)
         {
            hypre_printf ("Warning! ncols has been changed!\n");
         }
      }
      hypre_TFree(counter, HYPRE_MEMORY_HOST);
   }
   else
   {
      /* --- per-entry lookup mode --- */
      indx = 0;
      for (ii = 0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols[ii];
         if (n == 0) /* empty row */
         {
            continue;
         }
         if (row >= row_partitioning[0] && row < row_partitioning[1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[0]);
            /* compute local row number */
            for (i = 0; i < n; i++)
            {
               col_indx = cols[indx] - first;
               /* default when the entry is not stored */
               values[indx] = 0.0;
               if (col_indx < col_0 || col_indx > col_n)
               /* search in offd */
               {
                  /* linear scan matching against the global offd col map */
                  for (j = offd_i[row_local]; j < offd_i[row_local + 1]; j++)
                  {
                     if (col_map_offd[offd_j[j]] == col_indx)
                     {
                        values[indx] = offd_data[j];
                        break;
                     }
                  }
               }
               else /* search in diag */
               {
                  col_indx = col_indx - col_0;
                  for (j = diag_i[row_local]; j < diag_i[row_local + 1]; j++)
                  {
                     if (diag_j[j] == (HYPRE_Int)col_indx)
                     {
                        values[indx] = diag_data[j];
                        break;
                     }
                  }
               }
               indx++;
            }
         }
         else
         {
            if (print_level)
            {
               hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id);
            }
         }
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetValuesParCSR
*
* sets values in an IJMatrix before assembly,
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetValuesParCSR( hypre_IJMatrix       *matrix,
                               HYPRE_Int             nrows,
                               HYPRE_Int            *ncols,
                               const HYPRE_BigInt   *rows,
                               const HYPRE_Int      *row_indexes,
                               const HYPRE_BigInt   *cols,
                               const HYPRE_Complex  *values )
{
   /* Sets (overwrites) entries of an IJ/ParCSR matrix.
    *
    * Two regimes:
    *   - matrix already assembled: only entries that ALREADY exist in the
    *     sparsity pattern may be set; a missing entry is an error.
    *     Rows owned by other processors are silently skipped.
    *   - matrix not assembled: entries go into the auxiliary matrix
    *     (need_aux), growing per-row arrays as needed, OR directly into
    *     preallocated CSR storage (row sizes were declared up front).
    *
    * row_indexes[ii] gives the offset into cols/values where row ii's
    * data begins; ncols may be NULL, meaning one entry per row.
    */
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_Int row_local;
   //HYPRE_Int row_len;
   HYPRE_BigInt col_0, col_n, row;
   HYPRE_Int i, ii, j, n, not_found;
   //HYPRE_Int col_indx, cnt1;
   HYPRE_BigInt **aux_j;
   HYPRE_BigInt *local_j;
   HYPRE_BigInt *tmp_j;        /* overflow buffer for entries beyond row_space */
   HYPRE_Complex **aux_data;
   HYPRE_Complex *local_data;
   HYPRE_Complex *tmp_data;
   HYPRE_Int diag_space, offd_space;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int tmp_indx, indx;
   HYPRE_Int space, size, old_size;
   HYPRE_Int cnt, cnt_diag, cnt_offd;
   HYPRE_Int pos_diag, pos_offd;
   HYPRE_Int len_diag, len_offd;
   HYPRE_Int offd_indx, diag_indx;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_BigInt first;
   /*HYPRE_Int current_num_elmts;*/
   /*HYPRE_Int max_off_proc_elmts;*/
   //HYPRE_Int off_proc_i_indx;
   //HYPRE_BigInt *off_proc_i;
   //HYPRE_BigInt *off_proc_j;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   /*HYPRE_Complex *off_proc_data;*/
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   /* [col_0, col_n] is the locally-owned global column range (diag part) */
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1] - 1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      if (print_level)
      {
         hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n");
      }
   }
   if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
   {
      HYPRE_BigInt *col_map_offd;
      HYPRE_Int num_cols_offd;
      HYPRE_Int j_offd;
      for (ii = 0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];
         /* processor owns the row */
         if (row >= row_partitioning[0] && row < row_partitioning[1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[0]);
            /* compute local row number */
            diag = hypre_ParCSRMatrixDiag(par_matrix);
            diag_i = hypre_CSRMatrixI(diag);
            diag_j = hypre_CSRMatrixJ(diag);
            diag_data = hypre_CSRMatrixData(diag);
            offd = hypre_ParCSRMatrixOffd(par_matrix);
            offd_i = hypre_CSRMatrixI(offd);
            num_cols_offd = hypre_CSRMatrixNumCols(offd);
            if (num_cols_offd)
            {
               col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
               offd_j = hypre_CSRMatrixJ(offd);
               offd_data = hypre_CSRMatrixData(offd);
            }
            size = diag_i[row_local + 1] - diag_i[row_local] +
                   offd_i[row_local + 1] - offd_i[row_local];
            /* cannot set more entries than the row stores */
            if (n > size) /* Should we change this and allow this?
                             This could be same column index, i.e. only last
                             value is set, previous ones overwritten. */
            {
               hypre_error(HYPRE_ERROR_GENERIC);
               if (print_level)
               {
                  hypre_printf (" row %b too long! \n", row);
               }
               return hypre_error_flag;
            }
            pos_diag = diag_i[row_local];
            pos_offd = offd_i[row_local];
            len_diag = diag_i[row_local + 1];
            len_offd = offd_i[row_local + 1];
            not_found = 1;
            for (i = 0; i < n; i++)
            {
               if (cols[indx] < col_0 || cols[indx] > col_n)
               /* insert into offd */
               {
                  /* col_map_offd is sorted, so binary search the global col */
                  j_offd = hypre_BigBinarySearch(col_map_offd, cols[indx] - first,
                                                 num_cols_offd);
                  if (j_offd == -1)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  for (j = pos_offd; j < len_offd; j++)
                  {
                     if (offd_j[j] == j_offd)
                     {
                        offd_data[j] = values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  not_found = 1;
               }
               /* diagonal element */
               else if (cols[indx] == row)
               {
                  /* diagonal entry is expected to be stored first in the row */
                  if (diag_j[pos_diag] != row_local)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     /* return -1;*/
                     return hypre_error_flag;
                  }
                  diag_data[pos_diag] = values[indx];
               }
               else /* insert into diag */
               {
                  for (j = pos_diag; j < len_diag; j++)
                  {
                     if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
                     {
                        diag_data[j] = values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     /* return -1; */
                     return hypre_error_flag;
                  }
               }
               indx++;
            }
         }
         /* NOTE: rows owned by other processors are silently ignored here
          * (Set on off-proc rows is not supported after assembly). */
      }
   }
   else
   {
      /* --- not assembled: write into auxiliary structures --- */
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      for (ii = 0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];
         /* processor owns the row */
         if (row >= row_partitioning[0] && row < row_partitioning[1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[0]);
            /* compute local row number */
            if (need_aux)
            {
               /* per-row dynamic arrays of global col indices and values */
               aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
               aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
               local_j = aux_j[row_local];
               local_data = aux_data[row_local];
               space = row_space[row_local];
               old_size = row_length[row_local];
               size = space - old_size;
               if (size < n)
               {
                  /* possibly not enough free space: stage overflow in tmp_j/tmp_data */
                  size = n - size;
                  tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                  tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
               }
               else
               {
                  tmp_j = NULL;
               }
               tmp_indx = 0;
               not_found = 1;
               size = old_size;
               for (i = 0; i < n; i++)
               {
                  /* overwrite if the column already exists in this row */
                  for (j = 0; j < old_size; j++)
                  {
                     if (local_j[j] == cols[indx])
                     {
                        local_data[j] = values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     if (size < space)
                     {
                        local_j[size] = cols[indx];
                        local_data[size++] = values[indx];
                     }
                     else
                     {
                        tmp_j[tmp_indx] = cols[indx];
                        tmp_data[tmp_indx++] = values[indx];
                     }
                  }
                  not_found = 1;
                  indx++;
               }
               row_length[row_local] = size + tmp_indx;
               if (tmp_indx)
               {
                  /* grow the row arrays and append the staged overflow */
                  aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
                                                    size + tmp_indx, HYPRE_MEMORY_HOST);
                  aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
                                                       HYPRE_Complex, size + tmp_indx, HYPRE_MEMORY_HOST);
                  row_space[row_local] = size + tmp_indx;
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
               }
               cnt = size;
               for (i = 0; i < tmp_indx; i++)
               {
                  local_j[cnt] = tmp_j[i];
                  local_data[cnt++] = tmp_data[i];
               }
               if (tmp_j)
               {
                  hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                  hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
               }
            }
            else /* insert immediately into data in ParCSRMatrix structure */
            {
               HYPRE_BigInt *big_offd_j;
               HYPRE_Int col_j;
               /* IndxDiag/IndxOffd track current fill position per row */
               offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
               diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
               diag = hypre_ParCSRMatrixDiag(par_matrix);
               diag_i = hypre_CSRMatrixI(diag);
               diag_j = hypre_CSRMatrixJ(diag);
               diag_data = hypre_CSRMatrixData(diag);
               offd = hypre_ParCSRMatrixOffd(par_matrix);
               offd_i = hypre_CSRMatrixI(offd);
               if (num_procs > 1)
               {
                  /* offd columns are kept as GLOBAL indices (big_offd_j)
                   * until assembly maps them to local indices */
                  big_offd_j = hypre_CSRMatrixBigJ(offd);
                  offd_data = hypre_CSRMatrixData(offd);
                  if (!big_offd_j)
                  {
                     big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
                                                hypre_CSRMatrixMemoryLocation(offd));
                     hypre_CSRMatrixBigJ(offd) = big_offd_j;
                  }
               }
               cnt_diag = diag_indx;
               cnt_offd = offd_indx;
               diag_space = diag_i[row_local + 1];
               offd_space = offd_i[row_local + 1];
               not_found = 1;
               for (i = 0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                  /* insert into offd */
                  {
                     for (j = offd_i[row_local]; j < offd_indx; j++)
                     {
                        if (big_offd_j[j] == cols[indx])
                        {
                           offd_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_offd < offd_space)
                        {
                           big_offd_j[cnt_offd] = cols[indx];
                           offd_data[cnt_offd++] = values[indx];
                        }
                        else
                        {
                           /* row's preallocated offd capacity exhausted */
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements!\n",
                                           row);
                           }
                           /* return 1; */
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  else /* insert into diag */
                  {
                     col_j = (HYPRE_Int)(cols[indx] - col_0);
                     for (j = diag_i[row_local]; j < diag_indx; j++)
                     {
                        if (diag_j[j] == col_j)
                        {
                           diag_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_diag < diag_space)
                        {
                           diag_j[cnt_diag] = col_j;
                           diag_data[cnt_diag++] = values[indx];
                        }
                        else
                        {
                           /* row's preallocated diag capacity exhausted */
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements !\n",
                                           row);
                           }
                           /* return 1; */
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  indx++;
               }
               hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
               hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
            }
         }
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetConstantValuesParCSR
*
* sets all values in an already assembled IJMatrix to a constant value.
*
*****************************************************************************/
void
hypre_IJMatrixSetConstantValuesParCSRHost( hypre_IJMatrix *matrix,
                                           HYPRE_Complex   value )
{
   /* Overwrite every stored entry (diag and offd parts) of an assembled
    * IJ/ParCSR matrix with the given constant value (host memory path). */
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   hypre_CSRMatrix    *diag       = hypre_ParCSRMatrixDiag(par_matrix);
   hypre_CSRMatrix    *offd       = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Complex      *data;
   HYPRE_Int           nnz, k;

   /* local diagonal block */
   data = hypre_CSRMatrixData(diag);
   nnz  = hypre_CSRMatrixNumNonzeros(diag);
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < nnz; k++)
   {
      data[k] = value;
   }

   /* off-diagonal block */
   data = hypre_CSRMatrixData(offd);
   nnz  = hypre_CSRMatrixNumNonzeros(offd);
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < nnz; k++)
   {
      data[k] = value;
   }
}
HYPRE_Int
hypre_IJMatrixSetConstantValuesParCSR( hypre_IJMatrix *matrix,
                                       HYPRE_Complex   value )
{
   /* Dispatch constant-value fill to the device or host implementation.
    * Requires an assembled matrix; otherwise an error is recorded. */
   if (!hypre_IJMatrixAssembleFlag(matrix))
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                        "Matrix not assembled! Required to set constant values!");
      return hypre_error_flag;
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(matrix)) == HYPRE_EXEC_DEVICE)
   {
      hypre_IJMatrixSetConstantValuesParCSRDevice(matrix, value);
   }
   else
#endif
   {
      hypre_IJMatrixSetConstantValuesParCSRHost(matrix, value);
   }

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAddToValuesParCSR
*
* adds row values to an IJMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixAddToValuesParCSR( hypre_IJMatrix       *matrix,
                                 HYPRE_Int             nrows,
                                 HYPRE_Int            *ncols,
                                 const HYPRE_BigInt   *rows,
                                 const HYPRE_Int      *row_indexes,
                                 const HYPRE_BigInt   *cols,
                                 const HYPRE_Complex  *values )
{
   /* Adds (accumulates) values into an IJ/ParCSR matrix.
    *
    * Unlike SetValues, AddTo supports rows owned by OTHER processors:
    * such entries are cached in the aux matrix's off_proc_{i,j,data}
    * arrays and communicated at assembly time.  For locally owned rows
    * the logic mirrors SetValues, but with "+=" instead of "=":
    *   - assembled matrix: the entry must already exist in the pattern;
    *   - not assembled: entries go into the aux per-row arrays (need_aux)
    *     or directly into preallocated CSR storage.
    *
    * row_indexes[ii] is the offset into cols/values for row ii; ncols may
    * be NULL, meaning one entry per row.
    */
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_Int row_local;
   HYPRE_BigInt row;
   HYPRE_BigInt col_0, col_n;
   HYPRE_Int i, ii, j, n, not_found;
   HYPRE_BigInt **aux_j;
   HYPRE_BigInt *local_j;
   HYPRE_BigInt *tmp_j;        /* overflow buffer for entries beyond row_space */
   HYPRE_Complex **aux_data;
   HYPRE_Complex *local_data;
   HYPRE_Complex *tmp_data;
   HYPRE_Int diag_space, offd_space;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int tmp_indx, indx;
   HYPRE_Int space, size, old_size;
   HYPRE_Int cnt, cnt_diag, cnt_offd;
   HYPRE_Int pos_diag, pos_offd;
   HYPRE_Int len_diag, len_offd;
   HYPRE_Int offd_indx, diag_indx;
   HYPRE_BigInt first;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   /* off-processor element cache bookkeeping (lives in aux_matrix) */
   HYPRE_Int current_num_elmts;
   HYPRE_Int max_off_proc_elmts;
   HYPRE_Int off_proc_i_indx;
   HYPRE_BigInt *off_proc_i;    /* pairs: (global row, #entries for that row) */
   HYPRE_BigInt *off_proc_j;
   HYPRE_Complex *off_proc_data;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   /* [col_0, col_n] is the locally-owned global column range (diag part) */
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1] - 1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   if (hypre_IJMatrixAssembleFlag(matrix))
   {
      HYPRE_Int num_cols_offd;
      HYPRE_BigInt *col_map_offd;
      HYPRE_Int j_offd;
      /* AB - 4/06 - need to get this object*/
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
      for (ii = 0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];
         if (row >= row_partitioning[0] && row < row_partitioning[1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[0]);
            /* compute local row number */
            diag = hypre_ParCSRMatrixDiag(par_matrix);
            diag_i = hypre_CSRMatrixI(diag);
            diag_j = hypre_CSRMatrixJ(diag);
            diag_data = hypre_CSRMatrixData(diag);
            offd = hypre_ParCSRMatrixOffd(par_matrix);
            offd_i = hypre_CSRMatrixI(offd);
            num_cols_offd = hypre_CSRMatrixNumCols(offd);
            if (num_cols_offd)
            {
               col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
               offd_j = hypre_CSRMatrixJ(offd);
               offd_data = hypre_CSRMatrixData(offd);
            }
            size = diag_i[row_local + 1] - diag_i[row_local] +
                   offd_i[row_local + 1] - offd_i[row_local];
            /* cannot touch more entries than the row stores */
            if (n > size) /* Should we change this and allow this?
                             This could be same column index, i.e. only last
                             value is set, previous ones overwritten. */
            {
               hypre_error(HYPRE_ERROR_GENERIC);
               if (print_level)
               {
                  hypre_printf (" row %b too long! \n", row);
               }
               return hypre_error_flag;
            }
            pos_diag = diag_i[row_local];
            pos_offd = offd_i[row_local];
            len_diag = diag_i[row_local + 1];
            len_offd = offd_i[row_local + 1];
            not_found = 1;
            for (i = 0; i < n; i++)
            {
               if (cols[indx] < col_0 || cols[indx] > col_n)
               /* insert into offd */
               {
                  /* col_map_offd is sorted: binary search the global col */
                  j_offd = hypre_BigBinarySearch(col_map_offd, cols[indx] - first,
                                                 num_cols_offd);
                  if (j_offd == -1)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                     /* return -1; */
                  }
                  for (j = pos_offd; j < len_offd; j++)
                  {
                     if (offd_j[j] == j_offd)
                     {
                        offd_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  not_found = 1;
               }
               /* diagonal element */
               else if (cols[indx] == row)
               {
                  /* diagonal entry is expected to be stored first in the row */
                  if (diag_j[pos_diag] != row_local)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  diag_data[pos_diag] += values[indx];
               }
               else /* insert into diag */
               {
                  for (j = pos_diag; j < len_diag; j++)
                  {
                     if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
                     {
                        diag_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
               }
               indx++;
            }
         }
         /* not my row */
         else
         {
            /* cache the contribution for communication during assembly;
             * create the aux matrix lazily if assembly destroyed it */
            if (!aux_matrix)
            {
               size = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
               hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
               hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
               hypre_IJMatrixTranslator(matrix) = aux_matrix;
            }
            current_num_elmts
               = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
            max_off_proc_elmts
               = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            if (!max_off_proc_elmts)
            {
               /* first off-proc contribution: allocate the cache
                * (off_proc_i holds 2 entries per row, hence 2x size) */
               max_off_proc_elmts = hypre_max(n, 1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               /* grow the cache */
               max_off_proc_elmts += 3 * n;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data, HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }
            /* AB - 4/6 - the row should be negative to indicate an add */
            /* UMY - 12/28/09 - now positive since we eliminated the feature of
               setting on other processors */
            /* off_proc_i[off_proc_i_indx++] = row; */
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i = 0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix)
               = current_num_elmts;
         }
      }
   }
   /* not assembled */
   else
   {
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      for (ii = 0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];
         if (row >= row_partitioning[0] && row < row_partitioning[1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[0]);
            /* compute local row number */
            if (need_aux)
            {
               /* per-row dynamic arrays of global col indices and values */
               aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
               aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
               local_j = aux_j[row_local];
               local_data = aux_data[row_local];
               space = row_space[row_local];
               old_size = row_length[row_local];
               size = space - old_size;
               if (size < n)
               {
                  /* possibly not enough free space: stage overflow in tmp_j/tmp_data */
                  size = n - size;
                  tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                  tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
               }
               else
               {
                  tmp_j = NULL;
               }
               tmp_indx = 0;
               not_found = 1;
               size = old_size;
               for (i = 0; i < n; i++)
               {
                  /* accumulate if the column already exists in this row */
                  for (j = 0; j < old_size; j++)
                  {
                     if (local_j[j] == cols[indx])
                     {
                        local_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     if (size < space)
                     {
                        local_j[size] = cols[indx];
                        local_data[size++] = values[indx];
                     }
                     else
                     {
                        tmp_j[tmp_indx] = cols[indx];
                        tmp_data[tmp_indx++] = values[indx];
                     }
                  }
                  not_found = 1;
                  indx++;
               }
               row_length[row_local] = size + tmp_indx;
               if (tmp_indx)
               {
                  /* grow the row arrays and append the staged overflow */
                  aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
                                                    size + tmp_indx, HYPRE_MEMORY_HOST);
                  aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
                                                       HYPRE_Complex, size + tmp_indx, HYPRE_MEMORY_HOST);
                  row_space[row_local] = size + tmp_indx;
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
               }
               cnt = size;
               for (i = 0; i < tmp_indx; i++)
               {
                  local_j[cnt] = tmp_j[i];
                  local_data[cnt++] = tmp_data[i];
               }
               if (tmp_j)
               {
                  hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                  hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
               }
            }
            else /* insert immediately into data in ParCSRMatrix structure */
            {
               HYPRE_BigInt *big_offd_j;
               /* IndxDiag/IndxOffd track current fill position per row */
               offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
               diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
               diag = hypre_ParCSRMatrixDiag(par_matrix);
               diag_i = hypre_CSRMatrixI(diag);
               diag_j = hypre_CSRMatrixJ(diag);
               diag_data = hypre_CSRMatrixData(diag);
               offd = hypre_ParCSRMatrixOffd(par_matrix);
               offd_i = hypre_CSRMatrixI(offd);
               if (num_procs > 1)
               {
                  /* offd columns stay GLOBAL (big_offd_j) until assembly */
                  big_offd_j = hypre_CSRMatrixBigJ(offd);
                  offd_data = hypre_CSRMatrixData(offd);
                  if (!big_offd_j)
                  {
                     big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
                                                hypre_CSRMatrixMemoryLocation(offd));
                     hypre_CSRMatrixBigJ(offd) = big_offd_j;
                  }
               }
               cnt_diag = diag_indx;
               cnt_offd = offd_indx;
               diag_space = diag_i[row_local + 1];
               offd_space = offd_i[row_local + 1];
               not_found = 1;
               for (i = 0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                  /* insert into offd */
                  {
                     for (j = offd_i[row_local]; j < offd_indx; j++)
                     {
                        if (big_offd_j[j] == cols[indx])
                        {
                           offd_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_offd < offd_space)
                        {
                           big_offd_j[cnt_offd] = cols[indx];
                           offd_data[cnt_offd++] = values[indx];
                        }
                        else
                        {
                           /* row's preallocated offd capacity exhausted */
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements!\n",
                                           row);
                           }
                           /* return 1;*/
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  else /* insert into diag */
                  {
                     HYPRE_Int col_j = (HYPRE_Int)( cols[indx] - col_0);
                     for (j = diag_i[row_local]; j < diag_indx; j++)
                     {
                        if (diag_j[j] == col_j)
                        {
                           diag_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_diag < diag_space)
                        {
                           diag_j[cnt_diag] = col_j;
                           diag_data[cnt_diag++] = values[indx];
                        }
                        else
                        {
                           /* row's preallocated diag capacity exhausted */
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements !\n",
                                           row);
                           }
                           /* return 1; */
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  indx++;
               }
               hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
               hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
            }
         }
         /* not my row */
         else
         {
            /* cache the contribution for communication during assembly
             * (aux_matrix exists in the not-assembled regime) */
            current_num_elmts
               = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
            max_off_proc_elmts
               = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            if (!max_off_proc_elmts)
            {
               /* first off-proc contribution: allocate the cache
                * (off_proc_i holds 2 entries per row, hence 2x size) */
               max_off_proc_elmts = hypre_max(n, 1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) =
                  max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               /* grow the cache */
               max_off_proc_elmts += 3 * n;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data, HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix)
                  = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i = 0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix)
               = current_num_elmts;
         }
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixDestroyParCSR
*
* frees an IJMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixDestroyParCSR(hypre_IJMatrix *matrix)
{
   /* Free the wrapped ParCSR matrix and its auxiliary assembly structure. */
   hypre_ParCSRMatrix    *par_matrix = (hypre_ParCSRMatrix *)    hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

   hypre_ParCSRMatrixDestroy(par_matrix);
   hypre_AuxParCSRMatrixDestroy(aux_matrix);

   /* Clear the wrapper's pointers so they do not dangle. */
   hypre_IJMatrixObject(matrix) = NULL;
   hypre_IJMatrixTranslator(matrix) = NULL;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixTransposeParCSR
*
* Tranposes an IJMatrix of type ParCSRMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixTransposeParCSR( hypre_IJMatrix *matrix_A,
                               hypre_IJMatrix *matrix_AT )
{
   /* Computes A^T and stores it in matrix_AT, replacing any matrix the
    * destination already wrapped. */
   hypre_ParCSRMatrix *par_A   = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix_A);
   hypre_ParCSRMatrix *par_AT  = NULL;
   hypre_ParCSRMatrix *old_AT  = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix_AT);

   /* Discard the destination's previous contents, if any. */
   if (old_AT)
   {
      hypre_ParCSRMatrixDestroy(old_AT);
      hypre_IJMatrixObject(matrix_AT) = NULL;
   }

   /* Build the transpose (data flag = 1) and finalize its metadata. */
   hypre_ParCSRMatrixTranspose(par_A, &par_AT, 1);
   hypre_ParCSRMatrixSetNumNonzeros(par_AT);
   hypre_ParCSRMatrixSetDNumNonzeros(par_AT);
   hypre_MatvecCommPkgCreate(par_AT);
   hypre_IJMatrixObject(matrix_AT) = (void *) par_AT;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixNormParCSR
*
* Computes the Infinity norm of an IJMatrix of type ParCSRMatrix
*
* TODO: Add other norms
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixNormParCSR( hypre_IJMatrix *matrix,
                          HYPRE_Real     *norm )
{
   /* Thin wrapper: delegate the infinity-norm computation to the
    * underlying ParCSR matrix. */
   hypre_ParCSRMatrixInfNorm((hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix), norm);

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAddParCSR
*
* Performs C = alpha*A + beta*B, where A, B and C are IJMatrices of
* type ParCSRMatrix.
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixAddParCSR( HYPRE_Complex   alpha,
                         hypre_IJMatrix *matrix_A,
                         HYPRE_Complex   beta,
                         hypre_IJMatrix *matrix_B,
                         hypre_IJMatrix *matrix_C )
{
   /* Computes C = alpha*A + beta*B for IJ matrices of type ParCSR,
    * replacing any matrix the destination already wrapped. */
   hypre_ParCSRMatrix *par_A = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix_A);
   hypre_ParCSRMatrix *par_B = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix_B);
   hypre_ParCSRMatrix *par_C = NULL;
   hypre_ParCSRMatrix *old_C = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix_C);

   /* Discard the destination's previous contents, if any. */
   if (old_C)
   {
      hypre_ParCSRMatrixDestroy(old_C);
      hypre_IJMatrixObject(matrix_C) = NULL;
   }

   hypre_ParCSRMatrixAdd(alpha, par_A, beta, par_B, &par_C);

   /* Finalize metadata; create a comm pkg only when Add did not. */
   hypre_ParCSRMatrixSetNumNonzeros(par_C);
   hypre_ParCSRMatrixSetDNumNonzeros(par_C);
   if (!hypre_ParCSRMatrixCommPkg(par_C))
   {
      hypre_MatvecCommPkgCreate(par_C);
   }
   hypre_IJMatrixObject(matrix_C) = (void *) par_C;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAssembleOffProcValsParCSR
*
* This is for handling set and get values calls to off-proc. entries -
* it is called from matrix assemble. There is an alternate version for
* when the assumed partition is being used.
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixAssembleOffProcValsParCSR( hypre_IJMatrix *matrix,
                                         HYPRE_Int off_proc_i_indx,
                                         HYPRE_Int max_off_proc_elmts,
                                         HYPRE_Int current_num_elmts,
                                         HYPRE_MemoryLocation memory_location,
                                         HYPRE_BigInt *off_proc_i,
                                         HYPRE_BigInt *off_proc_j,
                                         HYPRE_Complex *off_proc_data )
{
   /* Sends locally-cached entries that belong to rows owned by other
    * processors to their owners and adds them to the owners' matrices.
    *
    * Input packing: off_proc_i holds (row number, #elements in that row)
    * pairs, so off_proc_i_indx == 2 * (number of off-proc rows);
    * off_proc_j / off_proc_data hold the column indices and values in
    * matching order.  Two rounds of hypre_DataExchangeList are used:
    * (1) contact the *assumed* owners (from the assumed partition) to
    *     discover the *real* owner of each row range;
    * (2) ship the packed per-proc row data to the real owners.
    * (max_off_proc_elmts is part of the interface but not read here.) */
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int i, j, k, in_i;
   HYPRE_Int myid;
   HYPRE_Int proc_id, last_proc, prev_id, tmp_id;
   HYPRE_Int max_response_size;
   HYPRE_BigInt global_num_cols;
   HYPRE_BigInt global_first_col;
   HYPRE_BigInt global_first_row;
   HYPRE_Int ex_num_contacts = 0, num_rows = 0;
   HYPRE_BigInt range_start, range_end;
   HYPRE_Int num_elements;
   HYPRE_Int storage;
   HYPRE_Int indx;
   HYPRE_BigInt row;
   HYPRE_Int num_ranges, row_index = 0;
   HYPRE_Int num_recvs;
   HYPRE_BigInt upper_bound;
   HYPRE_Int counter;
   HYPRE_Int num_real_procs;
   HYPRE_Int /*current_proc,*/ original_proc_indx;
   HYPRE_BigInt *row_list = NULL;
   HYPRE_Int *row_list_num_elements = NULL;
   HYPRE_Int *a_proc_id = NULL, *orig_order = NULL;
   HYPRE_Int *real_proc_id = NULL, *us_real_proc_id = NULL;
   HYPRE_Int *ex_contact_procs = NULL, *ex_contact_vec_starts = NULL;
   HYPRE_BigInt *ex_contact_buf = NULL;
   HYPRE_Int *recv_starts = NULL;
   HYPRE_BigInt *response_buf = NULL;
   HYPRE_Int *response_buf_starts = NULL;
   HYPRE_Int *num_rows_per_proc = NULL, *num_elements_total = NULL;
   HYPRE_Int *argsort_contact_procs = NULL;
   HYPRE_Int obj_size_bytes, complex_size;
   HYPRE_BigInt big_int_size;
   HYPRE_Int tmp_int;
   HYPRE_BigInt tmp_big_int;
   HYPRE_BigInt *col_ptr;
   HYPRE_BigInt *big_int_data = NULL;
   HYPRE_Int big_int_data_size = 0, complex_data_size = 0;
   void *void_contact_buf = NULL;
   void *index_ptr;
   void *recv_data_ptr;
   HYPRE_Complex tmp_complex;
   HYPRE_Complex *col_data_ptr;
   HYPRE_Complex *complex_data = NULL;
   hypre_DataExchangeResponse response_obj1, response_obj2;
   hypre_ProcListElements send_proc_obj;
   hypre_IJAssumedPart *apart;
   hypre_MPI_Comm_rank(comm, &myid);
   global_num_cols = hypre_IJMatrixGlobalNumCols(matrix);
   global_first_col = hypre_IJMatrixGlobalFirstCol(matrix);
   global_first_row = hypre_IJMatrixGlobalFirstRow(matrix);
   /* Device input arrives as flat triples (one value per row entry);
    * copy it to host and expand off_proc_i into the (row, 1) pair
    * format that the host code path below expects. */
   if (memory_location == HYPRE_MEMORY_DEVICE)
   {
      HYPRE_BigInt  *tmp = hypre_TAlloc(HYPRE_BigInt, current_num_elmts,
                                        HYPRE_MEMORY_HOST);
      HYPRE_BigInt  *off_proc_i_h = hypre_TAlloc(HYPRE_BigInt, 2 * current_num_elmts,
                                                 HYPRE_MEMORY_HOST);
      HYPRE_BigInt  *off_proc_j_h = hypre_TAlloc(HYPRE_BigInt, current_num_elmts,
                                                 HYPRE_MEMORY_HOST);
      HYPRE_Complex *off_proc_data_h = hypre_TAlloc(HYPRE_Complex, current_num_elmts,
                                                    HYPRE_MEMORY_HOST);
      hypre_TMemcpy(tmp, off_proc_i, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST,
                    HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(off_proc_j_h, off_proc_j, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST,
                    HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(off_proc_data_h, off_proc_data, HYPRE_Complex, current_num_elmts, HYPRE_MEMORY_HOST,
                    HYPRE_MEMORY_DEVICE);
      for (i = 0; i < current_num_elmts; i++)
      {
#if defined(HYPRE_DEBUG)
         /* every entry must be off-proc (outside this rank's row range)
          * but inside the global row/col extents */
         hypre_assert(tmp[i] < hypre_IJMatrixRowPartitioning(matrix)[0] ||
                      tmp[i] >= hypre_IJMatrixRowPartitioning(matrix)[1]);
         hypre_assert(tmp[i] >= global_first_row &&
                      tmp[i] < global_first_row + hypre_IJMatrixGlobalNumRows(matrix));
         hypre_assert(off_proc_j_h[i] >= global_first_col &&
                      off_proc_j_h[i] < global_first_col + global_num_cols);
#endif
         off_proc_i_h[2 * i] = tmp[i];
         off_proc_i_h[2 * i + 1] = 1;      /* one element per (row, count) pair */
      }
      off_proc_i_indx = current_num_elmts * 2;
      off_proc_i = off_proc_i_h;
      off_proc_j = off_proc_j_h;
      off_proc_data = off_proc_data_h;
      hypre_TFree(tmp, HYPRE_MEMORY_HOST);
   }
   /* call hypre_IJMatrixAddToValuesParCSR directly inside this function
    * with one chunk of data */
   /* accumulators used only on the device path: received entries are
    * collected here and applied in one SetAddValues call at the end */
   HYPRE_Int      off_proc_nelm_recv_cur = 0;
   HYPRE_Int      off_proc_nelm_recv_max = 0;
   HYPRE_BigInt  *off_proc_i_recv = NULL;
   HYPRE_BigInt  *off_proc_j_recv = NULL;
   HYPRE_Complex *off_proc_data_recv = NULL;
   HYPRE_BigInt  *off_proc_i_recv_d = NULL;
   HYPRE_BigInt  *off_proc_j_recv_d = NULL;
   HYPRE_Complex *off_proc_data_recv_d = NULL;
   num_rows = off_proc_i_indx / 2;
   /* verify that we have created the assumed partition */
   if (hypre_IJMatrixAssumedPart(matrix) == NULL)
   {
      hypre_IJMatrixCreateAssumedPartition(matrix);
   }
   apart = (hypre_IJAssumedPart*) hypre_IJMatrixAssumedPart(matrix);
   /*if (hypre_ParCSRMatrixAssumedPartition(par_matrix) == NULL)
   {
      hypre_ParCSRMatrixCreateAssumedPartition(par_matrix);
   }
   apart = hypre_ParCSRMatrixAssumedPartition(par_matrix);*/
   row_list = hypre_CTAlloc(HYPRE_BigInt, num_rows, HYPRE_MEMORY_HOST);
   row_list_num_elements = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
   a_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
   orig_order = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
   real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
   /* get the assumed processor id for each row */
   if (num_rows > 0 )
   {
      for (i = 0; i < num_rows; i++)
      {
         row = off_proc_i[i * 2];
         //if (row < 0) row = -row - 1;
         row_list[i] = row;
         row_list_num_elements[i] = off_proc_i[i * 2 + 1];
         /* NOTE(review): global_num_cols is passed with global_first_row
          * here (and global_first_col in the range call below) — this
          * mirrors long-standing hypre usage; confirm it is intended for
          * rectangular matrices. */
         hypre_GetAssumedPartitionProcFromRow(comm, row, global_first_row,
                                              global_num_cols, &proc_id);
         a_proc_id[i] = proc_id;
         orig_order[i] = i;
      }
      /* now we need to find the actual order of each row - sort on row -
         this will result in proc ids sorted also...*/
      hypre_BigQsortb2i(row_list, a_proc_id, orig_order, 0, num_rows - 1);
      /* calculate the number of contacts */
      ex_num_contacts = 1;
      last_proc = a_proc_id[0];
      for (i = 1; i < num_rows; i++)
      {
         if (a_proc_id[i] > last_proc)
         {
            ex_num_contacts++;
            last_proc = a_proc_id[i];
         }
      }
   }
   /* now we will go through a create a contact list - need to contact assumed
      processors and find out who the actual row owner is - we will contact with
      a range (2 numbers) */
   ex_contact_procs = hypre_CTAlloc(HYPRE_Int, ex_num_contacts, HYPRE_MEMORY_HOST);
   ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, ex_num_contacts + 1, HYPRE_MEMORY_HOST);
   ex_contact_buf = hypre_CTAlloc(HYPRE_BigInt, ex_num_contacts * 2, HYPRE_MEMORY_HOST);
   counter = 0;
   range_end = -1;
   for (i = 0; i < num_rows; i++)
   {
      /* a row beyond the current assumed range starts a new contact */
      if (row_list[i] > range_end)
      {
         /* assumed proc */
         proc_id = a_proc_id[i];
         /* end of prev. range */
         if (counter > 0)
         {
            ex_contact_buf[counter * 2 - 1] = row_list[i - 1];
         }
         /*start new range*/
         ex_contact_procs[counter] = proc_id;
         ex_contact_vec_starts[counter] = counter * 2;
         ex_contact_buf[counter * 2] = row_list[i];
         counter++;
         hypre_GetAssumedPartitionRowRange(comm, proc_id, global_first_col, global_num_cols,
                                           &range_start, &range_end);
      }
   }
   /* finish the starts */
   ex_contact_vec_starts[counter] =  counter * 2;
   /* finish the last range */
   if (counter > 0)
   {
      ex_contact_buf[counter * 2 - 1] = row_list[num_rows - 1];
   }
   /* don't allocate space for responses */
   /* create response object - can use same fill response as used in the commpkg
      routine */
   response_obj1.fill_response = hypre_RangeFillResponseIJDetermineRecvProcs;
   response_obj1.data1 = apart; /* this is necessary so we can fill responses*/
   response_obj1.data2 = NULL;
   max_response_size = 6;  /* 6 means we can fit 3 ranges*/
   hypre_DataExchangeList(ex_num_contacts, ex_contact_procs,
                          ex_contact_buf, ex_contact_vec_starts, sizeof(HYPRE_BigInt),
                          sizeof(HYPRE_BigInt), &response_obj1, max_response_size, 1,
                          comm, (void**) &response_buf, &response_buf_starts);
   /* now response_buf contains a proc_id followed by a range upper bound */
   hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
   hypre_TFree(ex_contact_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(a_proc_id, HYPRE_MEMORY_HOST);
   /*how many ranges were returned?*/
   num_ranges = response_buf_starts[ex_num_contacts];
   num_ranges = num_ranges / 2;
   prev_id = -1;
   j = 0;
   counter = 0;
   num_real_procs = 0;
   /* loop through ranges - create a list of actual processor ids*/
   for (i = 0; i < num_ranges; i++)
   {
      upper_bound = response_buf[i * 2 + 1];
      counter = 0;
      tmp_id = response_buf[i * 2];
      /* loop through row_list entries - counting how many are in the range */
      while (j < num_rows && row_list[j] <= upper_bound)
      {
         real_proc_id[j] = tmp_id;
         j++;
         counter++;
      }
      if (counter > 0 && tmp_id != prev_id)
      {
         num_real_procs++;
      }
      prev_id = tmp_id;
   }
   /* now we have the list of real processor ids (real_proc_id) - and the number
      of distinct ones - so now we can set up data to be sent - we have
      HYPRE_Int data and HYPRE_Complex data.  that we will need to pack
      together */
   /* first find out how many rows and elements we need to send per proc - so we
      can do storage */
   ex_contact_procs = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
   num_rows_per_proc = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
   num_elements_total  = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
   counter = 0;
   if (num_real_procs > 0 )
   {
      ex_contact_procs[0] = real_proc_id[0];
      num_rows_per_proc[0] = 1;
      num_elements_total[0] = row_list_num_elements[orig_order[0]];
      /* loop through real procs - these are sorted (row_list is sorted also)*/
      for (i = 1; i < num_rows; i++)
      {
         if (real_proc_id[i] == ex_contact_procs[counter]) /* same processor */
         {
            num_rows_per_proc[counter] += 1; /*another row */
            num_elements_total[counter] += row_list_num_elements[orig_order[i]];
         }
         else /* new processor */
         {
            counter++;
            ex_contact_procs[counter] = real_proc_id[i];
            num_rows_per_proc[counter] = 1;
            num_elements_total[counter] = row_list_num_elements[orig_order[i]];
         }
      }
   }
   /* to pack together, we need to use the largest obj. size of
      (HYPRE_Int) and (HYPRE_Complex) - if these are much different, then we are
      wasting some storage, but I do not think that it will be a
      large amount since this function should not be used on really
      large amounts of data anyway*/
   big_int_size = sizeof(HYPRE_BigInt);
   complex_size = sizeof(HYPRE_Complex);
   obj_size_bytes = hypre_max(big_int_size, complex_size);
   /* set up data to be sent to send procs */
   /* for each proc, ex_contact_buf contains #rows, row #,
      no. elements, col indices, col data, row #, no. elements, col
      indices, col data, etc. */
   /* first calculate total storage and make vec_starts arrays */
   storage = 0;
   ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, num_real_procs + 1, HYPRE_MEMORY_HOST);
   ex_contact_vec_starts[0] = -1;
   for (i = 0; i < num_real_procs; i++)
   {
      /* 1 slot for #rows + (row#, #elems) per row + (col, val) per element */
      storage += 1 + 2 * num_rows_per_proc[i] + 2 * num_elements_total[i];
      ex_contact_vec_starts[i + 1] = -storage - 1; /* need negative for next loop */
   }
   hypre_TFree(num_elements_total, HYPRE_MEMORY_HOST);
   /*void_contact_buf = hypre_MAlloc(storage*obj_size_bytes);*/
   void_contact_buf = hypre_CTAlloc(char, storage * obj_size_bytes, HYPRE_MEMORY_HOST);
   index_ptr = void_contact_buf;  /* step through with this index */
   /* for each proc: #rows, row #, no. elements,
      col indices, col data, row #, no. elements, col indices, col data, etc. */
   /* un-sort real_proc_id - we want to access data arrays in order, so
      cheaper to do this*/
   us_real_proc_id =  hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
   for (i = 0; i < num_rows; i++)
   {
      us_real_proc_id[orig_order[i]] = real_proc_id[i];
   }
   hypre_TFree(real_proc_id, HYPRE_MEMORY_HOST);
   counter = 0; /* index into data arrays */
   prev_id = -1;
   for (i = 0; i < num_rows; i++)
   {
      proc_id = us_real_proc_id[i];
      /* can't use row list[i] - you lose the negative signs that differentiate
         add/set values */
      row = off_proc_i[i * 2];
      num_elements = row_list_num_elements[i];
      /* find position of this processor */
      indx = hypre_BinarySearch(ex_contact_procs, proc_id, num_real_procs);
      in_i = ex_contact_vec_starts[indx];
      index_ptr = (void *) ((char *) void_contact_buf + in_i * obj_size_bytes);
      /* first time for this processor - add the number of rows to the buffer
         (a start value is negative until its header has been written) */
      if (in_i < 0)
      {
         in_i = -in_i - 1;
         /* re-calc. index_ptr since in_i was negative */
         index_ptr = (void *) ((char *) void_contact_buf + in_i * obj_size_bytes);
         tmp_int = num_rows_per_proc[indx];
         hypre_TMemcpy( index_ptr,  &tmp_int, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
         index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
         in_i++;
      }
      /* add row # */
      hypre_TMemcpy( index_ptr, &row, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
      index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
      in_i++;
      /* add number of elements */
      hypre_TMemcpy( index_ptr, &num_elements, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
      index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
      in_i++;
      /* now add col indices */
      for (j = 0; j < num_elements; j++)
      {
         tmp_big_int = off_proc_j[counter + j]; /* col number */
         hypre_TMemcpy( index_ptr, &tmp_big_int, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
         index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
         in_i ++;
      }
      /* now add data */
      for (j = 0; j < num_elements; j++)
      {
         tmp_complex = off_proc_data[counter++]; /* value */
         hypre_TMemcpy( index_ptr, &tmp_complex, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
         index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
         in_i++;
      }
      /* increment the indexes to keep track of where we are - we
       * adjust below to be actual starts*/
      ex_contact_vec_starts[indx] = in_i;
   }
   /* some clean up */
   hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(us_real_proc_id, HYPRE_MEMORY_HOST);
   hypre_TFree(orig_order, HYPRE_MEMORY_HOST);
   hypre_TFree(row_list, HYPRE_MEMORY_HOST);
   hypre_TFree(row_list_num_elements, HYPRE_MEMORY_HOST);
   hypre_TFree(num_rows_per_proc, HYPRE_MEMORY_HOST);
   /* shift end positions down one slot so they become actual starts */
   for (i = num_real_procs; i > 0; i--)
   {
      ex_contact_vec_starts[i] = ex_contact_vec_starts[i - 1];
   }
   ex_contact_vec_starts[0] = 0;
   /* now send the data */
   /***********************************/
   /* first get the integer info in send_proc_obj */
   /* the response we expect is just a confirmation*/
   response_buf = NULL;
   response_buf_starts = NULL;
   /*build the response object*/
   /* use the send_proc_obj for the info kept from contacts */
   /*estimate initial storage allocation */
   send_proc_obj.length = 0;
   send_proc_obj.storage_length = num_real_procs + 5;
   send_proc_obj.id =
      hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
   send_proc_obj.vec_starts =
      hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
   send_proc_obj.vec_starts[0] = 0;
   send_proc_obj.element_storage_length = storage + 20;
   send_proc_obj.v_elements =
      hypre_TAlloc(char, obj_size_bytes * send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST);
   response_obj2.fill_response = hypre_FillResponseIJOffProcVals;
   response_obj2.data1 = NULL;
   response_obj2.data2 = &send_proc_obj;
   max_response_size = 0;
   hypre_DataExchangeList(num_real_procs, ex_contact_procs,
                          void_contact_buf, ex_contact_vec_starts, obj_size_bytes,
                          0, &response_obj2, max_response_size, 2,
                          comm,  (void **) &response_buf, &response_buf_starts);
   hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
   hypre_TFree(void_contact_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);
   /* Now we can unpack the send_proc_objects and call set
      and add to values functions.  We unpack messages in a
      deterministic order, using processor rank */
   num_recvs = send_proc_obj.length;
   argsort_contact_procs = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST);
   for (i = 0; i < num_recvs; i++)
   {
      argsort_contact_procs[i] = i;
   }
   /* This sorts the id array, but the original indices are stored in
    * argsort_contact_procs */
   hypre_qsort2i( send_proc_obj.id, argsort_contact_procs, 0, num_recvs - 1 );
   /* alias */
   recv_data_ptr = send_proc_obj.v_elements;
   recv_starts = send_proc_obj.vec_starts;
   for (i = 0; i < num_recvs; i++)
   {
      /* Find the current processor in order, and reset recv_data_ptr to that processor's message */
      original_proc_indx = argsort_contact_procs[i];
      /*current_proc = send_proc_obj.id[i];*/
      indx = recv_starts[original_proc_indx];
      recv_data_ptr = (void *) ((char *) send_proc_obj.v_elements + indx * obj_size_bytes);
      /* get the number of rows for this recv */
      hypre_TMemcpy( &num_rows, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
      recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
      indx++;
      for (j = 0; j < num_rows; j++) /* for each row: unpack info */
      {
         /* row # */
         hypre_TMemcpy( &row, recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
         recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
         indx++;
         /* num elements for this row */
         hypre_TMemcpy( &num_elements, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
         recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
         indx++;
         /* col indices */ /* Need to check this again !!!! */
         /* when the packed slot size equals sizeof(HYPRE_BigInt) we can
          * read the indices in place; otherwise copy them out slot by
          * slot into big_int_data */
         if (big_int_size == obj_size_bytes)
         {
            col_ptr = (HYPRE_BigInt *) recv_data_ptr;
            recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements * obj_size_bytes);
         }
         else /* copy data */
         {
            if (big_int_data_size < num_elements)
            {
               big_int_data = hypre_TReAlloc(big_int_data, HYPRE_BigInt, num_elements + 10, HYPRE_MEMORY_HOST);
            }
            for (k = 0; k < num_elements; k++)
            {
               hypre_TMemcpy( &big_int_data[k], recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST,
                              HYPRE_MEMORY_HOST);
               recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
            }
            col_ptr = big_int_data;
         }
         /* col data */
         if (complex_size == obj_size_bytes)
         {
            col_data_ptr = (HYPRE_Complex *) recv_data_ptr;
            recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements * obj_size_bytes);
         }
         else /* copy data */
         {
            if (complex_data_size < num_elements)
            {
               complex_data =
                  hypre_TReAlloc(complex_data, HYPRE_Complex, num_elements + 10, HYPRE_MEMORY_HOST);
            }
            for (k = 0; k < num_elements; k++)
            {
               hypre_TMemcpy( &complex_data[k], recv_data_ptr, HYPRE_Complex, 1, HYPRE_MEMORY_HOST,
                              HYPRE_MEMORY_HOST);
               recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
            }
            col_data_ptr = complex_data;
         }
         if (memory_location == HYPRE_MEMORY_HOST)
         {
            /* host path: apply this row immediately */
            hypre_IJMatrixAddToValuesParCSR(matrix, 1, &num_elements, &row, &row_index, col_ptr, col_data_ptr);
         }
         else
         {
            /* device path: accumulate (row, col, val) triples; grow the
             * buffers geometrically (2x) as needed */
            HYPRE_Int nelm_new = off_proc_nelm_recv_cur + num_elements;
            if (nelm_new > off_proc_nelm_recv_max)
            {
               off_proc_nelm_recv_max = nelm_new * 2;
               off_proc_i_recv = hypre_TReAlloc(off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_max,
                                                HYPRE_MEMORY_HOST);
               off_proc_j_recv = hypre_TReAlloc(off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_max,
                                                HYPRE_MEMORY_HOST);
               off_proc_data_recv = hypre_TReAlloc(off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_max,
                                                   HYPRE_MEMORY_HOST);
            }
            HYPRE_Int i;
            for (i = 0; i < num_elements; i++)
            {
               off_proc_i_recv[off_proc_nelm_recv_cur + i] = row;
            }
            hypre_TMemcpy(off_proc_j_recv + off_proc_nelm_recv_cur, col_ptr, HYPRE_BigInt, num_elements,
                          HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
            hypre_TMemcpy(off_proc_data_recv + off_proc_nelm_recv_cur, col_data_ptr, HYPRE_Complex,
                          num_elements,
                          HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
            off_proc_nelm_recv_cur = nelm_new;
         }
         indx += (num_elements * 2);
      }
   }
   /* device path: push the accumulated triples to the device and apply
    * them in a single call */
   if (memory_location == HYPRE_MEMORY_DEVICE)
   {
      off_proc_i_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
      off_proc_j_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
      off_proc_data_recv_d = hypre_TAlloc(HYPRE_Complex, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(off_proc_i_recv_d, off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_cur,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      hypre_TMemcpy(off_proc_j_recv_d, off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_cur,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      hypre_TMemcpy(off_proc_data_recv_d, off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_cur,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      hypre_IJMatrixSetAddValuesParCSRDevice(matrix, off_proc_nelm_recv_cur, NULL, off_proc_i_recv_d,
                                             NULL, off_proc_j_recv_d,
                                             off_proc_data_recv_d, "add");
#endif
   }
   hypre_TFree(send_proc_obj.v_elements, HYPRE_MEMORY_HOST);
   hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
   hypre_TFree(argsort_contact_procs, HYPRE_MEMORY_HOST);
   if (big_int_data)
   {
      hypre_TFree(big_int_data, HYPRE_MEMORY_HOST);
   }
   if (complex_data)
   {
      hypre_TFree(complex_data, HYPRE_MEMORY_HOST);
   }
   /* on the device path the off_proc_* pointers were re-aimed at the
    * host copies made at the top of this routine, so free them here */
   if (memory_location == HYPRE_MEMORY_DEVICE)
   {
      hypre_TFree(off_proc_i, HYPRE_MEMORY_HOST);
      hypre_TFree(off_proc_j, HYPRE_MEMORY_HOST);
      hypre_TFree(off_proc_data, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(off_proc_i_recv, HYPRE_MEMORY_HOST);
   hypre_TFree(off_proc_j_recv, HYPRE_MEMORY_HOST);
   hypre_TFree(off_proc_data_recv, HYPRE_MEMORY_HOST);
   hypre_TFree(off_proc_i_recv_d, HYPRE_MEMORY_DEVICE);
   hypre_TFree(off_proc_j_recv_d, HYPRE_MEMORY_DEVICE);
   hypre_TFree(off_proc_data_recv_d, HYPRE_MEMORY_DEVICE);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------
* hypre_FillResponseIJOffProcVals
* Fill response function for the previous function (2nd data exchange)
*--------------------------------------------------------------------*/
HYPRE_Int
hypre_FillResponseIJOffProcVals(void      *p_recv_contact_buf,
                                HYPRE_Int  contact_size,
                                HYPRE_Int  contact_proc,
                                void      *ro,
                                MPI_Comm   comm,
                                void     **p_send_response_buf,
                                HYPRE_Int *response_message_size )
{
   /* Data-exchange callback: appends the raw contact buffer received from
    * contact_proc into the hypre_ProcListElements carried in
    * response_obj->data2, growing its id/vec_starts/v_elements storage as
    * needed.  Sends back no payload (*response_message_size = 0).
    * contact_size is measured in packed slots of `object_size` bytes
    * (max of sizeof(HYPRE_BigInt) and sizeof(HYPRE_Complex)), matching
    * the packing done by the caller. */
   HYPRE_Int    myid;
   HYPRE_Int    index, count, elength;
   HYPRE_Int    object_size;
   void        *index_ptr;
   hypre_DataExchangeResponse  *response_obj = (hypre_DataExchangeResponse*) ro;
   hypre_ProcListElements      *send_proc_obj = (hypre_ProcListElements*) response_obj->data2;
   object_size = hypre_max(sizeof(HYPRE_BigInt), sizeof(HYPRE_Complex));
   hypre_MPI_Comm_rank(comm, &myid );
   /*check to see if we need to allocate more space in send_proc_obj for vec starts
    * and id */
   if (send_proc_obj->length == send_proc_obj->storage_length)
   {
      send_proc_obj->storage_length += 20; /*add space for 20 more contacts*/
      send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int,
                                                 send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
      if ( send_proc_obj->id != NULL)
      {
         send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int,
                                            send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
      }
   }
   /*initialize*/
   count = send_proc_obj->length;
   index = send_proc_obj->vec_starts[count]; /* current number of elements */
   if ( send_proc_obj->id != NULL)
   {
      send_proc_obj->id[count] = contact_proc;
   }
   /*do we need more storage for the elements?*/
   if (send_proc_obj->element_storage_length < index + contact_size)
   {
      /* grow by at least 100 slots past the current fill level */
      elength = hypre_max(contact_size, 100);
      elength += index;
      send_proc_obj->v_elements = hypre_TReAlloc((char*)send_proc_obj->v_elements,
                                                 char, elength * object_size, HYPRE_MEMORY_HOST);
      send_proc_obj->element_storage_length = elength;
   }
   /*populate send_proc_obj*/
   index_ptr = (void *) ((char *) send_proc_obj->v_elements + index * object_size);
   hypre_TMemcpy(index_ptr, p_recv_contact_buf, char, object_size * contact_size, HYPRE_MEMORY_HOST,
                 HYPRE_MEMORY_HOST);
   send_proc_obj->vec_starts[count + 1] = index + contact_size;
   send_proc_obj->length++;
   /* output - no message to return (confirmation) */
   *response_message_size = 0;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------*/
HYPRE_Int hypre_FindProc(HYPRE_BigInt *list, HYPRE_BigInt value, HYPRE_Int list_length)
{
   /* Binary search over sorted interval boundaries: returns the index i
    * with list[i] <= value < list[i + 1], or -1 when value lies outside
    * [list[0], list[list_length]).  Assumes `list` holds list_length + 1
    * boundaries (presumably a processor partition — confirm with callers,
    * since list[list_length] is read). */
   HYPRE_Int lo = 0;
   HYPRE_Int hi = list_length;

   /* reject values outside the covered range */
   if (value < list[lo] || value >= list[hi])
   {
      return -1;
   }

   /* invariant: list[lo] <= value < list[hi] */
   while (hi - lo > 1)
   {
      HYPRE_Int mid = (lo + hi) / 2;
      if (value >= list[mid])
      {
         lo = mid;
      }
      else
      {
         hi = mid;
      }
   }

   return lo;
}
/******************************************************************************
*
* hypre_IJMatrixAssembleParCSR
*
* assembles IJMatrix from AuxParCSRMatrix auxiliary structure
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixAssembleParCSR(hypre_IJMatrix *matrix)
{
   /* Finalizes a ParCSR IJMatrix:
    *  1. If any processor cached off-processor entries, exchanges and
    *     applies them (hypre_IJMatrixAssembleOffProcValsParCSR).
    *  2. If the matrix is not yet assembled, moves data from the
    *     auxiliary row-wise structure into the CSR diag/offd parts
    *     (OpenMP-threaded), and places each row's diagonal entry first.
    *  3. Builds col_map_offd and compresses offd's column indices to
    *     local numbering.
    * Destroys the auxiliary structure before returning. */
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
   HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int *diag_j;
   HYPRE_Int *offd_j = NULL;
   HYPRE_Complex *diag_data;
   HYPRE_Complex *offd_data = NULL;
   HYPRE_Int i, j, j0;
   HYPRE_Int num_cols_offd;
   HYPRE_Int *diag_pos;
   HYPRE_BigInt *col_map_offd;
   HYPRE_Int *rownnz;
   HYPRE_Int *row_length;
   HYPRE_BigInt **aux_j;
   HYPRE_Complex **aux_data;
   HYPRE_Int my_id, num_procs;
   HYPRE_Int num_rows;
   HYPRE_Int num_rownnz;
   HYPRE_Int i_diag, i_offd;
   HYPRE_BigInt col_0, col_n;
   HYPRE_Int nnz_offd;
   HYPRE_BigInt *big_offd_j;
   HYPRE_BigInt *tmp_j;
   HYPRE_Complex temp;
   HYPRE_BigInt base = hypre_IJMatrixGlobalFirstCol(matrix);
   HYPRE_Int off_proc_i_indx;
   HYPRE_Int max_off_proc_elmts;
   HYPRE_Int current_num_elmts;
   HYPRE_BigInt *off_proc_i;
   HYPRE_BigInt *off_proc_j;
   HYPRE_Complex *off_proc_data;
   HYPRE_Int offd_proc_elmts;
   //HYPRE_Int new_off_proc_i_indx;
   //HYPRE_Int cancel_indx;
   //HYPRE_Int col_indx;
   //HYPRE_Int current_indx;
   //HYPRE_Int current_i;
   //HYPRE_Int row_len;
   HYPRE_Int max_num_threads;
   HYPRE_Int aux_flag, aux_flag_global;
   HYPRE_ANNOTATE_FUNC_BEGIN;
   max_num_threads = hypre_NumThreads();
   /* first find out if anyone has an aux_matrix, and create one if you don't
    * have one, but other procs do */
   aux_flag = 0;
   aux_flag_global = 0;
   if (aux_matrix)
   {
      aux_flag = 1;
   }
   hypre_MPI_Allreduce(&aux_flag, &aux_flag_global, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm);
   if (aux_flag_global && (!aux_flag))
   {
      /* create an empty (need_aux = 0) aux matrix so collective calls
       * below stay synchronized across ranks */
      hypre_MPI_Comm_rank(comm, &my_id);
      num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
      hypre_AuxParCSRMatrixCreate(&aux_matrix, num_rows, num_rows, NULL);
      hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   if (aux_matrix)
   {
      /* first delete all cancelled elements */
      /*cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
      if (cancel_indx)
      {
         current_num_elmts=hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i=hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j=hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
         off_proc_data=hypre_AuxParCSRMatrixOffProcData(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         col_indx = 0;
         current_i = 0;
         current_indx = 0;
         new_off_proc_i_indx = off_proc_i_indx;
         for (i=0; i < off_proc_i_indx; i= i+2)
         {
            row_len = off_proc_i[i+1];
            for (j=0; j < off_proc_i[i+1]; j++)
            {
               if (off_proc_j[col_indx] == -1)
               {
                  col_indx++;
                  row_len--;
                  current_num_elmts--;
               }
               else
               {
                  off_proc_j[current_indx] = off_proc_j[col_indx];
                  off_proc_data[current_indx++] = off_proc_data[col_indx++];
               }
            }
            if (row_len)
            {
               off_proc_i[current_i] = off_proc_i[i];
               off_proc_i[current_i+1] = row_len;
               current_i += 2;
            }
            else
            {
               new_off_proc_i_indx -= 2;
            }
         }
         hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = new_off_proc_i_indx;
         hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
      }*/
      off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
      /* collective: skip the exchange only if no rank has off-proc data */
      hypre_MPI_Allreduce(&off_proc_i_indx, &offd_proc_elmts, 1, HYPRE_MPI_INT,
                          hypre_MPI_SUM, comm);
      if (offd_proc_elmts)
      {
         max_off_proc_elmts = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
         current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
         off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
         hypre_IJMatrixAssembleOffProcValsParCSR(
            matrix, off_proc_i_indx, max_off_proc_elmts, current_num_elmts,
            HYPRE_MEMORY_HOST,
            off_proc_i, off_proc_j, off_proc_data);
      }
   }
   if (hypre_IJMatrixAssembleFlag(matrix) == 0)
   {
      hypre_MPI_Comm_size(comm, &num_procs);
      hypre_MPI_Comm_rank(comm, &my_id);
      num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
      col_0 = col_partitioning[0];
      col_n = col_partitioning[1] - 1;      /* local column range [col_0, col_n] is diag */
      /* move data into ParCSRMatrix if not there already */
      if (hypre_AuxParCSRMatrixNeedAux(aux_matrix))
      {
         HYPRE_Int  *diag_array;      /* per-thread diag entry counts -> prefix sums */
         HYPRE_Int  *offd_array;      /* per-thread offd entry counts -> prefix sums */
         /* Update nonzero rows of aux_matrix */
         hypre_AuxParCSRMatrixSetRownnz(aux_matrix);
         aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
         aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
         row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
         num_rownnz = hypre_AuxParCSRMatrixLocalNumRownnz(aux_matrix);
         rownnz = hypre_AuxParCSRMatrixRownnz(aux_matrix);
         diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
         offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
         diag_pos = hypre_TAlloc(HYPRE_Int, num_rownnz, HYPRE_MEMORY_HOST);
         i_diag = i_offd = 0;
#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel private(i, j, i_diag, i_offd)
#endif
         {
            HYPRE_BigInt  *local_j;
            HYPRE_Complex *local_data;
            HYPRE_Int ii, rest, size, ns, ne;
            HYPRE_Int num_threads, my_thread_num;
            num_threads = hypre_NumActiveThreads();
            my_thread_num = hypre_GetThreadNum();
            /* split the nonzero rows [ns, ne) evenly among the threads,
             * giving the first `rest` threads one extra row */
            size = num_rownnz / num_threads;
            rest = num_rownnz - size * num_threads;
            if (my_thread_num < rest)
            {
               ns = my_thread_num * (size + 1);
               ne = (my_thread_num + 1) * (size + 1);
            }
            else
            {
               ns = my_thread_num * size + rest;
               ne = (my_thread_num + 1) * size + rest;
            }
            /* pass 1: count this thread's diag/offd entries and remember
             * where each row's diagonal element sits (diag_pos) */
            i_diag = i_offd = 0;
            for (i = ns; i < ne; i++)
            {
               ii = rownnz ? rownnz[i] : i;
               local_j = aux_j[ii];
               local_data = aux_data[ii];
               diag_pos[i] = -1;
               for (j = 0; j < row_length[ii]; j++)
               {
                  if (local_j[j] < col_0 || local_j[j] > col_n)
                  {
                     i_offd++;
                  }
                  else
                  {
                     i_diag++;
                     if ((HYPRE_Int)(local_j[j] - col_0) == i)
                     {
                        diag_pos[i] = j;
                     }
                  }
               }
            }
            diag_array[my_thread_num] = i_diag;
            offd_array[my_thread_num] = i_offd;
#ifdef HYPRE_USING_OPENMP
            #pragma omp barrier
#endif
            /* thread 0: turn per-thread counts into prefix sums and
             * allocate the CSR arrays at their final sizes */
            if (my_thread_num == 0)
            {
               i_diag = 0;
               i_offd = 0;
               for (i = 0; i < num_threads; i++)
               {
                  i_diag += diag_array[i];
                  i_offd += offd_array[i];
                  diag_array[i] = i_diag;
                  offd_array[i] = i_offd;
               }
               diag_i[num_rows] = i_diag;
               offd_i[num_rows] = i_offd;
               hypre_TFree(hypre_CSRMatrixJ(diag), hypre_CSRMatrixMemoryLocation(diag));
               hypre_TFree(hypre_CSRMatrixData(diag), hypre_CSRMatrixMemoryLocation(diag));
               hypre_TFree(hypre_CSRMatrixJ(offd), hypre_CSRMatrixMemoryLocation(offd));
               hypre_TFree(hypre_CSRMatrixData(offd), hypre_CSRMatrixMemoryLocation(offd));
               hypre_TFree(hypre_CSRMatrixBigJ(offd), hypre_CSRMatrixMemoryLocation(offd));
               diag_j = hypre_CTAlloc(HYPRE_Int, i_diag, hypre_CSRMatrixMemoryLocation(diag));
               diag_data = hypre_CTAlloc(HYPRE_Complex, i_diag, hypre_CSRMatrixMemoryLocation(diag));
               offd_j = hypre_CTAlloc(HYPRE_Int, i_offd, hypre_CSRMatrixMemoryLocation(offd));
               offd_data = hypre_CTAlloc(HYPRE_Complex, i_offd, hypre_CSRMatrixMemoryLocation(offd));
               big_offd_j = hypre_CTAlloc(HYPRE_BigInt, i_offd, hypre_CSRMatrixMemoryLocation(offd));
            }
#ifdef HYPRE_USING_OPENMP
            #pragma omp barrier
#endif
            /* pass 2: each thread fills its slice starting at the prefix
             * sum of the preceding threads' counts */
            if (my_thread_num)
            {
               i_diag = diag_array[my_thread_num - 1];
               i_offd = offd_array[my_thread_num - 1];
            }
            else
            {
               i_diag = 0;
               i_offd = 0;
            }
            for (i = ns; i < ne; i++)
            {
               ii = rownnz ? rownnz[i] : i;
               diag_i[ii] = i_diag;
               offd_i[ii] = i_offd;
               local_j = aux_j[ii];
               local_data = aux_data[ii];
               /* diagonal element (if present) is written first in its row */
               if (diag_pos[i] > -1)
               {
                  diag_j[i_diag] = (HYPRE_Int)(local_j[diag_pos[i]] - col_0);
                  diag_data[i_diag++] = local_data[diag_pos[i]];
               }
               for (j = 0; j < row_length[ii]; j++)
               {
                  if (local_j[j] < col_0 || local_j[j] > col_n)
                  {
                     big_offd_j[i_offd] = local_j[j];
                     offd_data[i_offd++] = local_data[j];
                  }
                  else if (j != diag_pos[i])
                  {
                     diag_j[i_diag] = (HYPRE_Int)(local_j[j] - col_0);
                     diag_data[i_diag++] = local_data[j];
                  }
               }
            }
            /* Correct diag_i and offd_i: empty rows between consecutive
             * nonzero rows inherit the following row's start offset */
            if (rownnz != NULL)
            {
#ifdef HYPRE_USING_OPENMP
               #pragma omp barrier
#endif
               for (i = ns; i < (ne - 1); i++)
               {
                  for (ii = rownnz[i] + 1; ii < rownnz[i + 1]; ii++)
                  {
                     diag_i[ii] = diag_i[rownnz[i + 1]];
                     offd_i[ii] = offd_i[rownnz[i + 1]];
                  }
               }
               if (my_thread_num < (num_threads - 1))
               {
                  for (ii = rownnz[ne - 1] + 1; ii < rownnz[ne]; ii++)
                  {
                     diag_i[ii] = diag_i[rownnz[ne]];
                     offd_i[ii] = offd_i[rownnz[ne]];
                  }
               }
               else
               {
                  for (ii = rownnz[ne - 1] + 1; ii < num_rows; ii++)
                  {
                     diag_i[ii] = diag_i[num_rows];
                     offd_i[ii] = offd_i[num_rows];
                  }
               }
            }
         } /* end parallel region */
         hypre_TFree(diag_array, HYPRE_MEMORY_HOST);
         hypre_TFree(offd_array, HYPRE_MEMORY_HOST);
         hypre_CSRMatrixJ(diag) = diag_j;
         hypre_CSRMatrixData(diag) = diag_data;
         hypre_CSRMatrixNumNonzeros(diag) = diag_i[num_rows];
         if (offd_i[num_rows] > 0)
         {
            hypre_CSRMatrixJ(offd) = offd_j;
            hypre_CSRMatrixBigJ(offd) = big_offd_j;
            hypre_CSRMatrixData(offd) = offd_data;
         }
         hypre_CSRMatrixNumNonzeros(offd) = offd_i[num_rows];
         hypre_TFree(diag_pos, HYPRE_MEMORY_HOST);
      }
      else
      {
         /* data already sits in the CSR arrays;
            move diagonal element into first space of each diag row */
         big_offd_j = hypre_CSRMatrixBigJ(offd);
         diag_j = hypre_CSRMatrixJ(diag);
         diag_data = hypre_CSRMatrixData(diag);
#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel for private (i,j,j0,temp)
#endif
         for (i = 0; i < num_rows; i++)
         {
            j0 = diag_i[i];
            for (j = j0; j < diag_i[i + 1]; j++)
            {
               if (diag_j[j] == i)
               {
                  /* swap the diagonal entry with the row's first entry */
                  temp = diag_data[j0];
                  diag_data[j0] = diag_data[j];
                  diag_data[j] = temp;
                  diag_j[j] = diag_j[j0];
                  diag_j[j0] = i;
                  break;
               }
            }
         }
         offd_j = hypre_CSRMatrixJ(offd);
         if (!offd_j && offd_i[num_rows])
         {
            offd_j = hypre_CTAlloc(HYPRE_Int, offd_i[num_rows], hypre_CSRMatrixMemoryLocation(offd));
            hypre_CSRMatrixJ(offd) = offd_j;
         }
      }
      /* generate col_map_offd: sorted distinct global column indices of
         offd, then compress big_offd_j into local indices via binary search */
      nnz_offd = offd_i[num_rows];
      if (nnz_offd)
      {
         tmp_j = hypre_CTAlloc(HYPRE_BigInt, nnz_offd, HYPRE_MEMORY_HOST);
         for (i = 0; i < nnz_offd; i++)
         {
            tmp_j[i] = big_offd_j[i];
         }
         hypre_BigQsort0(tmp_j, 0, nnz_offd - 1);
         /* unique the sorted list in place */
         num_cols_offd = 1;
         for (i = 0; i < nnz_offd - 1; i++)
         {
            if (tmp_j[i + 1] > tmp_j[i])
            {
               tmp_j[num_cols_offd++] = tmp_j[i + 1];
            }
         }
         col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
         for (i = 0; i < num_cols_offd; i++)
         {
            col_map_offd[i] = tmp_j[i];
         }
#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel for private(i)
#endif
         for (i = 0; i < nnz_offd; i++)
         {
            offd_j[i] = hypre_BigBinarySearch(col_map_offd, big_offd_j[i], num_cols_offd);
         }
         /* shift the map to zero-based global numbering when base != 0 */
         if (base)
         {
            for (i = 0; i < num_cols_offd; i++)
            {
               col_map_offd[i] -= base;
            }
         }
         hypre_ParCSRMatrixColMapOffd(par_matrix) = col_map_offd;
         hypre_CSRMatrixNumCols(offd) = num_cols_offd;
         hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
         hypre_TFree(big_offd_j, hypre_CSRMatrixMemoryLocation(offd));
         hypre_CSRMatrixBigJ(offd) = NULL;
      }
      hypre_IJMatrixAssembleFlag(matrix) = 1;
      /* Generate the nonzero rows in the diag and offd matrices */
      hypre_CSRMatrixSetRownnz(diag);
      hypre_CSRMatrixSetRownnz(offd);
   }
   /* Free memory */
   hypre_AuxParCSRMatrixDestroy(aux_matrix);
   hypre_IJMatrixTranslator(matrix) = NULL;
   HYPRE_ANNOTATE_FUNC_END;
   return hypre_error_flag;
}
/******************************************************************************
*
* IJMatrix_ParCSR interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "../HYPRE.h"
/******************************************************************************
*
* hypre_IJMatrixSetValuesOMPParCSR
*
* sets values in an IJMatrix before assembly,
* use of this routine requires that the values in rows are different from each
* other, i.e rows[i] != rows[j] for i != j
* to ensure accurate threading
*
*****************************************************************************/
/*
 * hypre_IJMatrixSetValuesOMPParCSR
 *
 * Sets (overwrites) coefficients of an IJMatrix using an OpenMP-threaded
 * sweep over the caller-supplied rows.  Correct threading REQUIRES that the
 * rows are mutually distinct (rows[i] != rows[j] for i != j): each thread
 * owns a contiguous slice of 'rows', so a repeated row could be written by
 * two threads concurrently.
 *
 * Parameters:
 *   matrix      - IJ matrix being filled
 *   nrows       - number of rows referenced in this call (must be >= 0)
 *   ncols       - entries per row; NULL means one entry per row
 *   rows        - global row indices
 *   row_indexes - offset of each row's first entry within cols[]/values[]
 *   cols        - global column indices
 *   values      - coefficients to store
 *
 * Returns hypre_error_flag.  Failures inside the parallel region (row too
 * long, entry absent in an already-assembled matrix, row storage exhausted)
 * are recorded via hypre_error() plus an atomically incremented error_flag;
 * the offending thread stops processing its current row.
 * NOTE(review): the post-region check of error_flag is commented out below,
 * so the routine returns hypre_error_flag either way — confirm intended.
 */
HYPRE_Int
hypre_IJMatrixSetValuesOMPParCSR( hypre_IJMatrix       *matrix,
                                  HYPRE_Int             nrows,
                                  HYPRE_Int            *ncols,
                                  const HYPRE_BigInt   *rows,
                                  const HYPRE_Int      *row_indexes,
                                  const HYPRE_BigInt   *cols,
                                  const HYPRE_Complex  *values )
{
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_BigInt col_0, col_n, first;
   //HYPRE_Int cancel_indx;
   HYPRE_BigInt **aux_j;
   HYPRE_Complex **aux_data;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_BigInt *big_offd_j;
   HYPRE_Complex *offd_data;
   /*HYPRE_Int current_num_elmts;*/
   /*HYPRE_Int max_off_proc_elmts;*/
   //HYPRE_Int off_proc_i_indx;
   //HYPRE_BigInt *off_proc_i;
   //HYPRE_BigInt *off_proc_j;
   //HYPRE_Int *offproc_cnt;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   //HYPRE_Int max_num_threads;
   HYPRE_Int error_flag = 0;
   /*HYPRE_Complex *off_proc_data;*/

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //max_num_threads = hypre_NumThreads();
   par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   //offproc_cnt = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);

   /* [col_0, col_n] is the (inclusive) global column range owned locally;
      'first' is the global index of column 0 */
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1] - 1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);

   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      if (print_level)
      {
         hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n");
      }
      return hypre_error_flag;
   }

   if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
   {
      /* Assembled case: the sparsity pattern is frozen, so values may only
         overwrite entries that already exist in diag/offd. */
      HYPRE_BigInt *col_map_offd;
      HYPRE_Int num_cols_offd;

      diag = hypre_ParCSRMatrixDiag(par_matrix);
      diag_i = hypre_CSRMatrixI(diag);
      diag_j = hypre_CSRMatrixJ(diag);
      diag_data = hypre_CSRMatrixData(diag);
      offd = hypre_ParCSRMatrixOffd(par_matrix);
      offd_i = hypre_CSRMatrixI(offd);
      num_cols_offd = hypre_CSRMatrixNumCols(offd);
      if (num_cols_offd)
      {
         col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
         offd_j = hypre_CSRMatrixJ(offd);
         offd_data = hypre_CSRMatrixData(offd);
      }
      aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
      /*if (aux_matrix)
      {
         current_num_elmts
            = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
         cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
      }*/

#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel
#endif
      {
         HYPRE_Int j_offd;
         HYPRE_Int num_threads, my_thread_num;
         HYPRE_Int len, rest, ns, ne;
         HYPRE_Int pos_diag, pos_offd;
         HYPRE_Int len_diag, len_offd;
         //HYPRE_Int row_len;
         HYPRE_Int row_local;
         HYPRE_Int i, j, ii, n;
         HYPRE_BigInt row;
         HYPRE_Int not_found, size, indx;

         num_threads = hypre_NumActiveThreads();
         my_thread_num = hypre_GetThreadNum();

         /* partition the nrows rows into contiguous per-thread slices
            [ns, ne); the first 'rest' threads take one extra row */
         len = nrows / num_threads;
         rest = nrows - len * num_threads;
         if (my_thread_num < rest)
         {
            ns = my_thread_num * (len + 1);
            ne = (my_thread_num + 1) * (len + 1);
         }
         else
         {
            ns = my_thread_num * len + rest;
            ne = (my_thread_num + 1) * len + rest;
         }

         for (ii = ns; ii < ne; ii++)
         {
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = row_indexes[ii];
            /* processor owns the row */
            if (row >= row_partitioning[0] && row < row_partitioning[1])
            {
               row_local = (HYPRE_Int)(row - row_partitioning[0]);
               /* compute local row number */
               size = diag_i[row_local + 1] - diag_i[row_local]
                      + offd_i[row_local + 1] - offd_i[row_local];
               if (n > size)
               {
                  /* more entries supplied than exist in the assembled row */
                  hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                  #pragma omp atomic
#endif
                  error_flag++;
                  if (print_level)
                  {
                     hypre_printf (" row %b too long! \n", row);
                  }
                  break;
                  /*return hypre_error_flag; */
               }
               pos_diag = diag_i[row_local];
               pos_offd = offd_i[row_local];
               len_diag = diag_i[row_local + 1];
               len_offd = offd_i[row_local + 1];
               not_found = 1;
               for (i = 0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                     /* insert into offd */
                  {
                     /* map the global column to the local offd column */
                     j_offd = hypre_BigBinarySearch(col_map_offd, cols[indx] - first,
                                                    num_cols_offd);
                     if (j_offd == -1)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                        #pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag; */
                     }
                     for (j = pos_offd; j < len_offd; j++)
                     {
                        if (offd_j[j] == j_offd)
                        {
                           offd_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                        #pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag;*/
                     }
                     not_found = 1;
                  }
                  /* diagonal element */
                  else if (cols[indx] == row)
                  {
                     /* assembled diag rows keep the diagonal entry in the
                        first slot (moved there during assembly) */
                     if (diag_j[pos_diag] != row_local)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                        #pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag; */
                     }
                     diag_data[pos_diag] = values[indx];
                  }
                  else /* insert into diag */
                  {
                     for (j = pos_diag; j < len_diag; j++)
                     {
                        if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
                        {
                           diag_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                        #pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag;*/
                     }
                  }
                  indx++;
               }
            }
            /* processor does not own the row */
            //else /*search for previous occurrences and cancel them */
            /*{
               if (aux_matrix)
               {
                  col_indx = 0;
                  for (i=0; i < off_proc_i_indx; i=i+2)
                  {
                     row_len = off_proc_i[i+1];
                     if (off_proc_i[i] == row)
                     {
                        for (j=0; j < n; j++)
                        {
                           cnt1 = col_indx;
                           for (k=0; k < row_len; k++)
                           {
                              if (off_proc_j[cnt1] == cols[j])
                              {
                                 off_proc_j[cnt1++] = -1;
                                 offproc_cnt[my_thread_num]++; */
            /*cancel_indx++;*/
            /* if no repetition allowed */
            /* off_proc_j[col_indx] = -1;
               col_indx -= k;
               break; */
            /*}
                              else
                              {
                                 cnt1++;
                              }
                           }
                        }
                        col_indx += row_len;
                     }
                     else
                     {
                        col_indx += row_len;
                     }
                  }*/
            /*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/
            //}
            //}
         }
      } /*end parallel region */
   }
   else /* matrix not assembled */
   {
      /* Pre-assembly case: entries may still be inserted.  Either the aux
         matrix holds per-row dynamic arrays (need_aux), or values go
         straight into the pre-sized ParCSR diag/offd storage. */
      aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
      /*if (aux_matrix)
      {
         current_num_elmts
            = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
         cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
      }*/
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      if (need_aux)
      {
         aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
         aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
      }
      else
      {
         diag = hypre_ParCSRMatrixDiag(par_matrix);
         diag_i = hypre_CSRMatrixI(diag);
         diag_j = hypre_CSRMatrixJ(diag);
         diag_data = hypre_CSRMatrixData(diag);
         offd = hypre_ParCSRMatrixOffd(par_matrix);
         offd_i = hypre_CSRMatrixI(offd);
         if (num_procs > 1)
         {
            /* offd columns are stored as global (big) indices until
               assembly; allocate the big-index array on first use */
            offd_data = hypre_CSRMatrixData(offd);
            big_offd_j = hypre_CSRMatrixBigJ(offd);
            if (!big_offd_j)
            {
               big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
                                          hypre_CSRMatrixMemoryLocation(offd));
               hypre_CSRMatrixBigJ(offd) = big_offd_j;
            }
         }
      }

#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel
#endif
      {
         HYPRE_Int num_threads, my_thread_num;
         HYPRE_Int len, rest, ns, ne;
         HYPRE_BigInt *tmp_j = NULL;
         HYPRE_BigInt *local_j = NULL;
         HYPRE_Complex *tmp_data = NULL;
         HYPRE_Complex *local_data = NULL;
         HYPRE_Int tmp_indx;
         //HYPRE_Int row_len;
         HYPRE_Int row_local;
         HYPRE_Int i, j, ii, n;
         HYPRE_BigInt row;
         HYPRE_Int not_found, size, indx;
         HYPRE_Int old_size, space, cnt;

         num_threads = hypre_NumActiveThreads();
         my_thread_num = hypre_GetThreadNum();

         /* same contiguous per-thread row partition as above */
         len = nrows / num_threads;
         rest = nrows - len * num_threads;
         if (my_thread_num < rest)
         {
            ns = my_thread_num * (len + 1);
            ne = (my_thread_num + 1) * (len + 1);
         }
         else
         {
            ns = my_thread_num * len + rest;
            ne = (my_thread_num + 1) * len + rest;
         }

         for (ii = ns; ii < ne; ii++)
         {
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = row_indexes[ii];
            /* processor owns the row */
            if (row >= row_partitioning[0] && row < row_partitioning[1])
            {
               row_local = (HYPRE_Int)(row - row_partitioning[0]);
               /* compute local row number */
               if (need_aux)
               {
                  /* aux path: overwrite matches in the row's dynamic array;
                     overflow entries are staged in tmp_j/tmp_data and the
                     row is grown afterwards in one realloc */
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
                  space = row_space[row_local];
                  old_size = row_length[row_local];
                  size = space - old_size;
                  if (size < n)
                  {
                     size = n - size;
                     tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                     tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
                  }
                  tmp_indx = 0;
                  not_found = 1;
                  size = old_size;
                  for (i = 0; i < n; i++)
                  {
                     for (j = 0; j < old_size; j++)
                     {
                        if (local_j[j] == cols[indx])
                        {
                           local_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (size < space)
                        {
                           local_j[size] = cols[indx];
                           local_data[size++] = values[indx];
                        }
                        else
                        {
                           tmp_j[tmp_indx] = cols[indx];
                           tmp_data[tmp_indx++] = values[indx];
                        }
                     }
                     not_found = 1;
                     indx++;
                  }
                  row_length[row_local] = size + tmp_indx;
                  if (tmp_indx)
                  {
                     aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
                                                       size + tmp_indx, HYPRE_MEMORY_HOST);
                     aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
                                                          HYPRE_Complex, size + tmp_indx, HYPRE_MEMORY_HOST);
                     row_space[row_local] = size + tmp_indx;
                     local_j = aux_j[row_local];
                     local_data = aux_data[row_local];
                  }
                  cnt = size;
                  for (i = 0; i < tmp_indx; i++)
                  {
                     local_j[cnt] = tmp_j[i];
                     local_data[cnt++] = tmp_data[i];
                  }
                  if (tmp_j)
                  {
                     hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                     hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
                  }
               }
               else /* insert immediately into data in ParCSRMatrix structure */
               {
                  /* direct path: each row has fixed space diag_i[r]..diag_i[r+1]
                     (resp. offd); IndxDiag/IndxOffd track the next free slot */
                  HYPRE_Int offd_indx, diag_indx;
                  HYPRE_Int offd_space, diag_space;
                  HYPRE_Int cnt_diag, cnt_offd;
                  offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
                  diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
                  cnt_diag = diag_indx;
                  cnt_offd = offd_indx;
                  diag_space = diag_i[row_local + 1];
                  offd_space = offd_i[row_local + 1];
                  not_found = 1;
                  for (i = 0; i < n; i++)
                  {
                     if (cols[indx] < col_0 || cols[indx] > col_n)
                        /* insert into offd */
                     {
                        for (j = offd_i[row_local]; j < offd_indx; j++)
                        {
                           if (big_offd_j[j] == cols[indx])
                           {
                              offd_data[j] = values[indx];
                              not_found = 0;
                              break;
                           }
                        }
                        if (not_found)
                        {
                           if (cnt_offd < offd_space)
                           {
                              big_offd_j[cnt_offd] = cols[indx];
                              offd_data[cnt_offd++] = values[indx];
                           }
                           else
                           {
                              hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                              #pragma omp atomic
#endif
                              error_flag++;
                              if (print_level)
                              {
                                 hypre_printf("Error in row %b ! Too many elements!\n",
                                              row);
                              }
                              break;
                              /*return hypre_error_flag;*/
                           }
                        }
                        not_found = 1;
                     }
                     else /* insert into diag */
                     {
                        for (j = diag_i[row_local]; j < diag_indx; j++)
                        {
                           if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
                           {
                              diag_data[j] = values[indx];
                              not_found = 0;
                              break;
                           }
                        }
                        if (not_found)
                        {
                           if (cnt_diag < diag_space)
                           {
                              diag_j[cnt_diag] = (HYPRE_Int)(cols[indx] - col_0);
                              diag_data[cnt_diag++] = values[indx];
                           }
                           else
                           {
                              hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                              #pragma omp atomic
#endif
                              error_flag++;
                              if (print_level)
                              {
                                 hypre_printf("Error in row %b ! Too many elements !\n",
                                              row);
                              }
                              break;
                              /*return hypre_error_flag;*/
                           }
                        }
                        not_found = 1;
                     }
                     indx++;
                  }
                  hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
                  hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
               }
            }
            /* processor does not own the row */
            /*else
            {
               if (aux_matrix)
               {
                  col_indx = 0;
                  for (i=0; i < off_proc_i_indx; i=i+2)
                  {
                     row_len = off_proc_i[i+1];
                     if (off_proc_i[i] == row)
                     {
                        for (j=0; j < n; j++)
                        {
                           cnt1 = col_indx;
                           for (k=0; k < row_len; k++)
                           {
                              if (off_proc_j[cnt1] == cols[j])
                              {
                                 off_proc_j[cnt1++] = -1; */
            /*cancel_indx++;*/
            //offproc_cnt[my_thread_num]++;
            /* if no repetition allowed */
            /* off_proc_j[col_indx] = -1;
               col_indx -= k;
               break; */
            /* }
                              else
                              {
                                 cnt1++;
                              }
                           }
                        }
                        col_indx += row_len;
                     }
                     else
                     {
                        col_indx += row_len;
                     }
                  }*/
            /*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/
            /*}
            }*/
         }
      } /* end parallel region */
   }
   /*if (error_flag)
   {
      return hypre_error_flag;
   }
   if (aux_matrix)
   {
      for (i1=0; i1 < max_num_threads; i1++)
      {
         cancel_indx += offproc_cnt[i1];
      }
      hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;
   }*/
   //hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAddToValuesOMPParCSR
*
* adds row values to an IJMatrix
*
*****************************************************************************/
/*
 * hypre_IJMatrixAddToValuesOMPParCSR
 *
 * Adds (accumulates, +=) coefficients into an IJMatrix using an
 * OpenMP-threaded sweep over the caller-supplied rows.  As with the Set
 * variant, safe threading relies on each row appearing at most once in
 * 'rows', since threads work on disjoint contiguous slices of 'rows'.
 *
 * Rows NOT owned by this process cannot be handled inside the parallel
 * region: each thread records the (row slot, value offset) pairs it skipped
 * in a private growable buffer (offproc_cnt[thread]); after the parallel
 * region those entries are replayed serially into the aux matrix's
 * off-processor queues (OffProcI/OffProcJ/OffProcData) for communication
 * at assembly time.
 *
 * Parameters mirror hypre_IJMatrixSetValuesOMPParCSR:
 *   matrix, nrows, ncols (NULL => 1 per row), rows, row_indexes, cols, values
 *
 * Returns hypre_error_flag.  If any thread raised an error (row too long,
 * missing entry after assembly, row storage exhausted) the routine returns
 * early, before the off-processor entries are replayed.
 */
HYPRE_Int
hypre_IJMatrixAddToValuesOMPParCSR( hypre_IJMatrix       *matrix,
                                    HYPRE_Int             nrows,
                                    HYPRE_Int            *ncols,
                                    const HYPRE_BigInt   *rows,
                                    const HYPRE_Int      *row_indexes,
                                    const HYPRE_BigInt   *cols,
                                    const HYPRE_Complex  *values )
{
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_BigInt col_0, col_n, first;
   HYPRE_BigInt **aux_j;
   HYPRE_Complex **aux_data;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_BigInt *big_offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_Int current_num_elmts;
   HYPRE_Int max_off_proc_elmts;
   HYPRE_Int off_proc_i_indx;
   HYPRE_BigInt *off_proc_i;
   HYPRE_BigInt *off_proc_j;
   HYPRE_Complex *off_proc_data;
   HYPRE_Int **offproc_cnt;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   HYPRE_Int max_num_threads;
   HYPRE_Int error_flag = 0;
   HYPRE_Int i1;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   max_num_threads = hypre_NumThreads();
   par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   /* one lazily-allocated off-processor scratch buffer per thread */
   offproc_cnt = hypre_CTAlloc(HYPRE_Int *, max_num_threads, HYPRE_MEMORY_HOST);
   for (i1 = 0; i1 < max_num_threads; i1++)
   {
      offproc_cnt[i1] = NULL;
   }

   /* [col_0, col_n] is the (inclusive) locally owned global column range */
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1] - 1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);

   if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled */
   {
      /* Assembled case: pattern is frozen; only existing entries may be
         accumulated into. */
      HYPRE_Int num_cols_offd;
      HYPRE_BigInt *col_map_offd;

      diag = hypre_ParCSRMatrixDiag(par_matrix);
      diag_i = hypre_CSRMatrixI(diag);
      diag_j = hypre_CSRMatrixJ(diag);
      diag_data = hypre_CSRMatrixData(diag);
      offd = hypre_ParCSRMatrixOffd(par_matrix);
      offd_i = hypre_CSRMatrixI(offd);
      num_cols_offd = hypre_CSRMatrixNumCols(offd);
      if (num_cols_offd)
      {
         col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
         offd_j = hypre_CSRMatrixJ(offd);
         offd_data = hypre_CSRMatrixData(offd);
      }
      aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
      if (aux_matrix)
      {
         current_num_elmts
            = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
      }

#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel
#endif
      {
         HYPRE_Int j_offd;
         HYPRE_Int num_threads, my_thread_num;
         HYPRE_Int len, rest, ns, ne;
         HYPRE_Int pos_diag, pos_offd;
         HYPRE_Int len_diag, len_offd;
         HYPRE_Int row_local;
         HYPRE_Int i, j, ii, n;
         HYPRE_BigInt row;
         HYPRE_Int not_found, size, indx;
         HYPRE_Int *my_offproc_cnt = NULL;

         num_threads = hypre_NumActiveThreads();
         my_thread_num = hypre_GetThreadNum();

         /* contiguous per-thread slice [ns, ne) of the nrows rows; the
            first 'rest' threads take one extra row */
         len = nrows / num_threads;
         rest = nrows - len * num_threads;
         if (my_thread_num < rest)
         {
            ns = my_thread_num * (len + 1);
            ne = (my_thread_num + 1) * (len + 1);
         }
         else
         {
            ns = my_thread_num * len + rest;
            ne = (my_thread_num + 1) * len + rest;
         }

         for (ii = ns; ii < ne; ii++)
         {
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = row_indexes[ii];
            if (row >= row_partitioning[0] && row < row_partitioning[1])
            {
               row_local = (HYPRE_Int)(row - row_partitioning[0]);
               /* compute local row number */
               size = diag_i[row_local + 1] - diag_i[row_local]
                      + offd_i[row_local + 1] - offd_i[row_local];
               if (n > size)
               {
                  /* more entries supplied than exist in the assembled row */
                  hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                  #pragma omp atomic
#endif
                  error_flag++;
                  if (print_level)
                  {
                     hypre_printf (" row %b too long! \n", row);
                  }
                  break;
                  /*return hypre_error_flag; */
               }
               pos_diag = diag_i[row_local];
               pos_offd = offd_i[row_local];
               len_diag = diag_i[row_local + 1];
               len_offd = offd_i[row_local + 1];
               not_found = 1;
               for (i = 0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                     /* insert into offd */
                  {
                     /* map the global column to the local offd column */
                     j_offd = hypre_BigBinarySearch(col_map_offd, cols[indx] - first,
                                                    num_cols_offd);
                     if (j_offd == -1)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                        #pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag;*/
                     }
                     for (j = pos_offd; j < len_offd; j++)
                     {
                        if (offd_j[j] == j_offd)
                        {
                           offd_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                        #pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag;*/
                     }
                     not_found = 1;
                  }
                  /* diagonal element */
                  else if (cols[indx] == row)
                  {
                     /* assembled diag rows keep the diagonal entry first */
                     if (diag_j[pos_diag] != row_local)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                        #pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag;*/
                     }
                     diag_data[pos_diag] += values[indx];
                  }
                  else /* insert into diag */
                  {
                     for (j = pos_diag; j < len_diag; j++)
                     {
                        if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
                        {
                           diag_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                        #pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag;*/
                     }
                  }
                  indx++;
               }
            }
            /* not my row */
            /* need to find solution for threaded version!!!! */
            /* could save row number and process later .... */
            else
            {
               /* Record (ii, indx) in this thread's private buffer.
                  Layout: [0] = capacity, [1] = next free slot (starts at 2),
                  then pairs of (row slot ii, value offset indx). */
               if (!my_offproc_cnt)
               {
                  my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST);
                  offproc_cnt[my_thread_num] = my_offproc_cnt;
                  my_offproc_cnt[0] = 200;
                  my_offproc_cnt[1] = 2;
               }
               i = my_offproc_cnt[1];
               if (i + 2 < my_offproc_cnt[0])
               {
                  my_offproc_cnt[i] = ii;
                  my_offproc_cnt[i + 1] = indx;
                  my_offproc_cnt[1] += 2;
               }
               else
               {
                  /* grow the buffer by 200 slots before appending */
                  size = my_offproc_cnt[0];
                  my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size + 200, HYPRE_MEMORY_HOST);
                  my_offproc_cnt[0] += 200;
                  my_offproc_cnt[i] = ii;
                  my_offproc_cnt[i + 1] = indx;
                  my_offproc_cnt[1] += 2;
               }
            }
         }
      } /* end parallel region */
   }
   /* not assembled */
   else
   {
      /* Pre-assembly case: entries may still be inserted, either into the
         aux matrix's per-row dynamic arrays (need_aux) or directly into the
         pre-sized ParCSR diag/offd storage. */
      aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
      if (aux_matrix)
      {
         current_num_elmts
            = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
      }
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      if (need_aux)
      {
         aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
         aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
      }
      else
      {
         diag = hypre_ParCSRMatrixDiag(par_matrix);
         diag_i = hypre_CSRMatrixI(diag);
         diag_j = hypre_CSRMatrixJ(diag);
         diag_data = hypre_CSRMatrixData(diag);
         offd = hypre_ParCSRMatrixOffd(par_matrix);
         offd_i = hypre_CSRMatrixI(offd);
         if (num_procs > 1)
         {
            /* offd columns are stored as global (big) indices until
               assembly; allocate the big-index array on first use */
            big_offd_j = hypre_CSRMatrixBigJ(offd);
            offd_data = hypre_CSRMatrixData(offd);
            if (!big_offd_j)
            {
               big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
                                          hypre_CSRMatrixMemoryLocation(offd));
               hypre_CSRMatrixBigJ(offd) = big_offd_j;
            }
         }
      }

#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel
#endif
      {
         HYPRE_Int num_threads, my_thread_num;
         HYPRE_Int len, rest, ns, ne;
         HYPRE_BigInt *tmp_j = NULL;
         HYPRE_BigInt *local_j = NULL;
         HYPRE_Complex *tmp_data = NULL;
         HYPRE_Complex *local_data = NULL;
         HYPRE_Int tmp_indx;
         HYPRE_Int row_local;
         HYPRE_BigInt row;
         HYPRE_Int i, j, ii, n;
         HYPRE_Int not_found, size, indx;
         HYPRE_Int old_size, space, cnt;
         HYPRE_Int *my_offproc_cnt = NULL;

         num_threads = hypre_NumActiveThreads();
         my_thread_num = hypre_GetThreadNum();

         /* same contiguous per-thread row partition as above */
         len = nrows / num_threads;
         rest = nrows - len * num_threads;
         if (my_thread_num < rest)
         {
            ns = my_thread_num * (len + 1);
            ne = (my_thread_num + 1) * (len + 1);
         }
         else
         {
            ns = my_thread_num * len + rest;
            ne = (my_thread_num + 1) * len + rest;
         }

         for (ii = ns; ii < ne; ii++)
         {
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = row_indexes[ii];
            if (row >= row_partitioning[0] && row < row_partitioning[1])
            {
               row_local = (HYPRE_Int)(row - row_partitioning[0]);
               /* compute local row number */
               if (need_aux)
               {
                  /* aux path: accumulate into matches in the row's dynamic
                     array; overflow entries are staged in tmp_j/tmp_data and
                     appended after one realloc of the row */
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
                  space = row_space[row_local];
                  old_size = row_length[row_local];
                  size = space - old_size;
                  if (size < n)
                  {
                     size = n - size;
                     tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                     tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
                  }
                  tmp_indx = 0;
                  not_found = 1;
                  size = old_size;
                  for (i = 0; i < n; i++)
                  {
                     for (j = 0; j < old_size; j++)
                     {
                        if (local_j[j] == cols[indx])
                        {
                           local_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (size < space)
                        {
                           local_j[size] = cols[indx];
                           local_data[size++] = values[indx];
                        }
                        else
                        {
                           tmp_j[tmp_indx] = cols[indx];
                           tmp_data[tmp_indx++] = values[indx];
                        }
                     }
                     not_found = 1;
                     indx++;
                  }
                  row_length[row_local] = size + tmp_indx;
                  if (tmp_indx)
                  {
                     aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
                                                       size + tmp_indx, HYPRE_MEMORY_HOST);
                     aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
                                                          HYPRE_Complex, size + tmp_indx, HYPRE_MEMORY_HOST);
                     row_space[row_local] = size + tmp_indx;
                     local_j = aux_j[row_local];
                     local_data = aux_data[row_local];
                  }
                  cnt = size;
                  for (i = 0; i < tmp_indx; i++)
                  {
                     local_j[cnt] = tmp_j[i];
                     local_data[cnt++] = tmp_data[i];
                  }
                  if (tmp_j)
                  {
                     hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                     hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
                  }
               }
               else /* insert immediately into data in ParCSRMatrix structure */
               {
                  /* direct path: fixed per-row space; IndxDiag/IndxOffd
                     track the next free slot within each row */
                  HYPRE_Int offd_indx, diag_indx;
                  HYPRE_Int offd_space, diag_space;
                  HYPRE_Int cnt_diag, cnt_offd;
                  offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
                  diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
                  cnt_diag = diag_indx;
                  cnt_offd = offd_indx;
                  diag_space = diag_i[row_local + 1];
                  offd_space = offd_i[row_local + 1];
                  not_found = 1;
                  for (i = 0; i < n; i++)
                  {
                     if (cols[indx] < col_0 || cols[indx] > col_n)
                        /* insert into offd */
                     {
                        for (j = offd_i[row_local]; j < offd_indx; j++)
                        {
                           if (big_offd_j[j] == cols[indx])
                           {
                              offd_data[j] += values[indx];
                              not_found = 0;
                              break;
                           }
                        }
                        if (not_found)
                        {
                           if (cnt_offd < offd_space)
                           {
                              big_offd_j[cnt_offd] = cols[indx];
                              offd_data[cnt_offd++] = values[indx];
                           }
                           else
                           {
                              hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                              #pragma omp atomic
#endif
                              error_flag++;
                              if (print_level)
                              {
                                 hypre_printf("Error in row %b ! Too many elements!\n",
                                              row);
                              }
                              break;
                              /*return hypre_error_flag;*/
                           }
                        }
                        not_found = 1;
                     }
                     else /* insert into diag */
                     {
                        for (j = diag_i[row_local]; j < diag_indx; j++)
                        {
                           if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
                           {
                              diag_data[j] += values[indx];
                              not_found = 0;
                              break;
                           }
                        }
                        if (not_found)
                        {
                           if (cnt_diag < diag_space)
                           {
                              diag_j[cnt_diag] = (HYPRE_Int)(cols[indx] - col_0);
                              diag_data[cnt_diag++] = values[indx];
                           }
                           else
                           {
                              hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                              #pragma omp atomic
#endif
                              error_flag++;
                              if (print_level)
                              {
                                 hypre_printf("Error in row %b ! Too many elements !\n",
                                              row);
                              }
                              break;
                              /*return hypre_error_flag;*/
                           }
                        }
                        not_found = 1;
                     }
                     indx++;
                  }
                  hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
                  hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
               }
            }
            /* not my row */
            else
            {
               /* Record (ii, indx) in this thread's private buffer.
                  Layout: [0] = capacity, [1] = next free slot (starts at 2),
                  then pairs of (row slot ii, value offset indx). */
               if (!my_offproc_cnt)
               {
                  my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST);
                  offproc_cnt[my_thread_num] = my_offproc_cnt;
                  my_offproc_cnt[0] = 200;
                  my_offproc_cnt[1] = 2;
               }
               i = my_offproc_cnt[1];
               if (i + 2 < my_offproc_cnt[0])
               {
                  my_offproc_cnt[i] = ii;
                  my_offproc_cnt[i + 1] = indx;
                  my_offproc_cnt[1] += 2;
               }
               else
               {
                  /* grow the buffer by 200 slots before appending */
                  size = my_offproc_cnt[0];
                  my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size + 200, HYPRE_MEMORY_HOST);
                  my_offproc_cnt[0] += 200;
                  my_offproc_cnt[i] = ii;
                  my_offproc_cnt[i + 1] = indx;
                  my_offproc_cnt[1] += 2;
               }
            }
         }
      } /*end parallel region */
   }

   if (error_flag)
   {
      return hypre_error_flag;
   }

   /* off-processor entries need an aux matrix to queue into; create a
      minimal one if none exists yet */
   if (!aux_matrix)
   {
      HYPRE_Int size = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
      hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
      hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }

   /* Serial replay: append every off-processor entry recorded by any thread
      into the aux matrix's off-proc queues, growing them as needed. */
   for (i1 = 0; i1 < max_num_threads; i1++)
   {
      if (offproc_cnt[i1])
      {
         HYPRE_Int *my_offproc_cnt = offproc_cnt[i1];
         HYPRE_Int i, i2, ii, n, indx;
         HYPRE_BigInt row;
         for (i2 = 2; i2 < my_offproc_cnt[1]; i2 += 2)
         {
            ii = my_offproc_cnt[i2];
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = my_offproc_cnt[i2 + 1];
            current_num_elmts
               = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
            max_off_proc_elmts
               = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            if (!max_off_proc_elmts)
            {
               /* first off-proc entry ever: allocate the queues.
                  off_proc_i holds (row, count) pairs, hence 2x size */
               max_off_proc_elmts = hypre_max(n, 1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) =
                  max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               /* grow the queues to make room for this row's n entries */
               max_off_proc_elmts += 3 * n;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data, HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix)
                  = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i = 0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
         }
         hypre_TFree(offproc_cnt[i1], HYPRE_MEMORY_HOST);
      }
   }
   hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
|
GB_binop__iseq_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_uint64)
// A*D function (colscale): GB (_AxD__iseq_uint64)
// D*A function (rowscale): GB (_DxB__iseq_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_uint64)
// C=scalar+B GB (_bind1st__iseq_uint64)
// C=scalar+B' GB (_bind1st_tran__iseq_uint64)
// C=A+scalar GB (_bind2nd__iseq_uint64)
// C=A'+scalar GB (_bind2nd_tran__iseq_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_UINT64 || GxB_NO_ISEQ_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// (disabled: the comment below lists the only ops with a dense-accum kernel,
// and ISEQ is not among them, so no C+=A+B kernel is generated here)
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Auto-generated wrapper: the numeric loop lives in the included template,
// which expands via the GB_* macros defined above for the ISEQ/uint64_t case.
// Returns GrB_NO_VALUE when the kernel is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_ewise3_noaccum__iseq_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__iseq_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__iseq_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above already returned; harmless
// generator artifact.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__iseq_uint64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__iseq_uint64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Auto-generated wrapper: slicing workspace is declared here, consumed by the
// included template, and released by GB_FREE_WORK.
GrB_Info GB (_AaddB__iseq_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__iseq_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__iseq_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is defined as 0 above (x == y is commutative), so only the
// unflipped branch below is compiled in.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__iseq_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__iseq_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx[p] = (x == Bx[p]) for each of the bnz positions of B.
GrB_Info GB (_bind1st__iseq_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip positions not present according to the bitmap Bb
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x == bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Computes Cx[p] = (Ax[p] == y) for each of the anz positions of A.
GrB_Info GB (_bind2nd__iseq_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions not present according to the bitmap Ab
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij == y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
GrB_Info GB (_bind1st_tran__iseq_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// NOTE: the directives below sit after the return statements, but they are
// preprocessor text, so they still restore GB_ATYPE for the rest of the file.
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
GrB_Info GB (_bind2nd_tran__iseq_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
// closes the file-level compile guard opened before this excerpt
#endif
|
omp_workshare3.c | /* { dg-do compile } */
/******************************************************************************
* OpenMP Example - Combined Parallel Loop Work-sharing - C/C++ Version
* FILE: omp_workshare3.c
* DESCRIPTION:
* This example attempts to show use of the parallel for construct. However
* it will generate errors at compile time. Try to determine what is causing
* the error. See omp_workshare4.c for a corrected version.
* SOURCE: Blaise Barney 5/99
* LAST REVISED: 03/03/2002
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#define N 50
#define CHUNKSIZE 5
/*
 * Deliberately broken: this is a compile-time testcase.  A combined
 * `parallel for` construct must be followed immediately by a for-loop, not
 * by a compound { } block, so the pragma below is rejected -- the dg-error
 * marker on the opening brace expects exactly that diagnostic.  Do not
 * "fix" this file; omp_workshare4.c is the corrected version (see header).
 * (`main ()` with no return type is legacy implicit-int style, kept as-is.)
 */
main () {
int i, chunk, tid;
float a[N], b[N], c[N];
/* Some initializations */
for (i=0; i < N; i++)
a[i] = b[i] = i * 1.0;
chunk = CHUNKSIZE;
#pragma omp parallel for \
shared(a,b,c,chunk) \
private(i,tid) \
schedule(static,chunk)
{ /* { dg-error "expected" } */
tid = omp_get_thread_num();
for (i=0; i < N; i++)
{
c[i] = a[i] + b[i];
printf("tid= %d i= %d c[i]= %f\n", tid, i, c[i]);
}
} /* end of parallel for construct */
return 0;
}
|
main.c | /*
All changes to code are copyright, 2017, Jenniffer Estrada, jmestrada@unm.edu
Research projects -- UNM and Jenniffer Estrada
*/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#define PI 3.14159265
// Kind of convolution kernel a FILTER holds.
typedef enum {
FILTER_AVG,
FILTER_GAUSS
} filter_type;
// Square convolution kernel; matrix is (2*radius+1) x (2*radius+1),
// heap-allocated row by row (see filter_create_* / filter_free).
typedef struct {
int radius;
double **matrix;
int type; // a filter_type value (FILTER_AVG or FILTER_GAUSS)
} FILTER;
//FILTER *filter_create_avg(int radius);
//FILTER *filter_create_gauss(int radius, double sigma);
//void filter_print(FILTER *filter);
//void filter_free(FILTER *filter);
// One RGB sample, one byte per channel.
typedef struct {
unsigned char R;
unsigned char G;
unsigned char B;
} pixel;
//.ppm image
// header holds the 2-char magic (e.g. "P6") plus '\0'; pixels is a
// height-length array of width-length heap rows (see image_load/image_free).
typedef struct {
char header[3];
int width, height;
int color_depth;
pixel **pixels;
} IMAGE;
// HANDLE IMAGE READ AND WRITE
/*
 * Load a PPM-style image from disk.
 *
 * The layout matches what image_write() produces: a 2-char magic token, then
 * "width height color_depth", then width*height raw RGB byte triples read
 * immediately after the last header digit.
 * NOTE(review): standard P6 PPM has exactly one whitespace byte between the
 * maxval and the pixel data; this reader does not skip it, so it only
 * round-trips files written by image_write() -- confirm before pointing it at
 * arbitrary .ppm files.  The file is also opened in text mode ("r"), which
 * can corrupt binary data on Windows.
 *
 * Fixes vs. the original: bounded header read (the unbounded %s could
 * overflow the 3-byte header buffer), checked fscanf/malloc results, and the
 * IMAGE struct is no longer leaked when fopen fails.
 *
 * Returns a heap-allocated IMAGE (release with image_free()), or NULL on
 * open, parse, or allocation failure.
 */
IMAGE *image_load(const char *image_name) {
    IMAGE *image = malloc(sizeof *image);
    if (!image)
        return NULL;

    FILE *file = fopen(image_name, "r");
    if (!file) {
        free(image);
        return NULL;
    }

    // %2s bounds the token: header[3] holds at most 2 chars + '\0'.
    if (fscanf(file, "%2s", image->header) != 1 ||
        fscanf(file, "%d %d %d", &(image->width), &(image->height),
               &(image->color_depth)) != 3 ||
        image->width <= 0 || image->height <= 0) {
        fclose(file);
        free(image);
        return NULL;
    }

    // One row-pointer array plus one heap row per scanline.
    image->pixels = malloc(image->height * sizeof(pixel *));
    if (!image->pixels) {
        fclose(file);
        free(image);
        return NULL;
    }
    int i, j;
    for (i = 0; i < image->height; i++) {
        image->pixels[i] = malloc(image->width * sizeof(pixel));
        if (!image->pixels[i]) {
            while (i-- > 0)             // unwind rows allocated so far
                free(image->pixels[i]);
            free(image->pixels);
            fclose(file);
            free(image);
            return NULL;
        }
    }

    // Raw RGB triples; %c does not skip whitespace, matching the writer's
    // back-to-back byte layout.
    for (i = 0; i < image->height; i++)
        for (j = 0; j < image->width; j++)
            fscanf(file, "%c%c%c", &(image->pixels[i][j].R),
                   &(image->pixels[i][j].G), &(image->pixels[i][j].B));
    printf("image height width= %d x %d \n", image->height, image->width);

    fclose(file);
    return image;
}
/*
 * Write an image to disk in the layout image_load() reads back: the header
 * token, "width height", color depth, then raw RGB byte triples with no
 * separator after the last header digit (non-standard PPM, but it matches
 * the reader).
 *
 * Fixes vs. the original: the trailing fprintf(file, "%d", EOF) appended a
 * literal "-1" after the pixel data (EOF is an in-memory sentinel, not a
 * file marker), and fclose() was unchecked, so buffered write errors were
 * silently lost.
 *
 * Returns 1 on success, 0 on failure.
 */
int image_write(IMAGE *image, const char *file_name) {
    FILE *file = fopen(file_name, "w");
    if (!file)
        return 0;

    // Header -- format string kept byte-identical to the original writer.
    fprintf(file, "%s\n%d %d\n%d", image->header, image->width,
            image->height, image->color_depth);

    int i, j;
    for (i = 0; i < image->height; i++)
        for (j = 0; j < image->width; j++)
            fprintf(file, "%c%c%c", image->pixels[i][j].R,
                    image->pixels[i][j].G, image->pixels[i][j].B);

    // fclose flushes; surface any deferred write error to the caller.
    if (fclose(file) == EOF)
        return 0;
    return 1;
}
/*
 * Allocate a new IMAGE with the same header, dimensions, and color depth as
 * `source`, but with uninitialized pixel storage.  Caller releases the
 * result with image_free().
 */
IMAGE *image_create_blank(IMAGE *source) {
    IMAGE *copy = (IMAGE*) malloc( sizeof(IMAGE) );

    // Mirror everything except the pixel data itself.
    strcpy(copy->header, source->header);
    copy->height = source->height;
    copy->width = source->width;
    copy->color_depth = source->color_depth;

    // One row-pointer per scanline, one uninitialized row buffer each.
    copy->pixels = (pixel**) malloc(copy->height * sizeof(pixel*));
    int row;
    for (row = 0; row < copy->height; row++)
        copy->pixels[row] = (pixel*) malloc(copy->width * sizeof(pixel));

    return copy;
}
/*
 * Release an IMAGE created by image_load() or image_create_blank(): each
 * pixel row, the row-pointer array, then the struct itself.
 * Fix vs. the original: safe to call with NULL (no-op), mirroring free()'s
 * contract, so error paths can free unconditionally.
 */
void image_free(IMAGE *image) {
    if (!image)
        return;
    int i;
    for (i = 0; i < image->height; i++)
        free(image->pixels[i]);
    free(image->pixels);
    free(image);
}
// Create filter
/*
 * Build a box (moving-average) filter of the given radius: a
 * (2*radius+1) x (2*radius+1) matrix whose entries all equal 1/(dim*dim),
 * so the weights sum to 1.  Caller releases the result with filter_free().
 */
FILTER *filter_create_avg(int radius) {
    FILTER *filter = (FILTER*) malloc(sizeof(FILTER));
    filter->radius = radius;
    filter->type = FILTER_AVG;

    int dim = 2*radius+1;                  // matrix is dim x dim
    double weight = 1.0 / (dim * dim);     // uniform entry value

    // Allocate each row and fill it in one pass.
    filter->matrix = (double**) malloc(dim * sizeof(double*));
    int row, col;
    for (row = 0; row < dim; row++) {
        filter->matrix[row] = (double*) malloc(dim * sizeof(double));
        for (col = 0; col < dim; col++)
            filter->matrix[row][col] = weight;
    }

    return filter;
}
/*
 * Evaluate the 2-D Gaussian kernel at integer offset (x, y):
 *   (1 / (2*pi*sigma^2)) * exp(-(x^2 + y^2) / (2*sigma^2))
 */
double gauss_2d(int x, int y, double sigma) {
    double amplitude = 1.0 / (2 * PI * sigma * sigma);
    double falloff = exp(-(x*x + y*y) / (2 * sigma * sigma));
    return amplitude * falloff;
}
/*
 * Build a normalized Gaussian blur filter: a (2*radius+1) x (2*radius+1)
 * matrix sampled from gauss_2d() and rescaled so its entries sum to 1.
 * Caller releases the result with filter_free().
 */
FILTER *filter_create_gauss(int radius, double sigma) {
    FILTER *filter = (FILTER*) malloc(sizeof(FILTER));
    filter->radius = radius;
    filter->type = FILTER_GAUSS;

    int dim = 2*radius+1;                  // matrix is dim x dim
    filter->matrix = (double**) malloc(dim * sizeof(double*));
    int row;
    for (row = 0; row < dim; row++)
        filter->matrix[row] = (double*) malloc(dim * sizeof(double));

    // Sample the Gaussian; (dx, dy) range over [-radius, radius].
    double sum = 0.0;
    int dy, dx;
    for (dy = -radius; dy <= radius; dy++)
        for (dx = -radius; dx <= radius; dx++) {
            filter->matrix[dy+radius][dx+radius] = gauss_2d(dx, dy, sigma);
            sum += filter->matrix[dy+radius][dx+radius];
        }

    // Normalize so the weights sum to ~1 (no overall brightness change).
    for (dy = 0; dy < dim; dy++)
        for (dx = 0; dx < dim; dx++)
            filter->matrix[dy][dx] /= sum;

    return filter;
}
/*
 * Dump the filter matrix to stdout, one row per line.
 */
void filter_print(FILTER *filter) {
    int dim = 2*filter->radius+1;
    int row, col;
    for (row = 0; row < dim; row++) {
        for (col = 0; col < dim; col++)
            printf("%lf ", filter->matrix[row][col]);
        printf("\n");
    }
}
/*
 * Release a FILTER created by filter_create_avg() or filter_create_gauss():
 * each matrix row, the row-pointer array, then the struct itself.
 */
void filter_free(FILTER *filter) {
    int dim = 2*filter->radius+1;
    int row;
    for (row = 0; row < dim; row++)
        free(filter->matrix[row]);
    free(filter->matrix);
    free(filter);
}
// Manipulate image with filter
/*
 * Convolve one pixel: write into result->pixels[y][x] the weighted sum of
 * original's neighborhood under filter->matrix.  Border pixels (closer than
 * filter->radius to any edge) are copied through unfiltered.
 *
 * Fixes vs. the original: removed the unused local `pixel res` and the
 * stale commented-out OpenMP pragma.
 * NOTE(review): the double -> unsigned char stores truncate rather than
 * round/clamp; fine while the filter weights sum to ~1, but confirm if new
 * filter types are added.
 */
void apply_to_pixel(int x, int y, IMAGE *original, IMAGE *result, FILTER *filter) {
    // Border: no full neighborhood available, pass the pixel through.
    if (x < filter->radius || y < filter->radius ||
        x >= original->width - filter->radius ||
        y >= original->height - filter->radius) {
        result->pixels[y][x] = original->pixels[y][x];
        return;
    }

    // Accumulate each channel in double to avoid intermediate truncation.
    double res_R = 0;
    double res_G = 0;
    double res_B = 0;
    int i, j;
    for (i = -filter->radius; i <= filter->radius; i++)
        for (j = -filter->radius; j <= filter->radius; j++) {
            double fil = filter->matrix[i + filter->radius][j + filter->radius];
            res_R += fil * original->pixels[y + i][x + j].R;
            res_G += fil * original->pixels[y + i][x + j].G;
            res_B += fil * original->pixels[y + i][x + j].B;
        }
    result->pixels[y][x].R = res_R;
    result->pixels[y][x].G = res_G;
    result->pixels[y][x].B = res_B;
}
/*
 * Apply `filter` to every pixel of `original`, returning a newly allocated
 * result image (caller frees with image_free()).  Rows are processed in
 * parallel with OpenMP.
 *
 * Fix vs. the original: `x` was declared at function scope, so under
 * `#pragma omp parallel for` it was SHARED across threads (only the loop
 * variable `y` is implicitly private) -- a data race that could scramble
 * which columns each thread processed.  Declaring `x` inside the parallel
 * region makes it per-thread.
 */
IMAGE *apply_filter(IMAGE *original, FILTER *filter) {
    IMAGE *result = image_create_blank(original);
    int y;
    #pragma omp parallel for
    for (y = 0; y < original->height; y++) {
        int x;  // per-thread column index
        for (x = 0; x < original->width; x++)
            apply_to_pixel(x, y, original, result, filter);
    }
    return result;
}
// MAIN PROGRAM
/*
 * Gaussian-blur driver: load a .ppm image, build a Gaussian filter, apply it
 * (timed with omp_get_wtime), and write the result.
 *
 * Fixes vs. the original: unbounded strcpy from argv and unbounded
 * scanf("%s") could overflow the 50-byte name buffers; the image_load()
 * result was never NULL-checked (crash on a missing input file); scanf
 * returns were ignored; removed the unused `nthreads` local and the stale
 * commented-out omp_set_num_threads call.
 */
int main(int argc, char *argv[]) {
    //The image that is going to be blurred
    IMAGE *image = NULL;
    //The resulting image
    IMAGE *result = NULL;
    //The used filter
    FILTER *filter;
    //Info
    char image_file_name[50];
    char result_file_name[50];
    int radius;
    double sigma;
    //Arguments: argv[0]="path", argv[1]="image_name.ppm", argv[2]="result_image_name.ppm" argv[3]="radius" argv[4]="sigma"
    if(argc == 5) { //If enought arguments given take the info from the them
        // snprintf bounds the copies (strcpy overflowed on args >= 50 chars).
        snprintf(image_file_name, sizeof image_file_name, "%s", argv[1]);
        snprintf(result_file_name, sizeof result_file_name, "%s", argv[2]);
        radius = atoi(argv[3]);
        sigma = atof(argv[4]);
    } else { //Read info from keyboard
        // %49s bounds each read; bail out on malformed input.
        printf("Original image name: ");
        if (scanf("%49s", image_file_name) != 1) return 1;
        printf("Result image name: ");
        if (scanf("%49s", result_file_name) != 1) return 1;
        printf("Radius: ");
        if (scanf("%d", &radius) != 1) return 1;
        printf("Sigma: ");
        if (scanf("%lf", &sigma) != 1) return 1;
    }
    //Load image
    printf("Loading image...\n");
    image = image_load(image_file_name);
    if (!image) {
        fprintf(stderr, "Could not load image '%s'\n", image_file_name);
        return 1;
    }
    //Create filter
    printf("Creating filter...\n");
    filter = filter_create_gauss(radius, sigma);
    double start, stop;
    start = omp_get_wtime();
    //Apply filter
    printf("Appling filter...\n");
    result = apply_filter(image, filter);
    stop = omp_get_wtime();
    printf("Wall Time: ");
    printf("%f \n", stop - start);
    //Write image to disk
    printf("Writing image to disk...\n");
    image_write(result, result_file_name);
    //Free memory
    image_free(image);
    image_free(result);
    filter_free(filter);
    printf("DONE!\n");
    return 0;
}
|
GB_unop__log10_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__log10_fc64_fc64)
// op(A') function: GB (_unop_tran__log10_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = GB_clog10 (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_clog10 (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = GB_clog10 (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG10 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Computes Cx[p] = GB_clog10(Ax[p]) over all anz positions; the bitmap
// branch skips positions with Ab[p] == 0.
GrB_Info GB (_unop_apply__log10_fc64_fc64)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// no bitmap: every position 0..anz-1 holds an entry
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = GB_clog10 (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = GB_clog10 (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Auto-generated wrapper: the transpose machinery lives in the included
// template, which applies the GB_CAST_OP macro defined above for log10/FC64.
GrB_Info GB (_unop_tran__log10_fc64_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
// closes the #ifndef GBCUDA_DEV guard at the top of this file
#endif
|
GB_binop__bxor_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__bxor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__bxor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__bxor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_uint64)
// A*D function (colscale): GB (_AxD__bxor_uint64)
// D*A function (rowscale): GB (_DxB__bxor_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__bxor_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__bxor_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_uint64)
// C=scalar+B GB (_bind1st__bxor_uint64)
// C=scalar+B' GB (_bind1st_tran__bxor_uint64)
// C=A+scalar GB (_bind2nd__bxor_uint64)
// C=A'+scalar GB (_bind2nd_tran__bxor_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = (aij) ^ (bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) ^ (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXOR || GxB_NO_UINT64 || GxB_NO_BXOR_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// (disabled: the comment below lists the only ops with a dense-accum kernel,
// and BXOR is not among them, so no C+=A+B kernel is generated here)
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// NOTE: unlike the other wrappers in this file, this one returns void and has
// no GB_DISABLE guard -- that is how this generator version emits it.
void GB (_Cdense_ewise3_noaccum__bxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bxor_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above already returned; harmless
// generator artifact.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__bxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__bxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Auto-generated wrapper.  alpha/beta scalars are only unpacked when
// is_eWiseUnion is true (presumably the eWiseUnion fill values consumed by
// the included template -- confirm against GB_add_template.c).
GrB_Info GB (_AaddB__bxor_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint64_t alpha_scalar ;
uint64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bxor_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is defined as 0 above (x ^ y is commutative), so only the
// unflipped branch below is compiled in.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bxor_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply the BXOR operator with the scalar bound as the first operand:
// Cx [k] = x XOR Bx [k] for every entry present in B.
// Cx and Bx may be aliased; entries absent from the bitmap Bb are skipped.
GrB_Info GB (_bind1st__bxor_uint64)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    const uint64_t *Bx = (const uint64_t *) Bx_input ;
    uint64_t x = * ((const uint64_t *) x_input) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // only entries present in B are computed
        if (GBB (Bb, k))
        {
            uint64_t bij = GBX (Bx, k, false) ;
            Cx [k] = x ^ bij ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply the BXOR operator with the scalar bound as the second operand:
// Cx [p] = Ax [p] XOR y for every entry present in A (auto-generated).
GrB_Info GB (_bind2nd__bxor_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator/type combination disabled at compile time
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap Ab
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij) ^ (y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) ^ (aij) ; \
}
// C = op (x, A'): transpose A and apply BXOR with the scalar bound first.
// The loop is supplied by GB_unop_transpose.c, driven by the GB_CAST_OP
// macro defined immediately above this function (auto-generated).
GrB_Info GB (_bind1st_tran__bxor_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent code in this translation unit
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) ^ (y) ; \
}
// C = op (A', y): transpose A and apply BXOR with the scalar bound second.
// The loop comes from GB_unop_transpose.c via the GB_CAST_OP macro defined
// immediately above this function (auto-generated).
GrB_Info GB (_bind2nd_tran__bxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// operator/type combination disabled at compile time
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__second_fc64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__second_fc64
// A.*B function (eWiseMult): GB_AemultB__second_fc64
// A*D function (colscale): GB_AxD__second_fc64
// D*A function (rowscale): GB_DxB__second_fc64
// C+=B function (dense accum): GB_Cdense_accumB__second_fc64
// C+=b function (dense accum): GB_Cdense_accumb__second_fc64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__second_fc64
// C=scalar+B (none)
// C=scalar+B' (none)
// C=A+scalar GB_bind2nd__second_fc64
// C=A'+scalar GB_bind2nd_tran__second_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = bij
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
GxB_FC64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = y ;
// op is second
#define GB_OP_IS_SECOND \
1
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_FC64 || GxB_NO_SECOND_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, hard-coded for the SECOND
// operator on double complex: cij = bij (auto-generated; the loop comes
// from GB_dense_ewise3_noaccum_template.c).
GrB_Info GB_Cdense_ewise3_noaccum__second_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator/type combination disabled at compile time
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix with the SECOND
// operator (cij = bij), using the precomputed task slices (auto-generated;
// the loop comes from GB_dense_subassign_23_template.c).
GrB_Info GB_Cdense_accumB__second_fc64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// operator/type combination disabled at compile time
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix with the SECOND operator
// (auto-generated; the loop comes from GB_dense_subassign_22_template.c).
// Fix: the generator emitted a second `return (GrB_SUCCESS) ;` after the
// inner block, which was unreachable dead code; it has been removed.
GrB_Info GB_Cdense_accumb__second_fc64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// operator/type combination disabled at compile time
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D with the SECOND
// operator (auto-generated; the loop comes from GB_AxB_colscale_meta.c).
GrB_Info GB_AxD__second_fc64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// operator/type combination disabled at compile time
return (GrB_NO_VALUE) ;
#else
// the template writes the scaled values directly into C->x
GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D with the SECOND
// operator (auto-generated; the loop comes from GB_AxB_rowscale_meta.c).
GrB_Info GB_DxB__second_fc64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
// operator/type combination disabled at compile time
return (GrB_NO_VALUE) ;
#else
// the template writes the scaled values directly into C->x
GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd C = A+B or C<M> = A+B with the SECOND operator (auto-generated).
// The slice arrays are allocated inside GB_add_template.c and released by
// the GB_FREE_ALL macro defined just above this function.
GrB_Info GB_AaddB__second_fc64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// operator/type combination disabled at compile time
return (GrB_NO_VALUE) ;
#else
// per-matrix ek-slice workspaces, filled in by the template as needed
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B or C<M> = A.*B with the SECOND operator
// (auto-generated; the loop comes from GB_emult_template.c, and the slice
// workspaces are released by the GB_FREE_ALL macro defined above).
GrB_Info GB_AemultB__second_fc64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// operator/type combination disabled at compile time
return (GrB_NO_VALUE) ;
#else
// per-matrix ek-slice workspaces, filled in by the template as needed
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC64_t bij = Bx [p] ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply SECOND with the scalar bound as the second operand:
// Cx [k] = SECOND (Ax [k], y) = y for every entry present in A.
// The aij value is never read, so Ax is only kept for documentation of
// the possible aliasing between Cx and Ax.
GrB_Info GB_bind2nd__second_fc64
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
    GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only entries present in the bitmap/full matrix A are written
        if (GBB (Ab, k))
        {
            Cx [k] = y ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
// C = op (A', y): transpose A and apply SECOND with the scalar bound second,
// so every transposed entry becomes y.  The loop comes from
// GB_unop_transpose.c via the GB_CAST_OP macro defined above (auto-generated).
GrB_Info GB_bind2nd_tran__second_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// operator/type combination disabled at compile time
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
5_for-par0.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <omp.h>
/* Allocate *v and fill it with `size` pseudo-random integers in [0, 10000).
 * Fix: the original never checked malloc and dereferenced a potentially
 * NULL pointer; allocation failure now aborts with a diagnostic.
 * Caller owns the buffer and must free() it. */
void inicializa(int **v, int size) {
    *v = malloc(sizeof **v * (size_t)size);
    if (*v == NULL) {
        fprintf(stderr, "inicializa: out of memory\n");
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < size; i++) {
        (*v)[i] = rand() % 10000;
    }
}
float square(int x){
int k = 0;
while(k < 5000) k++;
return sqrt(x);
}
/* Apply square() to every element of a random vector, manually splitting
 * the index range into one contiguous block per OpenMP thread.
 * Fix: the vector was leaked; it is now freed before exit. */
int main(int argc, char **argv) {
    (void)argc;
    (void)argv;
    srand(time(NULL));
    int *vetor;
    int size = 1000000;
    inicializa(&vetor, size);
    #pragma omp parallel
    {
        /* work division: each thread owns one contiguous chunk */
        int chunk = size / omp_get_num_threads();
        int local_init = omp_get_thread_num() * chunk;
        int local_end = (omp_get_thread_num() + 1) * chunk;
        /* the last thread absorbs the remainder when size is not divisible */
        if ((omp_get_num_threads() - 1) == omp_get_thread_num()) local_end = size;
        /* computation: each thread touches only its own block of memory */
        for (int i = local_init; i < local_end; i++) {
            /* NOTE: square() returns float; storing it in the int array
             * truncates — the explicit cast documents the original
             * (implicit) behavior without changing it */
            vetor[i] = (int) square(vetor[i]);
        }
    }
    free(vetor);   /* fix: original leaked the vector */
    return 0;
}
|
real_self_energy.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include "real_self_energy.h"
#include <math.h>
#include <stdlib.h>
#include "phonoc_array.h"
#include "phonoc_utils.h"
#include "real_to_reciprocal.h"
static double get_real_self_energy_at_band(
const long band_index, const Darray *fc3_normal_squared,
const double fpoint, const double *frequencies, const long (*triplets)[3],
const long *triplet_weights, const double epsilon, const double temperature,
const double unit_conversion_factor, const double cutoff_frequency);
static double sum_real_self_energy_at_band(
const long num_band, const double *fc3_normal_squared, const double fpoint,
const double *freqs1, const double *freqs2, const double epsilon,
const double temperature, const double cutoff_frequency);
static double sum_real_self_energy_at_band_0K(
const long num_band, const double *fc3_normal_squared, const double fpoint,
const double *freqs1, const double *freqs2, const double epsilon,
const double cutoff_frequency);
/* Real part of the phonon self-energy for each requested band at its own
 * harmonic frequency.  For every band index b[i], the frequency of band
 * b[i] at the first grid point of the triplets is used as the evaluation
 * point; bands below the cutoff frequency get a zero shift. */
void rse_get_real_self_energy_at_bands(
double *real_self_energy, const Darray *fc3_normal_squared,
const long *band_indices, const double *frequencies,
const long (*triplets)[3], const long *triplet_weights,
const double epsilon, const double temperature,
const double unit_conversion_factor, const double cutoff_frequency) {
long i, num_band0, num_band, gp0;
double fpoint;
/* fc3_normal_squared dims: [num_triplets][num_band0][num_band][num_band] */
num_band0 = fc3_normal_squared->dims[1];
num_band = fc3_normal_squared->dims[2];
/* first grid point shared by all triplets */
gp0 = triplets[0][0];
/* num_band0 and num_band_indices have to be same. */
for (i = 0; i < num_band0; i++) {
fpoint = frequencies[gp0 * num_band + band_indices[i]];
if (fpoint < cutoff_frequency) {
/* acoustic / imaginary modes below the cutoff contribute nothing */
real_self_energy[i] = 0;
} else {
real_self_energy[i] = get_real_self_energy_at_band(
i, fc3_normal_squared, fpoint, frequencies, triplets,
triplet_weights, epsilon, temperature, unit_conversion_factor,
cutoff_frequency);
}
}
}
/* Real part of the phonon self-energy evaluated at one fixed frequency
 * point for every requested band.  If the frequency point lies below the
 * cutoff, every band gets a zero shift (the per-iteration check of the
 * original is hoisted out of the loop; both operands are loop-invariant). */
void rse_get_real_self_energy_at_frequency_point(
    double *real_self_energy, const double frequency_point,
    const Darray *fc3_normal_squared, const long *band_indices,
    const double *frequencies, const long (*triplets)[3],
    const long *triplet_weights, const double epsilon, const double temperature,
    const double unit_conversion_factor, const double cutoff_frequency) {
    long i;
    const long num_band0 = fc3_normal_squared->dims[1];

    /* num_band0 and the number of band indices have to agree. */
    if (frequency_point < cutoff_frequency) {
        for (i = 0; i < num_band0; i++) {
            real_self_energy[i] = 0;
        }
        return;
    }
    for (i = 0; i < num_band0; i++) {
        real_self_energy[i] = get_real_self_energy_at_band(
            i, fc3_normal_squared, frequency_point, frequencies, triplets,
            triplet_weights, epsilon, temperature, unit_conversion_factor,
            cutoff_frequency);
    }
}
/* Sum the real self-energy contribution of every phonon triplet for one
 * band, weighting each triplet and converting units.  The triplet loop is
 * parallelized with an OpenMP `+` reduction on the accumulator. */
static double get_real_self_energy_at_band(
const long band_index, const Darray *fc3_normal_squared,
const double fpoint, const double *frequencies, const long (*triplets)[3],
const long *triplet_weights, const double epsilon, const double temperature,
const double unit_conversion_factor, const double cutoff_frequency) {
long i, num_triplets, num_band0, num_band, gp1, gp2;
double shift;
num_triplets = fc3_normal_squared->dims[0];
num_band0 = fc3_normal_squared->dims[1];
num_band = fc3_normal_squared->dims[2];
shift = 0;
#ifdef _OPENMP
#pragma omp parallel for private(gp1, gp2) reduction(+ : shift)
#endif
for (i = 0; i < num_triplets; i++) {
/* second and third grid points of the triplet (q', q'') */
gp1 = triplets[i][1];
gp2 = triplets[i][2];
if (temperature > 0) {
shift += sum_real_self_energy_at_band(
num_band,
/* slice of fc3 for this triplet and band: [num_band][num_band] */
fc3_normal_squared->data +
i * num_band0 * num_band * num_band +
band_index * num_band * num_band,
fpoint, frequencies + gp1 * num_band,
frequencies + gp2 * num_band, epsilon, temperature,
cutoff_frequency) *
triplet_weights[i] * unit_conversion_factor;
} else {
/* zero-temperature limit: Bose-Einstein occupations vanish */
shift +=
sum_real_self_energy_at_band_0K(
num_band,
fc3_normal_squared->data +
i * num_band0 * num_band * num_band +
band_index * num_band * num_band,
fpoint, frequencies + gp1 * num_band,
frequencies + gp2 * num_band, epsilon, cutoff_frequency) *
triplet_weights[i] * unit_conversion_factor;
}
}
return shift;
}
/* Double sum over the two partner bands of one triplet: accumulates the
 * finite-temperature real self-energy using Bose-Einstein occupations
 * n1, n2.  Each 1/f pole is smeared as f / (f^2 + epsilon^2); the
 * commented block shows the equivalent unsmeared (sharp-cutoff) form. */
static double sum_real_self_energy_at_band(
const long num_band, const double *fc3_normal_squared, const double fpoint,
const double *freqs1, const double *freqs2, const double epsilon,
const double temperature, const double cutoff_frequency) {
long i, j;
double n1, n2, f1, f2, f3, f4, shift;
/* double sum; */
shift = 0;
for (i = 0; i < num_band; i++) {
/* bands below the cutoff are skipped entirely */
if (freqs1[i] > cutoff_frequency) {
n1 = phonoc_bose_einstein(freqs1[i], temperature);
for (j = 0; j < num_band; j++) {
if (freqs2[j] > cutoff_frequency) {
n2 = phonoc_bose_einstein(freqs2[j], temperature);
/* the four resonance denominators f = omega +/- w1 +/- w2 */
f1 = fpoint + freqs1[i] + freqs2[j];
f2 = fpoint - freqs1[i] - freqs2[j];
f3 = fpoint - freqs1[i] + freqs2[j];
f4 = fpoint + freqs1[i] - freqs2[j];
/* sum = 0;
* if (fabs(f1) > epsilon) {
* sum -= (n1 + n2 + 1) / f1;
* }
* if (fabs(f2) > epsilon) {
* sum += (n1 + n2 + 1) / f2;
* }
* if (fabs(f3) > epsilon) {
* sum -= (n1 - n2) / f3;
* }
* if (fabs(f4) > epsilon) {
* sum += (n1 - n2) / f4;
* }
* shift += sum * fc3_normal_squared[i * num_band + j]; */
shift +=
(-(n1 + n2 + 1) * f1 / (f1 * f1 + epsilon * epsilon) +
(n1 + n2 + 1) * f2 / (f2 * f2 + epsilon * epsilon) -
(n1 - n2) * f3 / (f3 * f3 + epsilon * epsilon) +
(n1 - n2) * f4 / (f4 * f4 + epsilon * epsilon)) *
fc3_normal_squared[i * num_band + j];
}
}
}
}
return shift;
}
/* Zero-temperature limit of the real self-energy double sum: the
 * Bose-Einstein occupations vanish, so only the (n1+n2+1) -> 1 terms with
 * denominators f = omega +/- (w1 + w2) survive.  Each 1/f pole is smeared
 * as f / (f^2 + epsilon^2).  Bands at or below the cutoff are skipped. */
static double sum_real_self_energy_at_band_0K(
    const long num_band, const double *fc3_normal_squared, const double fpoint,
    const double *freqs1, const double *freqs2, const double epsilon,
    const double cutoff_frequency) {
    long i, j;
    double shift = 0;

    for (i = 0; i < num_band; i++) {
        if (freqs1[i] <= cutoff_frequency) {
            continue;
        }
        for (j = 0; j < num_band; j++) {
            double sum_f, diff_f;
            if (freqs2[j] <= cutoff_frequency) {
                continue;
            }
            sum_f = fpoint + freqs1[i] + freqs2[j];
            diff_f = fpoint - freqs1[i] - freqs2[j];
            shift += (-1 * sum_f / (sum_f * sum_f + epsilon * epsilon) +
                      1 * diff_f / (diff_f * diff_f + epsilon * epsilon)) *
                     fc3_normal_squared[i * num_band + j];
        }
    }
    return shift;
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 * NOTE: *y is normalized in place as scratch space (original behavior).
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microsecond field so that
       x->tv_usec >= y->tv_usec afterwards. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_sec += borrow;
        y->tv_usec -= 1000000 * borrow;
    }
    /* Carry in the other direction when the gap exceeds one second. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_sec -= carry;
        y->tv_usec += 1000000 * carry;
    }
    /* tv_usec of the difference is now certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return (x->tv_sec < y->tv_sec) ? 1 : 0;
}
/* Order-1 3D 7-point stencil benchmark with variable coefficients.
 * Fixes relative to the original:
 *   - Nx/Ny/Nz/Nt were read uninitialized (UB) when too few command-line
 *     arguments were given; they now have defaults.
 *   - `min(...)` called an undeclared lowercase function; the macro
 *     defined at the top of this file is MIN.
 *   - the initialization loops started at index 1 and only touched A[0],
 *     so the stencil read uninitialized boundary values; both time planes
 *     and the coefficients are now initialized over the full range.
 *   - the top-level pointer arrays and tile_size were leaked. */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    int Nx, Ny, Nz, Nt;

    /* defaults: 32^3 interior points + 2 boundary layers, 10 timesteps */
    Nx = Ny = Nz = 34;
    Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    // allocate the arrays
    double ****A = (double ****) malloc(sizeof(double***)*2);
    for(m=0; m<2;m++){
        A[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            A[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }
    double ****coef = (double ****) malloc(sizeof(double***)*7);
    for(m=0; m<7;m++){
        coef[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 16;
    tile_size[1] = 16;
    tile_size[2] = 16;
    tile_size[3] = 128;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;
    const int BASE = 1024;

    // initialize variables (full range, both time planes, all coefficients)
    srand(42);
    for (m = 0; m < 2; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                for (k = 0; k < Nx; k++) {
                    A[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }
    for (m=0; m<7; m++) {
        for (i=0; i<Nz; i++) {
            for (j=0; j<Ny; j++) {
                for (k=0; k<Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
    #pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
        #pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                              coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                              coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                              coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                              coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                              coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                              coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        (void) ts_return;
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* fix: the original called undeclared lowercase `min` */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
    #pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    for(m=0; m<7;m++){
        for(i=0; i<Nz; i++){
            for(j=0;j<Ny;j++){
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(A);          /* fix: top-level pointer arrays were leaked */
    free(coef);
    free(tile_size);  /* fix: tile_size was leaked */
    return 0;
}
|
bigtext.h | #ifndef AAALGO_BIGTEXT
#define AAALGO_BIGTEXT
#include <errno.h>
#include <string.h>
#include <iostream>
#include <functional>
#include <vector>
#include <string>
#include <utility>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <boost/progress.hpp>
#include <boost/assert.hpp>
#include <fcntl.h>
#ifdef USE_OPENMP
#include <omp.h>
#endif
// Chunked parallel reader for very large delimited text files.
// The file is split into `chunks` fixed-size byte ranges; each chunk is read
// with pread (extended by up to max_line bytes so a record crossing the
// chunk boundary can be completed), trimmed to whole records, and handed to
// a callback.  One T state object per chunk is stored in the vector base.
// NOTE(review): the default T = void would make std::vector<T> invalid if
// instantiated directly — presumably users always supply T; verify.
template <typename T = void>
class BigText: public std::vector<T> {
size_t offset;       // byte offset in the file where parsing starts
int file;            // O_RDONLY file descriptor, closed in the destructor
char delimiter;      // record separator, '\n' by default
size_t max_line;     // upper bound on the byte length of one record
size_t chunk_size;   // nominal bytes per chunk (clamped to >= DEFAULT_MIN_CHUNK)
size_t total_size;   // file size in bytes, from fstat
size_t chunks;       // number of chunks covering [offset, total_size)
// [begin, end) byte range actually consumed by each chunk; used after
// blocks() to verify full, gap-free, non-overlapping coverage.
std::vector<std::pair<size_t, size_t>> check;
public:
static char const DEFAULT_DELIMITER = '\n';
static size_t const DEFAULT_MAX_LINE = 4096;
static size_t const DEFAULT_MIN_CHUNK = 1 * 1024 * 1024;
static size_t const DEFAULT_MAX_CHUNK = 128 * 1024 * 1024;
// Open `path` read-only and precompute the chunk layout.
// Aborts (BOOST_VERIFY) on open/fstat failure or if max_line >= chunk_size.
BigText (std::string const &path,
size_t off = 0,
char del = DEFAULT_DELIMITER,
size_t ml = DEFAULT_MAX_LINE,
size_t ch = DEFAULT_MAX_CHUNK): offset(off), delimiter(del), max_line(ml), chunk_size(ch) {
file = open(path.c_str(), O_RDONLY);
BOOST_VERIFY(file >= 0);
struct stat st;
int r = fstat(file, &st);
BOOST_VERIFY(r == 0);
total_size = st.st_size; // - offset;
if (chunk_size < DEFAULT_MIN_CHUNK) chunk_size = DEFAULT_MIN_CHUNK;
BOOST_VERIFY(max_line < chunk_size);
// ceiling division: number of chunks needed to cover the file tail
chunks = (total_size - offset + chunk_size - 1) / chunk_size;
this->resize(chunks);
check.resize(chunks);
}
~BigText () {
close(file);
}
// Invoke cb(begin, end, state) once per chunk, where [begin, end) spans
// only whole delimiter-terminated records and state is this chunk's slot.
// With USE_OPENMP the chunks are processed in parallel (one read buffer
// per thread); chunk coverage is verified after the loop.
void blocks (std::function<void(char const *, char const *, T *)> cb) {
boost::progress_display progress(chunks, std::cerr);
#ifdef USE_OPENMP
std::vector<std::string> bufs(omp_get_max_threads());
#pragma omp parallel for schedule(dynamic, 1)
#else
std::string buf;
#endif
for (size_t i = 0; i < chunks; ++i) {
#ifdef USE_OPENMP
std::string &buf = bufs[omp_get_thread_num()];
#endif
// read chunk_size bytes plus max_line slack for the record overhang
size_t sz = chunk_size + max_line;
buf.resize(sz+1);
off_t off = offset + i * chunk_size;
ssize_t rsz = pread(file, &buf[0], sz, off);
if (rsz < 0) {
std::cerr << "pread(" << file << ',' << "..." << ',' << sz << ',' << offset + i * chunk_size << ')' << std::endl;
std::cerr << strerror(errno) << std::endl;
BOOST_VERIFY(0);
}
sz = rsz;
size_t begin = 0;
if (i) {
// skip the partial record owned by the previous chunk
while ((begin < sz) && (buf[begin] != delimiter)) ++begin;
++begin;
BOOST_VERIFY(begin < max_line);
}
size_t end = chunk_size;
if (sz < chunk_size) {
// short read: this is the last chunk of the file
end = sz;
}
else {
// extend past the nominal boundary to finish the current record
while ((end < sz) && (buf[end] != delimiter)) ++end;
if (end < sz) ++end;
}
check[i] = std::make_pair(off + begin, off + end);
cb(&buf[begin], &buf[end], &this->at(i));
#ifdef USE_OPENMP
#pragma omp critical
#endif
++progress;
}
// chunks must tile [offset, total_size) exactly, with no gaps/overlaps
BOOST_VERIFY(check[0].first == offset);
for (unsigned i = 1; i < chunks; ++i) {
BOOST_VERIFY(check[i].first == check[i-1].second);
}
BOOST_VERIFY(check.back().second == total_size);
}
// Invoke cb(line_begin, line_end, state, line_index) once per record; the
// delimiter (when present) is included at the end of each record and the
// line index restarts at 0 within every chunk.
void lines (std::function<void(char const *, char const *, T *, size_t)> cb) {
blocks([this, cb](char const *b, char const *e, T *state) {
size_t n = 0;
while (b < e) {
char const *le = b;
while (le < e && le[0] != delimiter) {
++le;
}
if (le < e) {
++le;
}
cb(b, le, state, n);
++n;
b = le;
}
});
}
};
#endif
|
GB_binop__lt_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lt_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__lt_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__lt_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__lt_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_int8)
// A*D function (colscale): GB (_AxD__lt_int8)
// D*A function (rowscale): GB (_DxB__lt_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__lt_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__lt_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_int8)
// C=scalar+B GB (_bind1st__lt_int8)
// C=scalar+B' GB (_bind1st_tran__lt_int8)
// C=A+scalar GB (_bind2nd__lt_int8)
// C=A'+scalar GB (_bind2nd_tran__lt_int8)
// C type: bool
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_INT8 || GxB_NO_LT_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled for the LT operator: relational operators have no dense
// C += A+B accumulate kernel, so this stub is compiled out and its
// generated name is "(none)".
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator and no mask.
// For this kernel, cij = (aij < bij); all work happens in the template.
void GB (_Cdense_ewise3_noaccum__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads          // number of threads used by the template
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
// For LT this kernel is not applicable, so the inner body is compiled out
// (#if 0) and the function reports success without touching C.
GrB_Info GB (_Cdense_accumB__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
// Not applicable for the LT operator; the body is compiled out (#if 0)
// and the function reports success without modifying C.
GrB_Info GB (_Cdense_accumb__lt_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // pointer to the scalar b, typecast below
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
// C has the same pattern as A; only the numeric array C->x is written.
GrB_Info GB (_AxD__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // result type of LT is bool
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
// C has the same pattern as B; only the numeric array C->x is written.
GrB_Info GB (_DxB__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // result type of LT is bool
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C = A+B, C<M> = A+B, or C<!M> = A+B using the LT
// operator.  For eWiseUnion, alpha/beta supply the values used where one
// of the two entries is not present.
GrB_Info GB (_AaddB__lt_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B; freed by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // eWiseUnion only: typecast the fill scalars to the operator input type
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where the
// result C is sparse or hypersparse.  All work is in the meta template.
GrB_Info GB (_AemultB_08__lt_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  flipxy requests z = op(y,x) instead of z = op(x,y); for LT
// (GB_BINOP_FLIP == 0) the flip has already been handled by the caller.
GrB_Info GB (_AemultB_02__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full.  All work is in the template.
GrB_Info GB (_AemultB_04__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult for a bitmap result: C = A.*B, C<M> = A.*B, or C<!M> = A.*B
// where C is held in bitmap form.  All work is in the template.
GrB_Info GB (_AemultB_bitmap__lt_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply z = (x < bij) to every entry of B, with the
// scalar x bound to the first operand.  Cx and Bx may be aliased; Bb is
// the optional bitmap of B (NULL for full matrices).
GrB_Info GB (_bind1st__lt_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // operate only on entries present in the bitmap
        if (GBB (Bb, p))
        {
            int8_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x < bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply z = (aij < y) to every entry of A, with the
// scalar y bound to the second operand.  Cx and Ax may be aliased; Ab is
// the optional bitmap of A (NULL for full matrices).
GrB_Info GB (_bind2nd__lt_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // operate only on entries present in the bitmap
        if (GBB (Ab, p))
        {
            int8_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij < y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x < aij) ; \
}

// C = op (x, A'): transpose A and apply the binary operator with the
// scalar x bound to the first operand.
GrB_Info GB (_bind1st_tran__lt_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij < y) ; \
}

// C = op (A', y): transpose A and apply the binary operator with the
// scalar y bound to the second operand.
GrB_Info GB (_bind2nd_tran__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
matrix_stat.h | #ifndef MATRIX_STAT_H_
#define MATRIX_STAT_H_
#include <algorithm>
#include <cmath>
#include <vector>
namespace acspo {
// Sum of all non-NaN elements of mat; returns NAN when no element is finite
// in that sense (i.e. every element is NaN or the matrix is empty).
template <typename T>
double sum(const matrix<T> &mat)
{
    const unsigned int n = mat.elem();
    double total = 0;
    unsigned int valid = 0;
    #pragma omp parallel for reduction(+:total,valid)
    for (unsigned int k = 0; k < n; k++) {
        if (!std::isnan(mat(k))) {
            total += mat(k);
            valid++;
        }
    }
    return (valid == 0) ? NAN : total;
}
// Arithmetic mean over the non-NaN elements of mat; NAN if none exist.
template <typename T>
double mean(const matrix<T> &mat)
{
    const unsigned int n = mat.elem();
    double total = 0;
    unsigned int valid = 0;
    #pragma omp parallel for reduction(+:total,valid)
    for (unsigned int k = 0; k < n; k++) {
        if (!std::isnan(mat(k))) {
            total += mat(k);
            valid++;
        }
    }
    if (valid == 0) {
        return NAN;
    }
    return total / valid;
}
// Population variance of the non-NaN elements of mat around the supplied
// mean `avg`; NAN if there are no valid elements.
template <typename T>
double var(const matrix<T> &mat, double avg)
{
    const unsigned int n = mat.elem();
    double acc = 0;
    unsigned int valid = 0;
    #pragma omp parallel for reduction(+:acc,valid)
    for (unsigned int k = 0; k < n; k++) {
        if (!std::isnan(mat(k))) {
            acc += (mat(k)-avg)*(mat(k)-avg);
            valid++;
        }
    }
    if (valid == 0) {
        return NAN;
    }
    return acc / valid;
}
// Population variance of mat, computed around its own NaN-aware mean.
template <typename T>
double var(const matrix<T> &mat)
{
    const double avg = mean(mat);
    return var(mat, avg);
}
// Population standard deviation of mat around the supplied mean `avg`.
template <typename T>
double std_dev(const matrix<T> &mat, double avg)
{
    const double v = var(mat, avg);
    return std::sqrt(v);
}
// Population standard deviation of mat around its own NaN-aware mean.
template <typename T>
double std_dev(const matrix<T> &mat)
{
    const double v = var(mat);
    return std::sqrt(v);
}
// Median of the non-NaN elements of mat; NAN if none exist.  For an even
// number of valid elements, the mean of the two middle values is returned.
template <typename T>
double med(const matrix<T> &mat)
{
    const unsigned int n = mat.elem();
    std::vector<double> vals;
    vals.reserve(n);
    for (unsigned int k = 0; k < n; k++) {
        if (!std::isnan(mat(k))) {
            vals.push_back(mat(k));
        }
    }
    if (vals.empty()) {
        return NAN;
    }
    std::sort(vals.begin(), vals.end());
    const size_t m = vals.size();
    if (m % 2 == 1) {
        return vals[(m - 1) / 2];
    }
    return 0.5 * (vals[m / 2 - 1] + vals[m / 2]);
}
}
#endif
|
a.36.1.c | /* { dg-do run } */
#include <omp.h>
#include <stdlib.h>
/* Worker stub for the A.36 example: in a real program this would process
   the ipoints-sized slice of x owned by thread iam; here it is a no-op. */
void
do_by_16 (float *x, int iam, int ipoints)
{
}
/* OpenMP example A.36: request a team of exactly 16 threads by disabling
   dynamic thread adjustment, then verify the runtime honored the request. */
void
a36 (float *x, int npoints)
{
  int iam, ipoints;
  omp_set_dynamic (0);          /* disable dynamic adjustment of team size */
  omp_set_num_threads (16);     /* request exactly 16 threads */
#pragma omp parallel shared(x, npoints) private(iam, ipoints)
  {
    if (omp_get_num_threads () != 16)
      abort ();                 /* conformance check: team size must be 16 */
    iam = omp_get_thread_num ();
    ipoints = npoints / 16;     /* each thread owns an equal slice */
    do_by_16 (x, iam, ipoints);
  }
}
/* Driver: run the a36 example on a small array.  With npoints = 10 each of
   the 16 threads gets 10/16 == 0 points, so do_by_16 receives ipoints 0. */
int main()
{
  float a[10];
  a36 (a, 10);
  return 0;
}
|
OnDiscMSExperiment.h | // --------------------------------------------------------------------------
// OpenMS -- Open-Source Mass Spectrometry
// --------------------------------------------------------------------------
// Copyright The OpenMS Team -- Eberhard Karls University Tuebingen,
// ETH Zurich, and Freie Universitaet Berlin 2002-2021.
//
// This software is released under a three-clause BSD license:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of any author or any participating institution
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
// For a full list of authors, refer to the file AUTHORS.
// --------------------------------------------------------------------------
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING
// INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#pragma once
#include <OpenMS/INTERFACES/DataStructures.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/KERNEL/MSChromatogram.h>
#include <OpenMS/METADATA/ExperimentalSettings.h>
#include <OpenMS/FORMAT/HANDLERS/IndexedMzMLHandler.h>
#include <vector>
#include <limits>
#include <boost/shared_ptr.hpp>
namespace OpenMS
{
/**
@brief Representation of a mass spectrometry experiment on disk.
@ingroup Kernel
@note This implementation is @a not thread-safe since it keeps internally a
single file access pointer which it moves when accessing a specific
data item. Please provide a separate copy to each thread, e.g.
@code
#pragma omp parallel for firstprivate(ondisc_map)
@endcode
*/
class OPENMS_DLLAPI OnDiscMSExperiment
{
  // peak types matching the in-memory MSExperiment counterparts
  typedef ChromatogramPeak ChromatogramPeakT;
  typedef Peak1D PeakT;

public:

  /**
    @brief Constructor

    This initializes the object, use openFile to open a file.
  */
  OnDiscMSExperiment() = default;

  /**
    @brief Open a specific file on disk.

    This tries to read the indexed mzML by parsing the index and then reading
    the meta information into memory.

    @return Whether the parsing of the file was successful (if false, the
    file most likely was not an indexed mzML file)
  */
  bool openFile(const String& filename, bool skipMetaData = false);

  /// Copy constructor
  // Note: this copies the file handler as well; per the class docs the
  // object is not thread-safe, so each thread should hold its own copy.
  OnDiscMSExperiment(const OnDiscMSExperiment& source) :
    filename_(source.filename_),
    indexed_mzml_file_(source.indexed_mzml_file_),
    meta_ms_experiment_(source.meta_ms_experiment_)
  {
  }

  /**
    @brief Equality operator

    This only checks whether the underlying file is the same and the parsed
    meta-information is the same. Note that the file reader (e.g. the
    std::ifstream of the file) might be in a different state.
  */
  bool operator==(const OnDiscMSExperiment& rhs) const
  {
    // if either side has no meta data, compare the (possibly null) pointers
    if (meta_ms_experiment_ == nullptr || rhs.meta_ms_experiment_ == nullptr)
    {
      return filename_ == rhs.filename_ &&
             meta_ms_experiment_ == rhs.meta_ms_experiment_;
    }

    // check if file and meta information is the same
    return filename_ == rhs.filename_ &&
           (*meta_ms_experiment_) == (*rhs.meta_ms_experiment_);
    // do not check if indexed_mzml_file_ is equal -> they have the same filename...
  }

  /// Inequality operator
  bool operator!=(const OnDiscMSExperiment& rhs) const
  {
    return !(operator==(rhs));
  }

  /**
    @brief Checks if all spectra are sorted with respect to ascending RT

    Note that we cannot check whether all spectra are sorted (except if we
    were to load them all and check).
  */
  bool isSortedByRT() const
  {
    // without meta data no ordering information is available
    if (!meta_ms_experiment_) return false;

    return meta_ms_experiment_->isSorted(false);
  }

  /// alias for getNrSpectra
  inline Size size() const
  {
    return getNrSpectra();
  }

  /// returns whether spectra are empty
  inline bool empty() const
  {
    return getNrSpectra() == 0;
  }

  /// get the total number of spectra available
  inline Size getNrSpectra() const
  {
    return indexed_mzml_file_.getNrSpectra();
  }

  /// get the total number of chromatograms available
  inline Size getNrChromatograms() const
  {
    return indexed_mzml_file_.getNrChromatograms();
  }

  /// returns the meta information of this experiment (const access)
  boost::shared_ptr<const ExperimentalSettings> getExperimentalSettings() const
  {
    return boost::static_pointer_cast<const ExperimentalSettings>(meta_ms_experiment_);
  }

  /// returns the shared pointer to the in-memory meta data experiment
  /// (may be null if the file was opened with skipMetaData)
  boost::shared_ptr<PeakMap> getMetaData() const
  {
    return meta_ms_experiment_;
  }

  /// alias for getSpectrum
  inline MSSpectrum operator[](Size n)
  {
    return getSpectrum(n);
  }

  /**
    @brief returns a single spectrum

    @param id The index of the spectrum
  */
  MSSpectrum getSpectrum(Size id)
  {
    // without meta data, return the raw spectrum from the indexed file
    if (!meta_ms_experiment_) return indexed_mzml_file_.getMSSpectrumById(int(id));

    // start from the meta data copy, then fill in the peak data from disk
    MSSpectrum spectrum(meta_ms_experiment_->operator[](id));
    indexed_mzml_file_.getMSSpectrumById(int(id), spectrum);
    return spectrum;
  }

  /**
    @brief returns a single spectrum
  */
  OpenMS::Interfaces::SpectrumPtr getSpectrumById(Size id)
  {
    return indexed_mzml_file_.getSpectrumById((int)id);
  }

  /**
    @brief returns a single chromatogram

    @param id The index of the chromatogram
  */
  MSChromatogram getChromatogram(Size id)
  {
    // without meta data, return the raw chromatogram from the indexed file
    if (!meta_ms_experiment_) return indexed_mzml_file_.getMSChromatogramById(int(id));

    // start from the meta data copy, then fill in the peak data from disk
    MSChromatogram chromatogram(meta_ms_experiment_->getChromatogram(id));
    indexed_mzml_file_.getMSChromatogramById(int(id), chromatogram);
    return chromatogram;
  }

  /**
    @brief returns a single chromatogram

    @param id The native identifier of the chromatogram
  */
  MSChromatogram getChromatogramByNativeId(const std::string& id);

  /**
    @brief returns a single spectrum

    @param id The native identifier of the spectrum
  */
  MSSpectrum getSpectrumByNativeId(const std::string& id);

  /**
    @brief returns a single chromatogram
  */
  OpenMS::Interfaces::ChromatogramPtr getChromatogramById(Size id);

  /// sets whether to skip some XML checks and be fast instead
  void setSkipXMLChecks(bool skip);

private:

  /// Private Assignment operator -> we cannot copy file streams in IndexedMzMLHandler
  OnDiscMSExperiment& operator=(const OnDiscMSExperiment& /* source */);

  // load the meta data (everything except the raw peak data) from the file
  void loadMetaData_(const String& filename);

  // look up a chromatogram/spectrum in the meta data by its native id
  MSChromatogram getMetaChromatogramById_(const std::string& id);

  MSSpectrum getMetaSpectrumById_(const std::string& id);

protected:

  /// The filename of the underlying data file
  String filename_;
  /// The index of the underlying data file
  Internal::IndexedMzMLHandler indexed_mzml_file_;
  /// The meta-data
  boost::shared_ptr<PeakMap> meta_ms_experiment_;
  /// Mapping of chromatogram native ids to offsets
  std::unordered_map< std::string, Size > chromatograms_native_ids_;
  /// Mapping of spectra native ids to offsets
  std::unordered_map< std::string, Size > spectra_native_ids_;
};
typedef OpenMS::OnDiscMSExperiment OnDiscPeakMap;
} // namespace OpenMS
|
ZQ_FaceIDPrecisionEvaluation.h | #ifndef _ZQ_FACEID_PRECISION_EVALUATION_H_
#define _ZQ_FACEID_PRECISION_EVALUATION_H_
#pragma once
#include "ZQ_FaceRecognizer.h"
#include "ZQ_FaceFeature.h"
#include "ZQ_MathBase.h"
#include "ZQ_MergeSort.h"
#include <opencv2\opencv.hpp>
#include <vector>
#include <stdlib.h>
#include <string>
#include <omp.h>
namespace ZQ
{
class ZQ_FaceIDPrecisionEvaluation
{
// One LFW verification pair: two face images with their names, image
// indices, ground-truth label, and the extracted feature vectors.
class EvaluationPair
{
public:
    std::string fileL;      // full path of the left image
    std::string nameL;      // person name of the left image
    int idL;                // image index (within that person) of the left image
    std::string fileR;      // full path of the right image
    std::string nameR;      // person name of the right image
    int idR;                // image index (within that person) of the right image
    int flag; //-1 or 1 : 1 = same person, -1 = different persons
    ZQ_FaceFeature featL;   // feature vector extracted from the left image
    ZQ_FaceFeature featR;   // feature vector extracted from the right image
    bool valid;             // false if image loading / feature extraction failed
};
// A single face sample (name + image index + feature), ordered primarily by
// case-insensitive name (MSVC _strcmpi) and secondarily by image index.
class EvaluationSingle
{
public:
    std::string name;       // person name
    int id;                 // image index within that person
    ZQ_FaceFeature feat;    // extracted feature vector

    // deep-copies the feature data (ZQ_FaceFeature::CopyData)
    EvaluationSingle& operator = (const EvaluationSingle& v2)
    {
        name = v2.name;
        id = v2.id;
        feat.CopyData(v2.feat);
        return *this;
    }

    // lexicographic: case-insensitive name first, then id
    bool operator < (const EvaluationSingle& v2) const
    {
        int cmp_v = _strcmpi(name.c_str(), v2.name.c_str());
        if (cmp_v < 0)
            return true;
        else if (cmp_v > 0)
            return false;
        else
        {
            return id < v2.id;
        }
    }

    bool operator > (const EvaluationSingle& v2) const
    {
        int cmp_v = _strcmpi(name.c_str(), v2.name.c_str());
        if (cmp_v > 0)
            return true;
        else if (cmp_v < 0)
            return false;
        else
        {
            return id > v2.id;
        }
    }

    bool operator == ( const EvaluationSingle& v2) const
    {
        int cmp_v = _strcmpi(name.c_str(), v2.name.c_str());
        return cmp_v == 0 && id == v2.id;
    }

    // true if the two samples belong to the same (case-insensitive) person
    bool SameName(const EvaluationSingle& v2) const
    {
        int cmp_v = _strcmpi(name.c_str(), v2.name.c_str());
        return cmp_v == 0;
    }
};
public:
// Run the LFW verification benchmark.
//
// recognizers: one recognizer instance per worker thread (all must share the
//              same feature dimension); list_file: the LFW pairs list;
// folder:      root folder of the aligned LFW images;
// use_flip:    if true, features of the horizontally flipped images are
//              concatenated, doubling the feature dimension.
//
// Returns true on success, false on failure (no recognizers, bad list file).
//
// BUG FIX: on a list-parse failure this used to `return EXIT_FAILURE;`
// (EXIT_FAILURE == 1 converts to true), so callers saw *success*; it now
// correctly returns false.
static bool EvaluationOnLFW(std::vector<ZQ_FaceRecognizer*>& recognizers, const std::string& list_file, const std::string& folder, bool use_flip)
{
    int recognizer_num = recognizers.size();
    if (recognizer_num == 0)
        return false;
    // leave one core free; never exceed the number of recognizer instances
    int real_num_threads = __max(1, __min(recognizer_num, omp_get_num_procs() - 1));
    int feat_dim = recognizers[0]->GetFeatDim();
    int real_dim = use_flip ? (feat_dim * 2) : feat_dim;
    printf("feat_dim = %d, real_dim = %d\n", feat_dim, real_dim);
    std::vector<std::vector<EvaluationPair>> pairs;
    if (!_parse_lfw_list(list_file, folder, pairs))
    {
        printf("failed to parse list file %s\n", list_file.c_str());
        return false;   // was: return EXIT_FAILURE; (== true)
    }
    printf("parse list file %s done!\n", list_file.c_str());
    int part_num = pairs.size();
    // flatten (part, pair) indices so work can be distributed evenly
    std::vector<std::pair<int, int>> pair_list;
    for (int i = 0; i < part_num; i++)
    {
        for (int j = 0; j < pairs[i].size(); j++)
        {
            pair_list.push_back(std::make_pair(i, j));
        }
    }
    double t1 = omp_get_wtime();
    if (real_num_threads == 1)
    {
        // serial path: always use recognizers[0]
        int handled_num = 0;
        for (int nn = 0; nn < pair_list.size(); nn++)
        {
            handled_num++;
            if (handled_num % 100 == 0)
                printf("%d handled\n", handled_num);
            int i = pair_list[nn].first;
            int j = pair_list[nn].second;
            pairs[i][j].featL.ChangeSize(real_dim);
            pairs[i][j].featR.ChangeSize(real_dim);
            cv::Mat imgL = cv::imread(pairs[i][j].fileL);
            if (imgL.empty())
            {
                printf("failed to load image %s\n", pairs[i][j].fileL.c_str());
                pairs[i][j].valid = false;
                continue;
            }
            cv::Mat imgR = cv::imread(pairs[i][j].fileR);
            if (imgR.empty())
            {
                printf("failed to load image %s\n", pairs[i][j].fileR.c_str());
                pairs[i][j].valid = false;
                continue;
            }
            if (!recognizers[0]->ExtractFeature(imgL.data, imgL.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featL.pData, true))
            {
                printf("failed to extract feature for image %s\n", pairs[i][j].fileL.c_str());
                pairs[i][j].valid = false;
                continue;
            }
            if (!recognizers[0]->ExtractFeature(imgR.data, imgR.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featR.pData, true))
            {
                printf("failed to extract feature for image %s\n", pairs[i][j].fileR.c_str());
                pairs[i][j].valid = false;
                continue;
            }
            if (use_flip)
            {
                // second half of the feature comes from the mirrored images
                cv::flip(imgL, imgL, 1);
                cv::flip(imgR, imgR, 1);
                if (!recognizers[0]->ExtractFeature(imgL.data, imgL.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featL.pData + feat_dim, true))
                {
                    printf("failed to extract feature for image %s\n", pairs[i][j].fileL.c_str());
                    pairs[i][j].valid = false;
                    continue;
                }
                if (!recognizers[0]->ExtractFeature(imgR.data, imgR.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featR.pData + feat_dim, true))
                {
                    printf("failed to extract feature for image %s\n", pairs[i][j].fileR.c_str());
                    pairs[i][j].valid = false;
                    continue;
                }
            }
            pairs[i][j].valid = true;
        }
    }
    else
    {
        // parallel path: each thread uses its own recognizer instance
        int handled_num = 0;
#pragma omp parallel for schedule(dynamic, 10) num_threads(real_num_threads)
        for (int nn = 0; nn < pair_list.size(); nn++)
        {
#pragma omp critical
            {
                handled_num++;
                if (handled_num % 100 == 0)
                {
                    printf("%d handled\n", handled_num);
                }
            }
            int thread_id = omp_get_thread_num();
            int i = pair_list[nn].first;
            int j = pair_list[nn].second;
            pairs[i][j].featL.ChangeSize(real_dim);
            pairs[i][j].featR.ChangeSize(real_dim);
            cv::Mat imgL = cv::imread(pairs[i][j].fileL);
            if (imgL.empty())
            {
#pragma omp critical
                {
                    printf("failed to load image %s\n", pairs[i][j].fileL.c_str());
                }
                pairs[i][j].valid = false;
                continue;
            }
            cv::Mat imgR = cv::imread(pairs[i][j].fileR);
            if (imgR.empty())
            {
#pragma omp critical
                {
                    printf("failed to load image %s\n", pairs[i][j].fileR.c_str());
                }
                pairs[i][j].valid = false;
                continue;
            }
            if (!recognizers[thread_id]->ExtractFeature(imgL.data, imgL.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featL.pData, true))
            {
#pragma omp critical
                {
                    printf("failed to extract feature for image %s\n", pairs[i][j].fileL.c_str());
                }
                pairs[i][j].valid = false;
                continue;
            }
            if (!recognizers[thread_id]->ExtractFeature(imgR.data, imgR.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featR.pData, true))
            {
#pragma omp critical
                {
                    printf("failed to extract feature for image %s\n", pairs[i][j].fileR.c_str());
                }
                pairs[i][j].valid = false;
                continue;
            }
            if (use_flip)
            {
                cv::flip(imgL, imgL, 1);
                cv::flip(imgR, imgR, 1);
                if (!recognizers[thread_id]->ExtractFeature(imgL.data, imgL.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featL.pData + feat_dim, true))
                {
#pragma omp critical
                    {
                        printf("failed to extract feature for image %s\n", pairs[i][j].fileL.c_str());
                    }
                    pairs[i][j].valid = false;
                    continue;
                }
                if (!recognizers[thread_id]->ExtractFeature(imgR.data, imgR.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featR.pData + feat_dim, true))
                {
#pragma omp critical
                    {
                        printf("failed to extract feature for image %s\n", pairs[i][j].fileR.c_str());
                    }
                    pairs[i][j].valid = false;
                    continue;
                }
            }
            pairs[i][j].valid = true;
        }
    }
    printf("extract feature done!\n");   // was missing the '\n'
    double t2 = omp_get_wtime();
    printf("extract features cost: %.3f secs\n", t2 - t1);
    // drop pairs that failed to load/extract; L2-normalize the rest
    int erased_num = 0;
    for (int i = 0; i < part_num; i++)
    {
        for (int j = pairs[i].size() - 1; j >= 0; j--)
        {
            if (!pairs[i][j].valid)
            {
                pairs[i].erase(pairs[i].begin() + j);
                erased_num++;
            }
            else
            {
                ZQ_MathBase::Normalize(real_dim, pairs[i][j].featL.pData);
                ZQ_MathBase::Normalize(real_dim, pairs[i][j].featR.pData);
            }
        }
    }
    printf("%d pairs haved been erased\n", erased_num);
    // gather every individual sample for the FAR/TAR statistics
    std::vector<EvaluationSingle> singles;
    for (int i = 0; i < part_num; i++)
    {
        for (int j = 0; j < pairs[i].size(); j++)
        {
            EvaluationSingle cur_single;
            cur_single.name = pairs[i][j].nameL;
            cur_single.id = pairs[i][j].idL;
            cur_single.feat.CopyData(pairs[i][j].featL);
            singles.push_back(cur_single);
            cur_single.name = pairs[i][j].nameR;
            cur_single.id = pairs[i][j].idR;
            cur_single.feat.CopyData(pairs[i][j].featR);
            singles.push_back(cur_single);
        }
    }
    // both helpers print their own reports; the accuracy value itself is
    // not needed here (the unused local `ACC` was removed)
    _compute_accuracy(pairs);
    _compute_far_tar(singles, real_num_threads);
    return true;
}
private:
// Leave-one-out cross-validated verification accuracy over the LFW folds:
// for each fold i, the mean feature and the score threshold are fitted on
// the other folds, then accuracy is measured on fold i.
// Returns the SUM of the per-fold accuracies (the average is only printed).
static float _compute_accuracy(const std::vector<std::vector<EvaluationPair>>& pairs)
{
    int part_num = pairs.size();
    std::vector<float> ACCs(part_num);
    float ACC = 0;
    for (int i = 0; i < part_num; i++)
    {
        // validation set = all folds except fold i
        std::vector<EvaluationPair> val_pairs;
        for (int j = 0; j < part_num; j++)
        {
            if (j != i)
                val_pairs.insert(val_pairs.end(), pairs[j].begin(), pairs[j].end());
        }
        ZQ_FaceFeature mu;
        _compute_mu(val_pairs, mu);
        std::vector<double> val_scores, test_scores;
        _compute_scores(val_pairs, mu, val_scores);
        _compute_scores(pairs[i], mu, test_scores);
        // pick the threshold on the validation folds, evaluate on fold i
        double threshold = _get_threshold(val_pairs, val_scores, 10000);
        ACCs[i] = _get_accuracy(pairs[i], test_scores, threshold);
        ACC += ACCs[i];
        printf("%d\t%2.2f%% (threshold = %f)\n", i, ACCs[i] * 100, threshold);
        /*const static int BUF_LEN = 50;
        char file[BUF_LEN];
        sprintf_s(file, BUF_LEN, "%d_mu.txt", i);
        FILE* out = 0;
        fopen_s(&out, file, "w");
        for (int k = 0; k < mu.length; k++)
        fprintf(out, "%12.6f\n", mu.pData[k]);
        fclose(out);
        sprintf_s(file, BUF_LEN, "%d_validscores.txt", i);
        fopen_s(&out, file, "w");
        for (int k = 0; k < val_scores.size(); k++)
        fprintf(out, "%12.6f\n", val_scores[k]);
        fclose(out);
        sprintf_s(file, BUF_LEN, "%d_testscores.txt", i);
        fopen_s(&out, file, "w");
        for (int k = 0; k < test_scores.size(); k++)
        fprintf(out, "%12.6f\n", test_scores[k]);
        fclose(out);*/
    }
    printf("----------------\n");
    printf("AVE\t%2.2f%%\n", ACC / part_num * 100);
    return ACC;
}
// Parse the LFW pairs list into `pairs` (one vector per fold).
// Format: header "<part_num> <half_pair_num>", then per fold 2*half_pair_num
// tab-separated lines: "name idL idR" (same person) or
// "nameL idL nameR idR" (different persons).  Lines with any other field
// count are silently skipped, as before.
// Fixes: the per-line fgets used the magic number 199 instead of BUF_LEN;
// the header read and line reads were unchecked; line[len-1] was read
// without guarding against an empty line.
static bool _parse_lfw_list(const std::string& list_file, const std::string& folder, std::vector<std::vector<EvaluationPair>>& pairs)
{
    FILE* in = 0;
    if (0 != fopen_s(&in, list_file.c_str(), "r"))
        return false;
    int part_num, half_pair_num;
    const static int BUF_LEN = 200;
    char line[BUF_LEN];
    // header line: fold count and half the number of pairs per fold
    if (fgets(line, BUF_LEN, in) == 0
        || 2 != sscanf_s(line, "%d%d", &part_num, &half_pair_num)
        || part_num < 0 || half_pair_num < 0)
    {
        fclose(in);
        return false;
    }
    pairs.resize(part_num);
    std::vector<std::string> strings;
    for (int i = 0; i < part_num; i++)
    {
        for (int j = 0; j < 2 * half_pair_num; j++)
        {
            if (fgets(line, BUF_LEN, in) == 0)   // was: fgets(line, 199, in)
                break;                           // truncated file: stop this fold
            size_t len = strlen(line);
            if (len > 0 && line[len - 1] == '\n')
                line[--len] = '\0';
            std::string input = line;
            _split_string(input, std::string("\t"), strings);
            if (strings.size() == 3)
            {
                // "name idL idR": positive (same-person) pair
                EvaluationPair cur_pair;
                cur_pair.nameL = strings[0];
                cur_pair.nameR = strings[0];
                cur_pair.idL = atoi(strings[1].c_str());
                cur_pair.idR = atoi(strings[2].c_str());
                char num2str[BUF_LEN];
                sprintf_s(num2str, BUF_LEN, "_%04i.jpg", atoi(strings[1].c_str()));
                cur_pair.fileL = folder + "\\" + strings[0] + "\\" + strings[0] + std::string(num2str);
                sprintf_s(num2str, BUF_LEN, "_%04i.jpg", atoi(strings[2].c_str()));
                cur_pair.fileR = folder + "\\" + strings[0] + "\\" + strings[0] + std::string(num2str);
                cur_pair.flag = 1;
                pairs[i].push_back(cur_pair);
            }
            else if (strings.size() == 4)
            {
                // "nameL idL nameR idR": negative (different-person) pair
                EvaluationPair cur_pair;
                cur_pair.nameL = strings[0];
                cur_pair.nameR = strings[2];
                cur_pair.idL = atoi(strings[1].c_str());
                cur_pair.idR = atoi(strings[3].c_str());
                char num2str[BUF_LEN];
                sprintf_s(num2str, BUF_LEN, "_%04i.jpg", atoi(strings[1].c_str()));
                cur_pair.fileL = folder + "\\" + strings[0] + "\\" + strings[0] + std::string(num2str);
                sprintf_s(num2str, BUF_LEN, "_%04i.jpg", atoi(strings[3].c_str()));
                cur_pair.fileR = folder + "\\" + strings[2] + "\\" + strings[2] + std::string(num2str);
                cur_pair.flag = -1;
                pairs[i].push_back(cur_pair);
            }
        }
    }
    fclose(in);
    return true;
}
static bool _compute_mu(const std::vector<EvaluationPair>& val_pairs, ZQ_FaceFeature& mu)
{
if (val_pairs.size() == 0)
return false;
int feat_dim = val_pairs[0].featL.length;
mu.ChangeSize(feat_dim);
std::vector<double> sum(feat_dim);
for (int dd = 0; dd < feat_dim; dd++)
sum[dd] = 0;
for (int i = 0; i < val_pairs.size(); i++)
{
for (int dd = 0; dd < feat_dim; dd++)
{
sum[dd] += val_pairs[i].featL.pData[dd];
sum[dd] += val_pairs[i].featR.pData[dd];
}
}
for (int dd = 0; dd < feat_dim; dd++)
{
mu.pData[dd] = sum[dd] / (2 * val_pairs.size());
}
return true;
}
/* For every pair: subtract mu from both features, L2-normalize each side
   (sides with zero norm are left unscaled), and store their dot product
   (cosine similarity) in scores. Returns false when pairs is empty. */
static bool _compute_scores(const std::vector<EvaluationPair>& pairs, const ZQ_FaceFeature& mu, std::vector<double>& scores)
{
	const int num = (int)pairs.size();
	if (num == 0)
		return false;
	const int dim = mu.length;
	scores.resize(num);
	std::vector<double> vl(dim), vr(dim);
	for (int p = 0; p < num; p++)
	{
		/* center both sides and accumulate squared norms in one pass */
		double nl = 0, nr = 0;
		for (int d = 0; d < dim; d++)
		{
			vl[d] = pairs[p].featL.pData[d] - mu.pData[d];
			vr[d] = pairs[p].featR.pData[d] - mu.pData[d];
			nl += vl[d] * vl[d];
			nr += vr[d] * vr[d];
		}
		nl = sqrt(nl);
		nr = sqrt(nr);
		/* cosine similarity; a zero-norm side is used as-is, matching the
		   divide-only-if-nonzero behavior */
		double dot = 0;
		for (int d = 0; d < dim; d++)
		{
			const double a = (nl != 0) ? vl[d] / nl : vl[d];
			const double b = (nr != 0) ? vr[d] / nr : vr[d];
			dot += a * b;
		}
		scores[p] = dot;
	}
	return true;
}
/* Sweep 2*thrNum+1 candidate thresholds uniformly over [-1, 1] and return
   the mean of every threshold that attains the best accuracy on the given
   validation pairs/scores. */
static float _get_threshold(const std::vector<EvaluationPair>& pairs, const std::vector<double>& scores, int thrNum)
{
	const int steps = 2 * thrNum + 1;
	double best_acc = -1;
	double thr_sum = 0;
	int thr_cnt = 0;
	for (int k = 0; k < steps; k++)
	{
		const double thr = (double)k / thrNum - 1;
		const double acc = _get_accuracy(pairs, scores, thr);
		if (acc > best_acc)
		{
			/* new best: restart the average over maximizing thresholds */
			best_acc = acc;
			thr_sum = thr;
			thr_cnt = 1;
		}
		else if (acc == best_acc)
		{
			/* tie with the current best: include this threshold too */
			thr_sum += thr;
			thr_cnt++;
		}
	}
	return (float)(thr_sum / thr_cnt);
}
static float _get_accuracy(const std::vector<EvaluationPair>& pairs, const std::vector<double>& scores, double threshold)
{
if (pairs.size() == 0 || pairs.size() != scores.size())
return 0;
double sum = 0;
for (int i = 0; i < pairs.size(); i++)
{
if (pairs[i].flag > 0 && scores[i] > threshold || pairs[i].flag < 0 && scores[i] < threshold)
sum++;
}
return sum / pairs.size();
}
/* Split s on any character of delim, storing the tokens in ret (ret is
   cleared first). Consecutive delimiters yield empty tokens; a trailing
   delimiter (or an empty s) does not append a trailing empty token. */
static void _split_string(const std::string& s, const std::string& delim, std::vector< std::string >& ret)
{
	ret.clear();
	size_t last = 0;
	size_t index = s.find_first_of(delim, last);
	while (index != std::string::npos)
	{
		ret.push_back(s.substr(last, index - last));
		last = index + 1;
		index = s.find_first_of(delim, last);
	}
	/* BUG FIX: the old test "index - last > 0" subtracted from npos in
	   unsigned arithmetic, so it was effectively always true and pushed a
	   spurious empty token when s ended with a delimiter or was empty. */
	if (last < s.size())
		ret.push_back(s.substr(last));
}
/* Build the full cross-image score matrix over all distinct images and
   print the score threshold plus TAR (true accept rate) at FAR (false
   accept rate) levels 1e-6, 1e-5, 1e-4 and 1e-3.
   NOTE(review): depends on ZQ_MergeSort's in-place sorting semantics and
   on EvaluationSingle's operator== / SameName() (defined elsewhere), so
   the statement order here is load-bearing and the code is left as-is. */
static void _compute_far_tar(std::vector<EvaluationSingle>& singles, int real_num_threads)
{
	printf("compute far tar begin\n");
	/* sort so duplicates become adjacent, then erase them back-to-front
	   (back-to-front keeps earlier indices valid during erase) */
	ZQ_MergeSort::MergeSort(&singles[0], singles.size(), true);
	int removed_num = 0;
	for (int i = singles.size() - 2; i >= 0; i--)
	{
		if (singles[i] == singles[i + 1])
		{
			singles.erase(singles.begin() + i + 1);
			removed_num++;
		}
	}
	int image_num = singles.size();
	printf("%d removed, remain %d\n", removed_num, image_num);
	/* enumerate all unordered image pairs (upper triangle) */
	int all_num = image_num*(image_num - 1)/2;
	std::vector<float> all_scores(all_num);
	std::vector<int> all_idx_i(all_num), all_idx_j(all_num);
	std::vector<bool> all_flag(all_num);
	std::vector<int> sort_indices(all_num);
	int idx = 0;
	int same_num = 0;
	for (int i = 0; i < image_num; i++)
	{
		for (int j = i + 1; j < image_num; j++)
		{
			all_idx_i[idx] = i;
			all_idx_j[idx] = j;
			/* same person name => genuine (positive) pair */
			bool is_same = singles[i].SameName(singles[j]);
			all_flag[idx] = is_same;
			if (is_same)
				same_num++;
			sort_indices[idx] = idx;
			idx++;
		}
	}
	int notsame_num = all_num - same_num;
	printf("all_num = %d, same_num = %d, notsame_num = %d\n", all_num, same_num, notsame_num);
	double t1 = omp_get_wtime();
	int dim = singles[0].feat.length;
	/* score every pair by feature dot product, serially or with OpenMP */
	if (real_num_threads == 1)
	{
		for (int n = 0; n < all_num; n++)
		{
			int i = all_idx_i[n];
			int j = all_idx_j[n];
			all_scores[n] = ZQ_MathBase::DotProduct(dim, singles[i].feat.pData, singles[j].feat.pData);
		}
	}
	else
	{
		/* static chunking so each thread gets one contiguous range */
		int chunk_size = (all_num + real_num_threads - 1) / real_num_threads;
#pragma omp parallel for schedule(static, chunk_size) num_threads(real_num_threads)
		for (int n = 0; n < all_num; n++)
		{
			int i = all_idx_i[n];
			int j = all_idx_j[n];
			all_scores[n] = ZQ_MathBase::DotProduct(dim, singles[i].feat.pData, singles[j].feat.pData);
		}
	}
	double t2 = omp_get_wtime();
	printf("compute all scores cost: %.3f secs\n", t2 - t1);
	/* descending sort (flag=false) with companion index permutation */
	ZQ_MergeSort::MergeSort(&all_scores[0], &sort_indices[0], all_num, false);
	double t3 = omp_get_wtime();
	printf("sort all scores cost: %.3f secs\n", t3 - t2);
	/* impostor-count budgets corresponding to the four FAR levels */
	const int stage_num = 4;
	double far_num[stage_num] =
	{
		1e-6 * notsame_num,
		1e-5 * notsame_num,
		1e-4 * notsame_num,
		1e-3 * notsame_num
	};
	int cur_far_num = 0;
	int cur_tar_num = 0;
	int cur_stage = 0;
	/* walk scores from high to low, counting accepted genuine (tar) and
	   impostor (far) pairs; report each time a FAR budget is crossed */
	for (int i = 0; i < all_num; i++)
	{
		if (cur_stage >= stage_num)
			break;
		int sort_id = sort_indices[i];
		if (all_flag[sort_id])
		{
			cur_tar_num++;
		}
		else
		{
			cur_far_num++;
		}
		if (cur_far_num > far_num[cur_stage])
		{
			printf("thresh = %.5f far = %15e, tar = %15f\n", all_scores[i],
				(double)cur_far_num / notsame_num, (double)cur_tar_num / same_num);
			cur_stage++;
		}
	}
}
};
}
#endif
|
callback.h | #ifndef _BSD_SOURCE
#define _BSD_SOURCE
#endif
#ifndef _DEFAULT_SOURCE
#define _DEFAULT_SOURCE
#endif
#include <stdio.h>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
#include <omp.h>
#include <omp-tools.h>
#include "ompt-signal.h"
// Used to detect architecture
#include "../../src/kmp_platform.h"
#ifndef _TOOL_PREFIX
#define _TOOL_PREFIX ""
// If no _TOOL_PREFIX is set, we assume that we run as part of an OMPT test
#define _OMPT_TESTS
#endif
// Human-readable names for OMPT enum values, indexed by the enum's
// numeric value; used when printing callback events below.
static const char *ompt_thread_t_values[] = {
    "ompt_thread_UNDEFINED", "ompt_thread_initial", "ompt_thread_worker",
    "ompt_thread_other"};
static const char *ompt_task_status_t_values[] = {
    "ompt_task_UNDEFINED",
    "ompt_task_complete", // 1
    "ompt_task_yield", // 2
    "ompt_task_cancel", // 3
    "ompt_task_detach", // 4
    "ompt_task_early_fulfill", // 5
    "ompt_task_late_fulfill", // 6
    "ompt_task_switch", // 7
    "ompt_taskwait_complete" // 8
};
// Cancel flags are a bit set; this table is indexed by bit position
// (see on_ompt_callback_cancel), not by raw flag value.
static const char* ompt_cancel_flag_t_values[] = {
    "ompt_cancel_parallel",
    "ompt_cancel_sections",
    "ompt_cancel_loop",
    "ompt_cancel_taskgroup",
    "ompt_cancel_activated",
    "ompt_cancel_detected",
    "ompt_cancel_discarded_task"
};
static const char *ompt_dependence_type_t_values[] = {
    "ompt_dependence_type_UNDEFINED",
    "ompt_dependence_type_in", // 1
    "ompt_dependence_type_out", // 2
    "ompt_dependence_type_inout", // 3
    "ompt_dependence_type_mutexinoutset", // 4
    "ompt_dependence_type_source", // 5
    "ompt_dependence_type_sink", // 6
    "ompt_dependence_type_inoutset" // 7
};
// Render the ompt_task_flag_t bit set "type" as a readable string into
// "buffer". The task-kind bits (initial/implicit/explicit/target/taskwait)
// are printed bare; the modifier bits that follow are appended with a
// '|' separator. Caller must supply a buffer large enough for all names.
static void format_task_type(int type, char *buffer) {
  char *progress = buffer;
  // Make buffer a valid empty string even when no known bit is set;
  // previously it was left uninitialized and later printed by print_ids.
  *progress = '\0';
  if (type & ompt_task_initial)
    progress += sprintf(progress, "ompt_task_initial");
  if (type & ompt_task_implicit)
    progress += sprintf(progress, "ompt_task_implicit");
  if (type & ompt_task_explicit)
    progress += sprintf(progress, "ompt_task_explicit");
  if (type & ompt_task_target)
    progress += sprintf(progress, "ompt_task_target");
  if (type & ompt_task_taskwait)
    progress += sprintf(progress, "ompt_task_taskwait");
  if (type & ompt_task_undeferred)
    progress += sprintf(progress, "|ompt_task_undeferred");
  if (type & ompt_task_untied)
    progress += sprintf(progress, "|ompt_task_untied");
  if (type & ompt_task_final)
    progress += sprintf(progress, "|ompt_task_final");
  if (type & ompt_task_mergeable)
    progress += sprintf(progress, "|ompt_task_mergeable");
  if (type & ompt_task_merged)
    progress += sprintf(progress, "|ompt_task_merged");
}
// OMPT runtime entry points used by this tool. NOTE(review): presumably
// bound via ompt_function_lookup_t during tool initialization, which is
// defined elsewhere in this header (out of view here) -- verify there.
static ompt_set_callback_t ompt_set_callback;
static ompt_get_callback_t ompt_get_callback;
static ompt_get_state_t ompt_get_state;
static ompt_get_task_info_t ompt_get_task_info;
static ompt_get_task_memory_t ompt_get_task_memory;
static ompt_get_thread_data_t ompt_get_thread_data;
static ompt_get_parallel_info_t ompt_get_parallel_info;
static ompt_get_unique_id_t ompt_get_unique_id;
static ompt_finalize_tool_t ompt_finalize_tool;
static ompt_get_num_procs_t ompt_get_num_procs;
static ompt_get_num_places_t ompt_get_num_places;
static ompt_get_place_proc_ids_t ompt_get_place_proc_ids;
static ompt_get_place_num_t ompt_get_place_num;
static ompt_get_partition_place_nums_t ompt_get_partition_place_nums;
static ompt_get_proc_id_t ompt_get_proc_id;
static ompt_enumerate_states_t ompt_enumerate_states;
static ompt_enumerate_mutex_impls_t ompt_enumerate_mutex_impls;
// Print parallel id, task id, frame pointers and formatted task type for
// the task at ancestor depth "level" of the current thread. The exact
// output format is matched by the lit tests' CHECK lines.
static void print_ids(int level)
{
  int task_type, thread_num;
  ompt_frame_t *frame;
  ompt_data_t *task_parallel_data;
  ompt_data_t *task_data;
  // ompt_get_task_info fills the out-parameters only when a task exists
  // at this level (exists_task != 0).
  int exists_task = ompt_get_task_info(level, &task_type, &task_data, &frame,
                                       &task_parallel_data, &thread_num);
  char buffer[2048];
  // NOTE(review): when exists_task is 0, task_type and frame are read
  // uninitialized here -- presumably callers only pass levels that exist;
  // confirm against the tests before relying on this.
  format_task_type(task_type, buffer);
  if (frame)
    printf("%" PRIu64 ": task level %d: parallel_id=%" PRIu64
           ", task_id=%" PRIu64 ", exit_frame=%p, reenter_frame=%p, "
           "task_type=%s=%d, thread_num=%d\n",
           ompt_get_thread_data()->value, level,
           exists_task ? task_parallel_data->value : 0,
           exists_task ? task_data->value : 0, frame->exit_frame.ptr,
           frame->enter_frame.ptr, buffer, task_type, thread_num);
}
// Helper macros for the tests: frame-address printing, label-based return
// address probing, fuzzy address printing and callback registration.
#define get_frame_address(level) __builtin_frame_address(level)
#define print_frame(level) \
  printf("%" PRIu64 ": __builtin_frame_address(%d)=%p\n", \
         ompt_get_thread_data()->value, level, get_frame_address(level))
// clang (version 5.0 and above) adds an intermediate function call with debug flag (-g)
#if defined(TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN)
#if defined(DEBUG) && defined(__clang__) && __clang_major__ >= 5
#define print_frame_from_outlined_fn(level) print_frame(level+1)
#else
#define print_frame_from_outlined_fn(level) print_frame(level)
#endif
#if defined(__clang__) && __clang_major__ >= 5
#warning "Clang 5.0 and later add an additional wrapper for outlined functions when compiling with debug information."
#warning "Please define -DDEBUG iff you manually pass in -g to make the tests succeed!"
#endif
#endif
// This macro helps to define a label at the current position that can be used
// to get the current address in the code.
//
// For print_current_address():
// To reliably determine the offset between the address of the label and the
// actual return address, we insert a NOP instruction as a jump target as the
// compiler would otherwise insert an instruction that we can't control. The
// instruction length is target dependent and is explained below.
//
// (The empty block between "#pragma omp ..." and the __asm__ statement is a
// workaround for a bug in the Intel Compiler.)
#define define_ompt_label(id) \
  {} \
  __asm__("nop"); \
ompt_label_##id:
// This macro helps to get the address of a label that is inserted by the above
// macro define_ompt_label(). The address is obtained with a GNU extension
// (&&label) that has been tested with gcc, clang and icc.
#define get_ompt_label_address(id) (&& ompt_label_##id)
// This macro prints the exact address that a previously called runtime function
// returns to.
#define print_current_address(id) \
  define_ompt_label(id) \
  print_possible_return_addresses(get_ompt_label_address(id))
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// On X86 the NOP instruction is 1 byte long. In addition, the compiler inserts
// a MOV instruction for non-void runtime functions which is 3 bytes long.
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p for non-void functions\n", \
         ompt_get_thread_data()->value, ((char *)addr) - 1, ((char *)addr) - 4)
#elif KMP_ARCH_PPC64
// On Power the NOP instruction is 4 bytes long. In addition, the compiler
// inserts a second NOP instruction (another 4 bytes). For non-void runtime
// functions Clang inserts a STW instruction (but only if compiling under
// -fno-PIC which will be the default with Clang 8.0, another 4 bytes).
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \
         ((char *)addr) - 8, ((char *)addr) - 12)
#elif KMP_ARCH_AARCH64
// On AArch64 the NOP instruction is 4 bytes long, can be followed by inserted
// store instruction (another 4 bytes long).
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \
         ((char *)addr) - 4, ((char *)addr) - 8)
#elif KMP_ARCH_RISCV64
#if __riscv_compressed
// On RV64GC the C.NOP instruction is 2 byte long. In addition, the compiler
// inserts a J instruction (targeting the successor basic block), which
// accounts for another 4 bytes. Finally, an additional J instruction may
// appear (adding 4 more bytes) when the C.NOP is referenced elsewhere (ie.
// another branch).
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p\n", \
         ompt_get_thread_data()->value, ((char *)addr) - 6, ((char *)addr) - 10)
#else
// On RV64G the NOP instruction is 4 byte long. In addition, the compiler
// inserts a J instruction (targeting the successor basic block), which
// accounts for another 4 bytes. Finally, an additional J instruction may
// appear (adding 4 more bytes) when the NOP is referenced elsewhere (ie.
// another branch).
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p\n", \
         ompt_get_thread_data()->value, ((char *)addr) - 8, ((char *)addr) - 12)
#endif
#else
#error Unsupported target architecture, cannot determine address offset!
#endif
// This macro performs a somewhat similar job to print_current_address(), except
// that it discards a certain number of nibbles from the address and only prints
// the most significant bits / nibbles. This can be used for cases where the
// return address can only be approximated.
//
// To account for overflows (ie the most significant bits / nibbles have just
// changed as we are a few bytes above the relevant power of two) the addresses
// of the "current" and of the "previous block" are printed.
#define print_fuzzy_address(id) \
  define_ompt_label(id) \
  print_fuzzy_address_blocks(get_ompt_label_address(id))
// If you change this define you need to adapt all capture patterns in the tests
// to include or discard the new number of nibbles!
#define FUZZY_ADDRESS_DISCARD_NIBBLES 2
#define FUZZY_ADDRESS_DISCARD_BYTES (1 << ((FUZZY_ADDRESS_DISCARD_NIBBLES) * 4))
#define print_fuzzy_address_blocks(addr) \
  printf("%" PRIu64 ": fuzzy_address=0x%" PRIx64 " or 0x%" PRIx64 \
         " or 0x%" PRIx64 " or 0x%" PRIx64 " (%p)\n", \
         ompt_get_thread_data()->value, \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES - 1, \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES, \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES + 1, \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES + 2, addr)
// Register "on_<name>" as the handler for OMPT event <name>, warning on the
// test's stdout when the runtime refuses the registration.
#define register_ompt_callback_t(name, type) \
  do { \
    type f_##name = &on_##name; \
    if (ompt_set_callback(name, (ompt_callback_t)f_##name) == ompt_set_never) \
      printf("0: Could not register callback '" #name "'\n"); \
  } while (0)
#define register_ompt_callback(name) register_ompt_callback_t(name, name##_t)
#ifndef USE_PRIVATE_TOOL
// OMPT mutex-acquire callback: announce that this thread starts waiting
// for the given mutex kind. Every printf literal below is matched by the
// lit tests' CHECK lines, so the text must not change.
static void
on_ompt_callback_mutex_acquire(
  ompt_mutex_t kind,
  unsigned int hint,
  unsigned int impl,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ":" _TOOL_PREFIX
             " ompt_event_wait_lock: wait_id=%" PRIu64 ", hint=%" PRIu32
             ", impl=%" PRIu32 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      printf("%" PRIu64 ":" _TOOL_PREFIX
             " ompt_event_wait_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32
             ", impl=%" PRIu32 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_critical:
      printf("%" PRIu64 ":" _TOOL_PREFIX
             " ompt_event_wait_critical: wait_id=%" PRIu64 ", hint=%" PRIu32
             ", impl=%" PRIu32 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_atomic:
      printf("%" PRIu64 ":" _TOOL_PREFIX
             " ompt_event_wait_atomic: wait_id=%" PRIu64 ", hint=%" PRIu32
             ", impl=%" PRIu32 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_ordered:
      printf("%" PRIu64 ":" _TOOL_PREFIX
             " ompt_event_wait_ordered: wait_id=%" PRIu64 ", hint=%" PRIu32
             ", impl=%" PRIu32 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    default:
      // other mutex kinds are intentionally ignored
      break;
  }
}
// OMPT mutex-acquired callback: announce that the wait finished and the
// mutex is now held. Output text is matched by lit CHECK lines.
static void
on_ompt_callback_mutex_acquired(
  ompt_mutex_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ":" _TOOL_PREFIX
             " ompt_event_acquired_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      // first acquisition only; re-acquisitions arrive via the
      // nest_lock callback below
      printf("%" PRIu64 ":" _TOOL_PREFIX
             " ompt_event_acquired_nest_lock_first: wait_id=%" PRIu64
             ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_critical:
      printf("%" PRIu64 ":" _TOOL_PREFIX
             " ompt_event_acquired_critical: wait_id=%" PRIu64
             ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_atomic:
      printf("%" PRIu64 ":" _TOOL_PREFIX
             " ompt_event_acquired_atomic: wait_id=%" PRIu64
             ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_ordered:
      printf("%" PRIu64 ":" _TOOL_PREFIX
             " ompt_event_acquired_ordered: wait_id=%" PRIu64
             ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    default:
      break;
  }
}
// OMPT mutex-released callback: announce that the mutex was released.
// Output text is matched by lit CHECK lines.
static void
on_ompt_callback_mutex_released(
  ompt_mutex_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ":" _TOOL_PREFIX
             " ompt_event_release_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      // final release of a nest lock; inner releases arrive via the
      // nest_lock callback below
      printf("%" PRIu64 ":" _TOOL_PREFIX
             " ompt_event_release_nest_lock_last: wait_id=%" PRIu64
             ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_critical:
      printf("%" PRIu64 ":" _TOOL_PREFIX
             " ompt_event_release_critical: wait_id=%" PRIu64
             ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_atomic:
      printf("%" PRIu64 ":" _TOOL_PREFIX
             " ompt_event_release_atomic: wait_id=%" PRIu64
             ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_ordered:
      printf("%" PRIu64 ":" _TOOL_PREFIX
             " ompt_event_release_ordered: wait_id=%" PRIu64
             ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    default:
      break;
  }
}
// OMPT nest-lock callback: inner (nested) acquire/release events of a
// nest lock; the outermost acquire/release go through the mutex
// acquired/released callbacks above.
static void
on_ompt_callback_nest_lock(
  ompt_scope_endpoint_t endpoint,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      printf("%" PRIu64 ":" _TOOL_PREFIX
             " ompt_event_acquired_nest_lock_next: wait_id=%" PRIu64
             ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_scope_end:
      printf("%" PRIu64 ":" _TOOL_PREFIX
             " ompt_event_release_nest_lock_prev: wait_id=%" PRIu64
             ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_scope_beginend:
      // beginend is invalid for this event; abort the test loudly
      printf("ompt_scope_beginend should never be passed to %s\n", __func__);
      exit(-1);
  }
}
// OMPT sync-region callback: begin/end of barriers, taskwait and
// taskgroup regions. Output text is matched by lit CHECK lines.
// parallel_data may be NULL at region end (hence the guarded reads).
static void
on_ompt_callback_sync_region(
  ompt_sync_region_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(kind)
      {
        // all barrier flavors print the same event line
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_implicit_workshare:
        case ompt_sync_region_barrier_implicit_parallel:
        case ompt_sync_region_barrier_teams:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implementation:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_barrier_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          // also dump the current task's ids/frames for the tests
          print_ids(0);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskwait_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskgroup_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          // reductions must arrive via on_ompt_callback_reduction instead
          printf("ompt_sync_region_reduction should never be passed to "
                 "on_ompt_callback_sync_region\n");
          exit(-1);
          break;
      }
      break;
    case ompt_scope_end:
      switch(kind)
      {
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implicit_workshare:
        case ompt_sync_region_barrier_implicit_parallel:
        case ompt_sync_region_barrier_teams:
        case ompt_sync_region_barrier_implementation:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_barrier_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskwait_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskgroup_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          printf("ompt_sync_region_reduction should never be passed to "
                 "on_ompt_callback_sync_region\n");
          exit(-1);
          break;
      }
      break;
    case ompt_scope_beginend:
      printf("ompt_scope_beginend should never be passed to %s\n", __func__);
      exit(-1);
  }
}
// OMPT sync-region-wait callback: begin/end of the *waiting* phase inside
// barriers, taskwait and taskgroup regions. Mirrors
// on_ompt_callback_sync_region but prints "wait_" event names.
static void
on_ompt_callback_sync_region_wait(
  ompt_sync_region_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(kind)
      {
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_implicit_workshare:
        case ompt_sync_region_barrier_implicit_parallel:
        case ompt_sync_region_barrier_teams:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implementation:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_barrier_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_taskwait_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_taskgroup_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          // reductions must arrive via on_ompt_callback_reduction instead
          printf("ompt_sync_region_reduction should never be passed to "
                 "on_ompt_callback_sync_region_wait\n");
          exit(-1);
          break;
      }
      break;
    case ompt_scope_end:
      switch(kind)
      {
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_implicit_workshare:
        case ompt_sync_region_barrier_implicit_parallel:
        case ompt_sync_region_barrier_teams:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implementation:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_barrier_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_taskwait_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_taskgroup_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          printf("ompt_sync_region_reduction should never be passed to "
                 "on_ompt_callback_sync_region_wait\n");
          exit(-1);
          break;
      }
      break;
    case ompt_scope_beginend:
      printf("ompt_scope_beginend should never be passed to %s\n", __func__);
      exit(-1);
  }
}
// OMPT reduction callback: begin/end of a reduction. parallel_data may be
// NULL, hence the guarded read. Output text is matched by lit CHECK lines.
static void on_ompt_callback_reduction(ompt_sync_region_t kind,
                                       ompt_scope_endpoint_t endpoint,
                                       ompt_data_t *parallel_data,
                                       ompt_data_t *task_data,
                                       const void *codeptr_ra) {
  switch (endpoint) {
  case ompt_scope_begin:
    printf("%" PRIu64 ":" _TOOL_PREFIX
           " ompt_event_reduction_begin: parallel_id=%" PRIu64
           ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
           ompt_get_thread_data()->value,
           (parallel_data) ? parallel_data->value : 0, task_data->value,
           codeptr_ra);
    break;
  case ompt_scope_end:
    printf("%" PRIu64 ":" _TOOL_PREFIX
           " ompt_event_reduction_end: parallel_id=%" PRIu64
           ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
           ompt_get_thread_data()->value,
           (parallel_data) ? parallel_data->value : 0, task_data->value,
           codeptr_ra);
    break;
  case ompt_scope_beginend:
    printf("ompt_scope_beginend should never be passed to %s\n", __func__);
    exit(-1);
  }
}
// OMPT flush callback: fired on "#pragma omp flush".
static void
on_ompt_callback_flush(
  ompt_data_t *thread_data,
  const void *codeptr_ra)
{
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_flush: codeptr_ra=%p\n",
         thread_data->value, codeptr_ra);
}
// OMPT cancel callback. "flags" combines one construct bit
// (parallel/sections/loop/taskgroup) with one action bit
// (activated/detected/discarded_task); both are printed by name.
static void
on_ompt_callback_cancel(
  ompt_data_t *task_data,
  int flags,
  const void *codeptr_ra)
{
  // Default both names so printf never reads an uninitialized pointer
  // if an unexpected flag combination arrives (previously UB).
  const char* first_flag_value = "";
  const char* second_flag_value = "";
  if(flags & ompt_cancel_parallel)
    first_flag_value = ompt_cancel_flag_t_values[0];
  else if(flags & ompt_cancel_sections)
    first_flag_value = ompt_cancel_flag_t_values[1];
  else if(flags & ompt_cancel_loop)
    first_flag_value = ompt_cancel_flag_t_values[2];
  else if(flags & ompt_cancel_taskgroup)
    first_flag_value = ompt_cancel_flag_t_values[3];
  if(flags & ompt_cancel_activated)
    second_flag_value = ompt_cancel_flag_t_values[4];
  else if(flags & ompt_cancel_detected)
    second_flag_value = ompt_cancel_flag_t_values[5];
  else if(flags & ompt_cancel_discarded_task)
    second_flag_value = ompt_cancel_flag_t_values[6];
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_cancel: task_data=%" PRIu64
         ", flags=%s|%s=%" PRIu32 ", codeptr_ra=%p\n",
         ompt_get_thread_data()->value, task_data->value, first_flag_value,
         second_flag_value, flags, codeptr_ra);
}
// OMPT implicit-task callback: begin/end of implicit tasks, including the
// initial task (flags & ompt_task_initial). On begin it assigns fresh
// unique ids to task_data (and, for the initial task, parallel_data).
// Output text is matched by lit CHECK lines.
static void
on_ompt_callback_implicit_task(
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  unsigned int team_size,
  unsigned int thread_num,
  int flags)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      // the runtime must hand us zero-initialized task_data
      if(task_data->ptr)
        printf("%s\n", "0: task_data initially not null");
      task_data->value = ompt_get_unique_id();
      //there is no parallel_begin callback for implicit parallel region
      //thus it is initialized in initial task
      if(flags & ompt_task_initial)
      {
        char buffer[2048];
        format_task_type(flags, buffer);
        // Only check initial task not created by teams construct
        if (team_size == 1 && thread_num == 1 && parallel_data->ptr)
          printf("%s\n", "0: parallel_data initially not null");
        parallel_data->value = ompt_get_unique_id();
        printf("%" PRIu64 ":" _TOOL_PREFIX
               " ompt_event_initial_task_begin: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", actual_parallelism=%" PRIu32
               ", index=%" PRIu32 ", flags=%" PRIu32 "\n",
               ompt_get_thread_data()->value, parallel_data->value,
               task_data->value, team_size, thread_num, flags);
      } else {
        printf("%" PRIu64 ":" _TOOL_PREFIX
               " ompt_event_implicit_task_begin: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", team_size=%" PRIu32
               ", thread_num=%" PRIu32 "\n",
               ompt_get_thread_data()->value, parallel_data->value,
               task_data->value, team_size, thread_num);
      }
      break;
    case ompt_scope_end:
      // parallel_data may already be gone at end, hence the guards
      if(flags & ompt_task_initial){
        printf("%" PRIu64 ":" _TOOL_PREFIX
               " ompt_event_initial_task_end: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", actual_parallelism=%" PRIu32
               ", index=%" PRIu32 "\n",
               ompt_get_thread_data()->value,
               (parallel_data) ? parallel_data->value : 0, task_data->value,
               team_size, thread_num);
      } else {
        printf("%" PRIu64 ":" _TOOL_PREFIX
               " ompt_event_implicit_task_end: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", team_size=%" PRIu32
               ", thread_num=%" PRIu32 "\n",
               ompt_get_thread_data()->value,
               (parallel_data) ? parallel_data->value : 0, task_data->value,
               team_size, thread_num);
      }
      break;
    case ompt_scope_beginend:
      printf("ompt_scope_beginend should never be passed to %s\n", __func__);
      exit(-1);
  }
}
// OMPT lock-init callback: fired by omp_init_lock / omp_init_nest_lock.
// Other mutex kinds have no init event and are ignored.
static void
on_ompt_callback_lock_init(
  ompt_mutex_t kind,
  unsigned int hint,
  unsigned int impl,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ":" _TOOL_PREFIX
             " ompt_event_init_lock: wait_id=%" PRIu64 ", hint=%" PRIu32
             ", impl=%" PRIu32 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      printf("%" PRIu64 ":" _TOOL_PREFIX
             " ompt_event_init_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32
             ", impl=%" PRIu32 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    default:
      break;
  }
}
// OMPT lock-destroy callback: fired by omp_destroy_lock /
// omp_destroy_nest_lock. Other mutex kinds are ignored.
static void
on_ompt_callback_lock_destroy(
  ompt_mutex_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ":" _TOOL_PREFIX
             " ompt_event_destroy_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      printf("%" PRIu64 ":" _TOOL_PREFIX
             " ompt_event_destroy_nest_lock: wait_id=%" PRIu64
             ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    default:
      break;
  }
}
/* ompt_callback_work: traces begin/end of worksharing constructs
 * (loop, sections, single, workshare, distribute, taskloop, scope).
 * Each arm prints one line in the exact format the OMPT lit tests match,
 * so the strings below must not change. */
static void
on_ompt_callback_work(
  ompt_work_t wstype,              /* which worksharing construct */
  ompt_scope_endpoint_t endpoint,  /* begin or end of the region */
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  uint64_t count,                  /* construct-specific count (e.g. trip count) */
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(wstype)
      {
        case ompt_work_loop:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_loop_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_sections:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_sections_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_executor:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_single_in_block_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_other:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_single_others_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_workshare:
          //impl
          break;
        case ompt_work_distribute:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_distribute_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_taskloop:
          //impl
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskloop_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_scope:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_scope_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
      }
      break;
    case ompt_scope_end:
      switch(wstype)
      {
        case ompt_work_loop:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_loop_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_sections:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_sections_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_executor:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_single_in_block_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_other:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_single_others_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_workshare:
          //impl
          break;
        case ompt_work_distribute:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_distribute_end: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_taskloop:
          //impl
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskloop_end: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_scope:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_scope_end: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
      }
      break;
    case ompt_scope_beginend:
      /* Combined begin+end endpoints are invalid for this callback. */
      printf("ompt_scope_beginend should never be passed to %s\n", __func__);
      exit(-1);
  }
}
/* Trace begin/end of masked (master) regions; abort on the invalid
 * combined begin+end endpoint. */
static void on_ompt_callback_masked(ompt_scope_endpoint_t endpoint,
                                    ompt_data_t *parallel_data,
                                    ompt_data_t *task_data,
                                    const void *codeptr_ra) {
  if (endpoint == ompt_scope_begin) {
    printf("%" PRIu64 ":" _TOOL_PREFIX
           " ompt_event_masked_begin: parallel_id=%" PRIu64
           ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
           ompt_get_thread_data()->value, parallel_data->value,
           task_data->value, codeptr_ra);
  } else if (endpoint == ompt_scope_end) {
    printf("%" PRIu64 ":" _TOOL_PREFIX
           " ompt_event_masked_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64
           ", codeptr_ra=%p\n",
           ompt_get_thread_data()->value, parallel_data->value,
           task_data->value, codeptr_ra);
  } else if (endpoint == ompt_scope_beginend) {
    printf("ompt_scope_beginend should never be passed to %s\n", __func__);
    exit(-1);
  }
}
/* Begin of a parallel or teams region: assign a fresh id to the region
 * and trace the encountering task's frame pointers. */
static void on_ompt_callback_parallel_begin(
    ompt_data_t *encountering_task_data,
    const ompt_frame_t *encountering_task_frame, ompt_data_t *parallel_data,
    uint32_t requested_team_size, int flag, const void *codeptr_ra) {
  if (parallel_data->ptr)
    printf("0: parallel_data initially not null\n");
  parallel_data->value = ompt_get_unique_id();
  /* Low nibble of flag encodes the invoker variant. */
  int invoker = flag & 0xF;
  const char *event;
  const char *size;
  if (flag & ompt_parallel_team) {
    event = "parallel";
    size = "team_size";
  } else {
    event = "teams";
    size = "num_teams";
  }
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_%s_begin: parent_task_id=%" PRIu64
         ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, "
         "parallel_id=%" PRIu64 ", requested_%s=%" PRIu32
         ", codeptr_ra=%p, invoker=%d\n",
         ompt_get_thread_data()->value, event, encountering_task_data->value,
         encountering_task_frame->exit_frame.ptr,
         encountering_task_frame->enter_frame.ptr, parallel_data->value, size,
         requested_team_size, codeptr_ra, invoker);
}
/* End of a parallel or teams region. */
static void on_ompt_callback_parallel_end(ompt_data_t *parallel_data,
                                          ompt_data_t *encountering_task_data,
                                          int flag, const void *codeptr_ra) {
  /* Low nibble of flag encodes the invoker variant. */
  int invoker = flag & 0xF;
  const char *event = "teams";
  if (flag & ompt_parallel_team)
    event = "parallel";
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s_end: parallel_id=%" PRIu64
         ", task_id=%" PRIu64 ", invoker=%d, codeptr_ra=%p\n",
         ompt_get_thread_data()->value, event, parallel_data->value,
         encountering_task_data->value, invoker, codeptr_ra);
}
/* Task creation: assign a fresh id to the new task and trace its type.
 * The encountering task/frame may be absent (e.g. for the initial task),
 * hence the NULL fallbacks. */
static void
on_ompt_callback_task_create(
  ompt_data_t *encountering_task_data,
  const ompt_frame_t *encountering_task_frame,
  ompt_data_t* new_task_data,
  int type,
  int has_dependences,
  const void *codeptr_ra)
{
  if (new_task_data->ptr)
    printf("0: new_task_data initially not null\n");
  new_task_data->value = ompt_get_unique_id();
  /* Decode the task-type bitmask into readable text. */
  char buffer[2048];
  format_task_type(type, buffer);
  uint64_t parent_id = encountering_task_data ? encountering_task_data->value : 0;
  void *exit_frame =
      encountering_task_frame ? encountering_task_frame->exit_frame.ptr : NULL;
  void *enter_frame =
      encountering_task_frame ? encountering_task_frame->enter_frame.ptr : NULL;
  printf(
      "%" PRIu64 ":" _TOOL_PREFIX
      " ompt_event_task_create: parent_task_id=%" PRIu64
      ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, "
      "new_task_id=%" PRIu64
      ", codeptr_ra=%p, task_type=%s=%d, has_dependences=%s\n",
      ompt_get_thread_data()->value, parent_id, exit_frame, enter_frame,
      new_task_data->value, codeptr_ra, buffer, type,
      has_dependences ? "yes" : "no");
}
/* Task scheduling point: trace the switch from first to second task.
 * A completing status additionally marks the end of the first task. */
static void
on_ompt_callback_task_schedule(
  ompt_data_t *first_task_data,
  ompt_task_status_t prior_task_status,
  ompt_data_t *second_task_data)
{
  /* second task may be absent; print (uint64_t)-1 then, as before. */
  uint64_t second_id =
      second_task_data ? second_task_data->value : (uint64_t)(-1);
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_task_schedule: first_task_id=%" PRIu64
         ", second_task_id=%" PRIu64 ", prior_task_status=%s=%d\n",
         ompt_get_thread_data()->value, first_task_data->value, second_id,
         ompt_task_status_t_values[prior_task_status], prior_task_status);
  int first_task_done = prior_task_status == ompt_task_complete ||
                        prior_task_status == ompt_task_late_fulfill ||
                        prior_task_status == ompt_taskwait_complete;
  if (first_task_done) {
    printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_task_end: task_id=%" PRIu64
           "\n", ompt_get_thread_data()->value, first_task_data->value);
  }
}
/* Trace a task's dependence list as "(var, type)" pairs.
 * The list is rendered into a fixed buffer; rendering stops once within
 * ~48 bytes of the end so one more entry always fits. */
static void
on_ompt_callback_dependences(
  ompt_data_t *task_data,
  const ompt_dependence_t *deps,
  int ndeps)
{
  char buffer[2048];
  buffer[0] = '\0'; /* fix: buffer was printed uninitialized when ndeps == 0 */
  char *progress = buffer;
  int i;
  for (i = 0; i < ndeps && progress < buffer + 2000; i++) {
    /* source/sink dependences carry an integer value, not an address. */
    if (deps[i].dependence_type == ompt_dependence_type_source ||
        deps[i].dependence_type == ompt_dependence_type_sink)
      progress +=
          sprintf(progress, "(%" PRIu64 ", %s), ", deps[i].variable.value,
                  ompt_dependence_type_t_values[deps[i].dependence_type]);
    else
      progress +=
          sprintf(progress, "(%p, %s), ", deps[i].variable.ptr,
                  ompt_dependence_type_t_values[deps[i].dependence_type]);
  }
  /* Strip the trailing ", " separator. */
  if (ndeps > 0)
    progress[-2] = 0;
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_dependences: task_id=%" PRIu64
         ", deps=[%s], ndeps=%d\n",
         ompt_get_thread_data()->value, task_data->value, buffer, ndeps);
}
/* Trace one resolved dependence pair between two tasks. */
static void
on_ompt_callback_task_dependence(
  ompt_data_t *first_task_data,
  ompt_data_t *second_task_data)
{
  uint64_t first_id = first_task_data->value;
  uint64_t second_id = second_task_data->value;
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_task_dependence_pair: first_task_id=%" PRIu64
         ", second_task_id=%" PRIu64 "\n",
         ompt_get_thread_data()->value, first_id, second_id);
}
/* Thread startup: assign a fresh id to the thread and trace its type. */
static void
on_ompt_callback_thread_begin(
  ompt_thread_t thread_type,
  ompt_data_t *thread_data)
{
  if (thread_data->ptr)
    printf("%s\n", "0: thread_data initially not null");
  uint64_t thread_id = ompt_get_unique_id();
  thread_data->value = thread_id;
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n",
         ompt_get_thread_data()->value, ompt_thread_t_values[thread_type],
         thread_type, thread_id);
}
/* Thread shutdown: trace the ending thread's id. */
static void
on_ompt_callback_thread_end(
  ompt_data_t *thread_data)
{
  uint64_t current = ompt_get_thread_data()->value;
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_thread_end: thread_id=%" PRIu64
         "\n",
         current, thread_data->value);
}
/* ompt_callback_control_tool: invoked by omp_control_tool() from the
 * application. Dumps the current task frame and (outside the OMPT lit
 * tests) the full task and parallel-region ancestry. Returns 0 = success. */
static int
on_ompt_callback_control_tool(
  uint64_t command,
  uint64_t modifier,
  void *arg,
  const void *codeptr_ra)
{
  ompt_frame_t* omptTaskFrame;
  /* NOTE(review): return value of ompt_get_task_info is not checked here;
     omptTaskFrame is presumably always set for the current task — confirm. */
  ompt_get_task_info(0, NULL, (ompt_data_t**) NULL, &omptTaskFrame, NULL, NULL);
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_control_tool: command=%" PRIu64
         ", modifier=%" PRIu64
         ", arg=%p, codeptr_ra=%p, current_task_frame.exit=%p, "
         "current_task_frame.reenter=%p \n",
         ompt_get_thread_data()->value, command, modifier, arg, codeptr_ra,
         omptTaskFrame->exit_frame.ptr, omptTaskFrame->enter_frame.ptr);
  // the following would interfere with expected output for OMPT tests, so skip
#ifndef _OMPT_TESTS
  // print task data: walk task ancestry from the current task outward
  int task_level = 0;
  ompt_data_t *task_data;
  while (ompt_get_task_info(task_level, NULL, (ompt_data_t **)&task_data, NULL,
                            NULL, NULL)) {
    printf("%" PRIu64 ":" _TOOL_PREFIX " task level %d: task_id=%" PRIu64 "\n",
           ompt_get_thread_data()->value, task_level, task_data->value);
    task_level++;
  }
  // print parallel data: walk enclosing parallel regions outward
  int parallel_level = 0;
  ompt_data_t *parallel_data;
  while (ompt_get_parallel_info(parallel_level, (ompt_data_t **)&parallel_data,
                                NULL)) {
    printf("%" PRIu64 ":" _TOOL_PREFIX " parallel level %d: parallel_id=%" PRIu64
           "\n",
           ompt_get_thread_data()->value, parallel_level, parallel_data->value);
    parallel_level++;
  }
#endif
  return 0; //success
}
/* Trace a runtime error event (severity, message, source location). */
static void on_ompt_callback_error(ompt_severity_t severity,
                                   const char *message, size_t length,
                                   const void *codeptr_ra) {
  uint64_t thread_id = ompt_get_thread_data()->value;
  printf("%" PRIu64 ": ompt_event_runtime_error: severity=%" PRIu32
         ", message=%s, length=%" PRIu64 ", codeptr_ra=%p\n",
         thread_id, severity, message, (uint64_t)length, codeptr_ra);
}
/* Tool initializer, called once by the OpenMP runtime.
 * Resolves the OMPT inquiry entry points via `lookup`, then registers all
 * event callbacks this tool traces. Returns 1 to keep the tool active. */
int ompt_initialize(
  ompt_function_lookup_t lookup,
  int initial_device_num,
  ompt_data_t *tool_data)
{
  /* Resolve runtime inquiry functions into the tool's function pointers. */
  ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback");
  ompt_get_callback = (ompt_get_callback_t) lookup("ompt_get_callback");
  ompt_get_state = (ompt_get_state_t) lookup("ompt_get_state");
  ompt_get_task_info = (ompt_get_task_info_t) lookup("ompt_get_task_info");
  ompt_get_task_memory = (ompt_get_task_memory_t)lookup("ompt_get_task_memory");
  ompt_get_thread_data = (ompt_get_thread_data_t) lookup("ompt_get_thread_data");
  ompt_get_parallel_info = (ompt_get_parallel_info_t) lookup("ompt_get_parallel_info");
  ompt_get_unique_id = (ompt_get_unique_id_t) lookup("ompt_get_unique_id");
  ompt_finalize_tool = (ompt_finalize_tool_t)lookup("ompt_finalize_tool");
  /* NOTE(review): consumes one id before any event fires — presumably so
     traced ids start above a known value; confirm the tests rely on this. */
  ompt_get_unique_id();
  ompt_get_num_procs = (ompt_get_num_procs_t) lookup("ompt_get_num_procs");
  ompt_get_num_places = (ompt_get_num_places_t) lookup("ompt_get_num_places");
  ompt_get_place_proc_ids = (ompt_get_place_proc_ids_t) lookup("ompt_get_place_proc_ids");
  ompt_get_place_num = (ompt_get_place_num_t) lookup("ompt_get_place_num");
  ompt_get_partition_place_nums = (ompt_get_partition_place_nums_t) lookup("ompt_get_partition_place_nums");
  ompt_get_proc_id = (ompt_get_proc_id_t) lookup("ompt_get_proc_id");
  ompt_enumerate_states = (ompt_enumerate_states_t) lookup("ompt_enumerate_states");
  ompt_enumerate_mutex_impls = (ompt_enumerate_mutex_impls_t) lookup("ompt_enumerate_mutex_impls");
  /* Register tracing callbacks; *_t variants reuse a shared handler whose
     signature matches the given callback type. */
  register_ompt_callback(ompt_callback_mutex_acquire);
  register_ompt_callback_t(ompt_callback_mutex_acquired, ompt_callback_mutex_t);
  register_ompt_callback_t(ompt_callback_mutex_released, ompt_callback_mutex_t);
  register_ompt_callback(ompt_callback_nest_lock);
  register_ompt_callback(ompt_callback_sync_region);
  register_ompt_callback_t(ompt_callback_sync_region_wait, ompt_callback_sync_region_t);
  register_ompt_callback_t(ompt_callback_reduction, ompt_callback_sync_region_t);
  register_ompt_callback(ompt_callback_control_tool);
  register_ompt_callback(ompt_callback_flush);
  register_ompt_callback(ompt_callback_cancel);
  register_ompt_callback(ompt_callback_implicit_task);
  register_ompt_callback_t(ompt_callback_lock_init, ompt_callback_mutex_acquire_t);
  register_ompt_callback_t(ompt_callback_lock_destroy, ompt_callback_mutex_t);
  register_ompt_callback(ompt_callback_work);
  register_ompt_callback(ompt_callback_masked);
  register_ompt_callback(ompt_callback_parallel_begin);
  register_ompt_callback(ompt_callback_parallel_end);
  register_ompt_callback(ompt_callback_task_create);
  register_ompt_callback(ompt_callback_task_schedule);
  register_ompt_callback(ompt_callback_dependences);
  register_ompt_callback(ompt_callback_task_dependence);
  register_ompt_callback(ompt_callback_thread_begin);
  register_ompt_callback(ompt_callback_thread_end);
  register_ompt_callback(ompt_callback_error);
  /* Reference line the tests use to learn the platform's %p for NULL. */
  printf("0: NULL_POINTER=%p\n", (void*)NULL);
  return 1; //success
}
/* Tool finalizer: announce runtime shutdown. */
void ompt_finalize(ompt_data_t *tool_data)
{
  (void)tool_data; /* unused */
  printf("0: ompt_event_runtime_shutdown\n");
}
#ifdef __cplusplus
extern "C" {
#endif
/* OMPT entry point: hand the runtime our initialize/finalize pair. */
ompt_start_tool_result_t* ompt_start_tool(
  unsigned int omp_version,
  const char *runtime_version)
{
  (void)omp_version;     /* unused */
  (void)runtime_version; /* unused */
  /* Static storage: the runtime keeps this pointer past the call. */
  static ompt_start_tool_result_t ompt_start_tool_result = {
      &ompt_initialize, &ompt_finalize, 0};
  return &ompt_start_tool_result;
}
#ifdef __cplusplus
}
#endif
#endif // ifndef USE_PRIVATE_TOOL
#ifdef _OMPT_TESTS
#undef _OMPT_TESTS
#endif
|
rose_matrixmultiply.c | /*
Naive matrix-matrix multiplication(mmm)
By C. Liao
*/
#define N 1000
#define M 1000
#define K 1000
#include "omp.h"
int i;
int j;
int k;
double a[1000][1000];
double b[1000][1000];
double c[1000][1000];
/* Naive triple-loop matrix multiply: c += a * b over 1000x1000 doubles
 * (globals a, b, c; globals i, j, k serve as loop indices but are
 * privatized by the pragmas).
 * NOTE(review): the inner "parallel for" opens a nested parallel region
 * inside the outer one — presumably ROSE-generated output; confirm that
 * nesting is intended. */
int mmm()
{
#pragma omp parallel for private (i,j,k)
  for (i = 0; i < 1000; i += 1) {
#pragma omp parallel for private (j,k)
    for (j = 0; j < 1000; j += 1) {
      for (k = 0; k < 1000; k += 1) {
        c[i][j] += a[i][k] * b[k][j];
      }
    }
  }
  return 0;
}
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 * Follows the classic glibc-manual algorithm: *y is normalized in place
 * as a side effect. Returns 1 if the difference is negative, else 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds from y so that x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry excess microseconds in the difference into seconds. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* tv_usec of the result is guaranteed non-negative at this point. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Benchmark driver for an order-4 (25-point) axis-symmetric,
 * variable-coefficient 3D stencil, time-tiled by PLUTO/CLooG.
 * Usage: prog Nx Ny Nz [Nt]; 8 ghost cells are added per spatial axis.
 * Runs TESTS trials and reports per-trial and minimum wall time. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  /* NOTE(review): Nx/Ny/Nz (and Nt) remain uninitialized when fewer than
     3 (resp. 4) arguments are given — undefined behavior; confirm the
     harness always passes all four sizes. */
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // allocate the arrays
  /* A[2][Nz][Ny][Nx]: double-buffered grid (read t%2, write (t+1)%2). */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  /* coef[13][Nz][Ny][Nx]: one grid per axis-symmetric stencil coefficient. */
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  /* NOTE(review): tile_size is populated but not referenced in the visible
     code (the tile sizes are hard-wired into the CLooG loops below) and is
     never freed — verify whether it is consumed by the code generator only. */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 16;
  tile_size[3] = 32;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables
  //
  /* Fixed seed for reproducible inputs; indices start at 1, so the
     0-planes are left uninitialized — presumably ghost cells, confirm. */
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
       This file is part of the GNU C Library.
       The GNU C Library is free software; you can redistribute it and/or
       modify it under the terms of the GNU Lesser General Public
       License as published by the Free Software Foundation; either
       version 2.1 of the License, or (at your option) any later version.
       The GNU C Library is distributed in the hope that it will be useful,
       but WITHOUT ANY WARRANTY; without even the implied warranty of
       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
       Lesser General Public License for more details.
       You should have received a copy of the GNU Lesser General Public
       License along with the GNU C Library; if not, see
       <http://www.gnu.org/licenses/>. */
    /* This header is separate from features.h so that the compiler can
       include it implicitly at the start of every compilation.  It must
       not itself include <features.h> or any other header that includes
       <features.h> because the implicit include comes before any feature
       test macros that may be defined in a source file before it first
       explicitly includes a system header.  GCC knows the name of this
       header in order to preinclude it. */
    /* glibc's intent is to support the IEC 559 math functionality, real
       and complex.  If the GCC (4.9 and later) predefined macros
       specifying compiler intent are available, use them to determine
       whether the overall intent is to support these features; otherwise,
       presume an older compiler has intent to support these features and
       define these macros by default. */
    /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
       Unicode 6.0. */
    /* We do not support C11 <threads.h>. */
    /* Tile-loop iterators (t1..t4 tiles, t5 time, t6..t8 space points). */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    /* Auto-generated diamond/time-tiled loop nest: t2 tiles run in
       parallel per time band; the innermost t8 loop is vectorized. */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=floord(Nt-1,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
        ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-1,2)),ceild(16*t2-Nz-3,16));t3<=min(min(min(floord(4*Nt+Ny-9,16),floord(8*t1+Ny+7,16)),floord(16*t2+Ny+3,16)),floord(16*t1-16*t2+Nz+Ny+5,16));t3++) {
            for (t4=max(max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-19,32)),ceild(16*t3-Ny-19,32));t4<=min(min(min(min(floord(4*Nt+Nx-9,32),floord(8*t1+Nx+7,32)),floord(16*t2+Nx+3,32)),floord(16*t3+Nx+3,32)),floord(16*t1-16*t2+Nz+Nx+5,32));t4++) {
              for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),4*t3+2),8*t4+6);t5++) {
                for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) {
                    lbv=max(32*t4,4*t5+4);
                    ubv=min(32*t4+31,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      /* 25-point update: center plus +/-1..4 neighbors on
                         each axis, weighted by coef[0..12]. */
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) 
                                                              + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays
  /* NOTE(review): frees the per-plane/per-row storage, but the top-level
     A and coef pointers and tile_size are never freed — harmless at exit,
     flag if this ever becomes a library routine. */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
|
convolution_sgemm_pack4to1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_pack4to1_msa(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
// Mat bottom_im2col(size, maxk, inch, 4u * 4, 4, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
const float* bias = _bias;
Mat tmp;
if (size >= 12)
tmp.create(12 * maxk, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + size % 12 % 4, 4u * 4, 4, opt.workspace_allocator);
else if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + size % 4, 4u * 4, 4, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + size % 4, 4u * 4, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 4u * 4, 4, opt.workspace_allocator);
{
int remain_size_start = 0;
int nn_size = size / 12;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 12;
float* tmpptr = tmp.channel(i / 12);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
// transpose 4x12
v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0);
v4f32 _r2 = (v4f32)__msa_ld_w(img0 + 4 * 2, 0);
v4f32 _r3 = (v4f32)__msa_ld_w(img0 + 4 * 3, 0);
v4f32 _r4 = (v4f32)__msa_ld_w(img0 + 4 * 4, 0);
v4f32 _r5 = (v4f32)__msa_ld_w(img0 + 4 * 5, 0);
v4f32 _r6 = (v4f32)__msa_ld_w(img0 + 4 * 6, 0);
v4f32 _r7 = (v4f32)__msa_ld_w(img0 + 4 * 7, 0);
v4f32 _r8 = (v4f32)__msa_ld_w(img0 + 4 * 8, 0);
v4f32 _r9 = (v4f32)__msa_ld_w(img0 + 4 * 9, 0);
v4f32 _ra = (v4f32)__msa_ld_w(img0 + 4 * 10, 0);
v4f32 _rb = (v4f32)__msa_ld_w(img0 + 4 * 11, 0);
v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
v4i32 _r89r = __msa_ilvr_w((v4i32)_r9, (v4i32)_r8);
v4i32 _r89l = __msa_ilvl_w((v4i32)_r9, (v4i32)_r8);
v4i32 _rabr = __msa_ilvr_w((v4i32)_rb, (v4i32)_ra);
v4i32 _rabl = __msa_ilvl_w((v4i32)_rb, (v4i32)_ra);
v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);
v2i64 _r89ab_0 = __msa_ilvr_d((v2i64)_rabr, (v2i64)_r89r);
v2i64 _r89ab_1 = __msa_ilvl_d((v2i64)_rabr, (v2i64)_r89r);
v2i64 _r89ab_2 = __msa_ilvr_d((v2i64)_rabl, (v2i64)_r89l);
v2i64 _r89ab_3 = __msa_ilvl_d((v2i64)_rabl, (v2i64)_r89l);
__msa_st_w((v4i32)_r0123_0, tmpptr, 0);
__msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
__msa_st_w((v4i32)_r89ab_0, tmpptr + 4 * 2, 0);
__msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 3, 0);
__msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 4, 0);
__msa_st_w((v4i32)_r89ab_1, tmpptr + 4 * 5, 0);
__msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 6, 0);
__msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 7, 0);
__msa_st_w((v4i32)_r89ab_2, tmpptr + 4 * 8, 0);
__msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 9, 0);
__msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 10, 0);
__msa_st_w((v4i32)_r89ab_3, tmpptr + 4 * 11, 0);
img0 += size * 4;
tmpptr += 48;
}
}
}
remain_size_start += nn_size * 12;
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
// transpose 4x8
v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0);
v4f32 _r2 = (v4f32)__msa_ld_w(img0 + 4 * 2, 0);
v4f32 _r3 = (v4f32)__msa_ld_w(img0 + 4 * 3, 0);
v4f32 _r4 = (v4f32)__msa_ld_w(img0 + 4 * 4, 0);
v4f32 _r5 = (v4f32)__msa_ld_w(img0 + 4 * 5, 0);
v4f32 _r6 = (v4f32)__msa_ld_w(img0 + 4 * 6, 0);
v4f32 _r7 = (v4f32)__msa_ld_w(img0 + 4 * 7, 0);
v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);
__msa_st_w((v4i32)_r0123_0, tmpptr, 0);
__msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
__msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 2, 0);
__msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 3, 0);
__msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 4, 0);
__msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 5, 0);
__msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 6, 0);
__msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 7, 0);
img0 += size * 4;
tmpptr += 32;
}
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
// transpose 4x4
v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0);
v4f32 _r2 = (v4f32)__msa_ld_w(img0 + 4 * 2, 0);
v4f32 _r3 = (v4f32)__msa_ld_w(img0 + 4 * 3, 0);
v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
__msa_st_w((v4i32)_r0123_0, tmpptr, 0);
__msa_st_w((v4i32)_r0123_1, tmpptr + 4, 0);
__msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 2, 0);
__msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 3, 0);
img0 += size * 4;
tmpptr += 16;
}
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
v4f32 _val = (v4f32)__msa_ld_w(img0, 0);
__msa_st_w((v4i32)_val, tmpptr, 0);
img0 += size * 4;
tmpptr += 4;
}
}
}
}
int nn_outch = outch / 4;
int remain_outch_start = nn_outch * 4;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p + 1);
float* outptr2 = top_blob.channel(p + 2);
float* outptr3 = top_blob.channel(p + 3);
const float zeros[4] = {0.f};
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
for (; i + 11 < size; i += 12)
{
const float* tmpptr = tmp.channel(i / 12);
const float* kptr0 = kernel.channel(p / 4);
int nn = inch * maxk * 4; // inch always > 0
v4f32 _sum0 = (v4f32)__msa_ld_w(biasptr, 0);
v4f32 _sum1 = (v4f32)__msa_ld_w(biasptr, 0);
v4f32 _sum2 = (v4f32)__msa_ld_w(biasptr, 0);
v4f32 _sum3 = (v4f32)__msa_ld_w(biasptr, 0);
v4f32 _sum4 = (v4f32)__msa_ld_w(biasptr, 0);
v4f32 _sum5 = (v4f32)__msa_ld_w(biasptr, 0);
v4f32 _sum6 = (v4f32)__msa_ld_w(biasptr, 0);
v4f32 _sum7 = (v4f32)__msa_ld_w(biasptr, 0);
v4f32 _sum8 = (v4f32)__msa_ld_w(biasptr, 0);
v4f32 _sum9 = (v4f32)__msa_ld_w(biasptr, 0);
v4f32 _suma = (v4f32)__msa_ld_w(biasptr, 0);
v4f32 _sumb = (v4f32)__msa_ld_w(biasptr, 0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(tmpptr + 96);
__builtin_prefetch(kptr0 + 32);
v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
v4f32 _val1 = (v4f32)__msa_ld_w(tmpptr + 4, 0);
v4f32 _val2 = (v4f32)__msa_ld_w(tmpptr + 8, 0);
v4i32 _w0123 = __msa_ld_w(kptr0, 0);
_sum0 = __msa_fmadd_w(_sum0, _val0, (v4f32)__msa_splati_w(_w0123, 0));
_sum1 = __msa_fmadd_w(_sum1, _val1, (v4f32)__msa_splati_w(_w0123, 0));
_sum2 = __msa_fmadd_w(_sum2, _val2, (v4f32)__msa_splati_w(_w0123, 0));
_sum3 = __msa_fmadd_w(_sum3, _val0, (v4f32)__msa_splati_w(_w0123, 1));
_sum4 = __msa_fmadd_w(_sum4, _val1, (v4f32)__msa_splati_w(_w0123, 1));
_sum5 = __msa_fmadd_w(_sum5, _val2, (v4f32)__msa_splati_w(_w0123, 1));
_sum6 = __msa_fmadd_w(_sum6, _val0, (v4f32)__msa_splati_w(_w0123, 2));
_sum7 = __msa_fmadd_w(_sum7, _val1, (v4f32)__msa_splati_w(_w0123, 2));
_sum8 = __msa_fmadd_w(_sum8, _val2, (v4f32)__msa_splati_w(_w0123, 2));
_sum9 = __msa_fmadd_w(_sum9, _val0, (v4f32)__msa_splati_w(_w0123, 3));
_suma = __msa_fmadd_w(_suma, _val1, (v4f32)__msa_splati_w(_w0123, 3));
_sumb = __msa_fmadd_w(_sumb, _val2, (v4f32)__msa_splati_w(_w0123, 3));
tmpptr += 12;
kptr0 += 4;
}
__msa_st_w((v4i32)_sum0, outptr0, 0);
__msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
__msa_st_w((v4i32)_sum2, outptr0 + 8, 0);
__msa_st_w((v4i32)_sum3, outptr1, 0);
__msa_st_w((v4i32)_sum4, outptr1 + 4, 0);
__msa_st_w((v4i32)_sum5, outptr1 + 8, 0);
__msa_st_w((v4i32)_sum6, outptr2, 0);
__msa_st_w((v4i32)_sum7, outptr2 + 4, 0);
__msa_st_w((v4i32)_sum8, outptr2 + 8, 0);
__msa_st_w((v4i32)_sum9, outptr3, 0);
__msa_st_w((v4i32)_suma, outptr3 + 4, 0);
__msa_st_w((v4i32)_sumb, outptr3 + 8, 0);
outptr0 += 12;
outptr1 += 12;
outptr2 += 12;
outptr3 += 12;
}
for (; i + 7 < size; i += 8)
{
const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const float* kptr0 = kernel.channel(p / 4);
int nn = inch * maxk * 4; // inch always > 0
v4f32 _sum0 = (v4f32)__msa_ld_w(biasptr, 0);
v4f32 _sum1 = (v4f32)__msa_ld_w(biasptr, 0);
v4f32 _sum2 = (v4f32)__msa_ld_w(biasptr, 0);
v4f32 _sum3 = (v4f32)__msa_ld_w(biasptr, 0);
v4f32 _sum4 = (v4f32)__msa_ld_w(biasptr, 0);
v4f32 _sum5 = (v4f32)__msa_ld_w(biasptr, 0);
v4f32 _sum6 = (v4f32)__msa_ld_w(biasptr, 0);
v4f32 _sum7 = (v4f32)__msa_ld_w(biasptr, 0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(tmpptr + 64);
__builtin_prefetch(kptr0 + 32);
v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
v4f32 _val1 = (v4f32)__msa_ld_w(tmpptr + 4, 0);
v4i32 _w0123 = __msa_ld_w(kptr0, 0);
_sum0 = __msa_fmadd_w(_sum0, _val0, (v4f32)__msa_splati_w(_w0123, 0));
_sum1 = __msa_fmadd_w(_sum1, _val1, (v4f32)__msa_splati_w(_w0123, 0));
_sum2 = __msa_fmadd_w(_sum2, _val0, (v4f32)__msa_splati_w(_w0123, 1));
_sum3 = __msa_fmadd_w(_sum3, _val1, (v4f32)__msa_splati_w(_w0123, 1));
_sum4 = __msa_fmadd_w(_sum4, _val0, (v4f32)__msa_splati_w(_w0123, 2));
_sum5 = __msa_fmadd_w(_sum5, _val1, (v4f32)__msa_splati_w(_w0123, 2));
_sum6 = __msa_fmadd_w(_sum6, _val0, (v4f32)__msa_splati_w(_w0123, 3));
_sum7 = __msa_fmadd_w(_sum7, _val1, (v4f32)__msa_splati_w(_w0123, 3));
tmpptr += 8;
kptr0 += 4;
}
__msa_st_w((v4i32)_sum0, outptr0, 0);
__msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
__msa_st_w((v4i32)_sum2, outptr1, 0);
__msa_st_w((v4i32)_sum3, outptr1 + 4, 0);
__msa_st_w((v4i32)_sum4, outptr2, 0);
__msa_st_w((v4i32)_sum5, outptr2 + 4, 0);
__msa_st_w((v4i32)_sum6, outptr3, 0);
__msa_st_w((v4i32)_sum7, outptr3 + 4, 0);
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
outptr3 += 8;
}
for (; i + 3 < size; i += 4)
{
const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* kptr0 = kernel.channel(p / 4);
int nn = inch * maxk * 4; // inch always > 0
v4f32 _sum0 = (v4f32)__msa_ld_w(biasptr, 0);
v4f32 _sum1 = (v4f32)__msa_ld_w(biasptr, 0);
v4f32 _sum2 = (v4f32)__msa_ld_w(biasptr, 0);
v4f32 _sum3 = (v4f32)__msa_ld_w(biasptr, 0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(tmpptr + 32);
__builtin_prefetch(kptr0 + 32);
v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
v4i32 _w0123 = __msa_ld_w(kptr0, 0);
_sum0 = __msa_fmadd_w(_sum0, _val0, (v4f32)__msa_splati_w(_w0123, 0));
_sum1 = __msa_fmadd_w(_sum1, _val0, (v4f32)__msa_splati_w(_w0123, 1));
_sum2 = __msa_fmadd_w(_sum2, _val0, (v4f32)__msa_splati_w(_w0123, 2));
_sum3 = __msa_fmadd_w(_sum3, _val0, (v4f32)__msa_splati_w(_w0123, 3));
tmpptr += 4;
kptr0 += 4;
}
__msa_st_w((v4i32)_sum0, outptr0, 0);
__msa_st_w((v4i32)_sum1, outptr1, 0);
__msa_st_w((v4i32)_sum2, outptr2, 0);
__msa_st_w((v4i32)_sum3, outptr3, 0);
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
}
for (; i < size; i++)
{
const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
const float* kptr0 = kernel.channel(p / 4);
int nn = inch * maxk * 4; // inch always > 0
v4f32 _sum = (v4f32)__msa_ld_w(biasptr, 0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(tmpptr + 8);
__builtin_prefetch(kptr0 + 32);
v4f32 _val0 = __msa_fill_w_f32(*tmpptr++);
v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0);
_sum = __msa_fmadd_w(_sum, _val0, _w0);
kptr0 += 4;
}
outptr0[0] = _sum[0];
outptr1[0] = _sum[1];
outptr2[0] = _sum[2];
outptr3[0] = _sum[3];
outptr0 += 1;
outptr1 += 1;
outptr2 += 1;
outptr3 += 1;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
float* outptr0 = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
int i = 0;
for (; i + 11 < size; i += 12)
{
const float* tmpptr = tmp.channel(i / 12);
const float* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = inch * maxk * 4; // inch always > 0
v4f32 _sum0 = __msa_fill_w_f32(bias0);
v4f32 _sum1 = __msa_fill_w_f32(bias0);
v4f32 _sum2 = __msa_fill_w_f32(bias0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(tmpptr + 64);
__builtin_prefetch(kptr0 + 8);
v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
v4f32 _val1 = (v4f32)__msa_ld_w(tmpptr + 4, 0);
v4f32 _val2 = (v4f32)__msa_ld_w(tmpptr + 8, 0);
v4f32 _w0 = __msa_fill_w_f32(*kptr0);
_sum0 = __msa_fmadd_w(_sum0, _w0, _val0);
_sum1 = __msa_fmadd_w(_sum1, _w0, _val1);
_sum2 = __msa_fmadd_w(_sum2, _w0, _val2);
tmpptr += 12;
kptr0 += 1;
}
__msa_st_w((v4i32)_sum0, outptr0, 0);
__msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
__msa_st_w((v4i32)_sum2, outptr0 + 8, 0);
outptr0 += 12;
}
for (; i + 7 < size; i += 8)
{
const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const float* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = inch * maxk * 4; // inch always > 0
v4f32 _sum0 = __msa_fill_w_f32(bias0);
v4f32 _sum1 = __msa_fill_w_f32(bias0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(tmpptr + 32);
__builtin_prefetch(kptr0 + 8);
v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
v4f32 _val1 = (v4f32)__msa_ld_w(tmpptr + 4, 0);
v4f32 _w0 = __msa_fill_w_f32(*kptr0);
_sum0 = __msa_fmadd_w(_sum0, _w0, _val0);
_sum1 = __msa_fmadd_w(_sum1, _w0, _val1);
tmpptr += 8;
kptr0 += 1;
}
__msa_st_w((v4i32)_sum0, outptr0, 0);
__msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
outptr0 += 8;
}
for (; i + 3 < size; i += 4)
{
const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = inch * maxk * 4; // inch always > 0
v4f32 _sum0 = __msa_fill_w_f32(bias0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(tmpptr + 16);
__builtin_prefetch(kptr0 + 8);
v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
v4f32 _w0 = __msa_fill_w_f32(*kptr0);
_sum0 = __msa_fmadd_w(_sum0, _w0, _val0);
tmpptr += 4;
kptr0 += 1;
}
__msa_st_w((v4i32)_sum0, outptr0, 0);
outptr0 += 4;
}
for (; i < size; i++)
{
const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
const float* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = inch * maxk; // inch always > 0
float sum0 = bias0;
v4f32 _sum0 = (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(tmpptr + 16);
__builtin_prefetch(kptr0 + 16);
v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0);
_sum0 = __msa_fmadd_w(_sum0, _val0, _w0);
tmpptr += 4;
kptr0 += 4;
}
sum0 += __msa_fhadd_w(_sum0);
outptr0[0] = sum0;
outptr0 += 1;
}
}
}
static void convolution_im2col_sgemm_transform_kernel_pack4to1_msa(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
// interleave
// src = maxk-inch-outch
// dst = pb-pa-maxk-inch/pa-outch/pb
Mat kernel = _kernel.reshape(maxk, inch, outch);
kernel_tm.create(4 * 4 * maxk, inch / 4, outch / 4 + outch % 4, 4u);
int q = 0;
for (; q + (4 - 1) < outch; q += 4)
{
float* g00 = kernel_tm.channel(q / 4);
for (int p = 0; p + (4 - 1) < inch; p += 4)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 4; j++)
{
const float* k00 = kernel.channel(q + j).row(p + i);
g00[0] = k00[k];
g00++;
}
}
}
}
}
for (; q < outch; q++)
{
const Mat k0 = kernel.channel(q);
float* g00 = kernel_tm.channel(q / 4 + q % 4);
for (int p = 0; p + (4 - 1) < inch; p += 4)
{
for (int k = 0; k < maxk; k++)
{
for (int j = 0; j < 4; j++)
{
const float* k00 = k0.row(p + j);
g00[0] = k00[k];
g00++;
}
}
}
}
}
// Convolution as im2col + sgemm for pack4 input / pack1 output (MIPS MSA).
// Unrolls every 4-channel input patch into bottom_im2col (w=size, h=maxk,
// c=inch, 16-byte elements), then runs the packed matrix multiply.
static void convolution_im2col_sgemm_pack4to1_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int size = outw * outh;
const int maxk = kernel_w * kernel_h;
// im2col
Mat bottom_im2col(size, maxk, inch, 4u * 4, 4, opt.workspace_allocator);
{
// gap jumps from the end of one output row to the start of the next
// (in floats; the factor 4 is the pack4 element width).
const int gap = (w * stride_h - outw * stride_w) * 4;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const Mat img = bottom_blob.channel(p);
float* ptr = bottom_im2col.channel(p);
// One pass per kernel tap (u, v); each pass copies a strided outh x outw
// window of 4-float elements into a contiguous row of bottom_im2col.
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
// Start at the tap's dilated offset; row stepping is done via gap.
const float* sptr = img.row(dilation_h * u) + dilation_w * v * 4;
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
v4f32 _val = (v4f32)__msa_ld_w(sptr, 0);
__msa_st_w((v4i32)_val, ptr, 0);
sptr += stride_w * 4;
ptr += 4;
}
sptr += gap;
}
}
}
}
}
im2col_sgemm_pack4to1_msa(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
MiniBow2.h | /**
* Original File: TemplatedVocabulary.h
* Original Author: Dorian Galvez-Lopez
*
* Modified by: Darius Rückert
* Modifications:
* - Moved everything into this single header file
* - Removed support for non-ORB feature descriptors
* - Optimized loading, saving, matching
* - Removed dependency to opencv
*
* Original License: BSD-like
* https://github.com/dorian3d/DBoW2/blob/master/LICENSE.txt
* License of modifications: MIT
* https://github.com/darglein/DBoW2/blob/master/LICENSE.txt
*
*/
#pragma once
#include "saiga/core/time/all.h"
#include "saiga/core/util/BinaryFile.h"
#include "saiga/vision/features/Features.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <map>
#include <numeric>
#include <string>
#include <vector>
namespace MiniBow2
{
using WordId = int;
using WordValue = float;
using NodeId = int;
using Descriptor = Saiga::DescriptorORB;
class BowVector : public std::vector<std::pair<WordId, WordValue>>
// class BowVector : public std::map<WordId, WordValue>
{
public:
void set(std::vector<std::pair<WordId, WordValue>>& words)
{
#if 1
reserve(words.size());
std::sort(words.begin(), words.end(), [](const auto& a, const auto& b) { return a.first < b.first; });
WordId current = -1;
for (auto& f : words)
{
// SAIGA_ASSERT(f.first >= 0);
if (f.first == -1) continue;
if (f.first != current)
{
push_back({f.first, f.second});
current = f.first;
}
else
{
back().second += (f.second);
}
}
#else
for (auto& f : words)
{
this->operator[](f.first) += f.second;
}
#endif
normalize();
}
/**
* L1-Normalizes the values in the vector
* @param norm_type norm used
*/
void normalize()
{
WordValue norm = 0.0;
{
for (auto it = begin(); it != end(); ++it) norm += std::abs(it->second);
}
if (norm > 0.0)
{
for (auto it = begin(); it != end(); ++it) it->second /= norm;
}
}
};
// Maps vocabulary node ids to the indices of the features that fell under
// that node, stored as (node id, feature indices) pairs sorted by node id.
class FeatureVector : public std::vector<std::pair<NodeId, std::vector<int>>>
// class FeatureVector : public std::map<NodeId, std::vector<int>>
{
public:
// Builds the vector from unsorted (node id, feature index) pairs.
// Entries with node id -1 are dropped; entries sharing a node id are
// merged into one index list.
void setFeatures(std::vector<std::pair<NodeId, int>>& features)
{
#if 1
reserve(features.size());
std::sort(features.begin(), features.end(), [](const auto& a, const auto& b) { return a.first < b.first; });
NodeId current = -1;
for (auto& f : features)
{
if (f.first == -1) continue;
SAIGA_ASSERT(f.first >= 0);
if (f.first != current)
{
// First feature for this node: open a new entry.
push_back({f.first, { f.second }});
current = f.first;
}
else
{
// Same node as the previous entry: append the feature index.
back().second.push_back(f.second);
}
}
#else
for (auto& f : features)
{
this->operator[](f.first).push_back(f.second);
}
#endif
}
// L1 similarity score between two sorted bow vectors:
// 0.5 * sum over common words of (|vi| + |wi| - |vi - wi|).
// Both inputs must already be sorted by word id (as BowVector::set produces).
static WordValue score(const BowVector& v1, const BowVector& v2)
{
BowVector::const_iterator v1_it, v2_it;
const BowVector::const_iterator v1_end = v1.end();
const BowVector::const_iterator v2_end = v2.end();
v1_it = v1.begin();
v2_it = v2.begin();
WordValue score = 0;
while (v1_it != v1_end && v2_it != v2_end)
{
const WordValue& vi = v1_it->second;
const WordValue& wi = v2_it->second;
if (v1_it->first == v2_it->first)
{
// Accumulate -(|vi| + |wi| - |vi - wi|); sign is flipped at the end.
score += std::abs(vi - wi) - std::abs(vi) - std::abs(wi);
++v1_it;
++v2_it;
}
else if (v1_it->first < v2_it->first)
{
// Skip ahead to the first entry of v1 not smaller than v2's word id.
v1_it = std::lower_bound(v1_it, v1.end(), *v2_it,
[](const auto& a, const auto& b) { return a.first < b.first; });
}
else
{
v2_it = std::lower_bound(v2_it, v2.end(), *v1_it,
[](const auto& a, const auto& b) { return a.first < b.first; });
}
}
score = score * WordValue(-0.5);
return score;
}
};
// Hierarchical bag-of-words vocabulary: a k-ary tree of depth L whose leaves
// are the words. Built with hierarchical k-means over training descriptors.
template <class Descriptor>
class TemplatedVocabulary
{
public:
using Scoring = FeatureVector;
/**
* Initiates an empty vocabulary
* @param k branching factor
* @param L depth levels
* @param weighting weighting type
* @param scoring scoring type
*/
TemplatedVocabulary(int k = 10, int L = 5) : m_k(k), m_L(L) {}
/**
* Creates the vocabulary by loading a file
* @param filename
*/
TemplatedVocabulary(const std::string& filename) { loadRaw(filename); }
/**
* Creates a vocabulary from the training features with the already
* defined parameters
* @param training_features
*/
void create(const std::vector<std::vector<Descriptor>>& training_features);
/**
* Creates a vocabulary from the training features, setting the branching
* factor and the depth levels of the tree
* @param training_features
* @param k branching factor
* @param L depth levels
*/
void create(const std::vector<std::vector<Descriptor>>& training_features, int k, int L)
{
m_k = k;
m_L = L;
create(training_features);
}
/**
* Returns the number of words in the vocabulary
* @return number of words
*/
inline unsigned int size() const { return m_words.size(); }
/**
* Returns whether the vocabulary is empty (i.e. it has not been trained)
* @return true iff the vocabulary is empty
*/
inline bool empty() const { return m_words.empty(); }
/**
* Transforms a set of descriptores into a bow vector
* @param features
* @param v (out) bow vector of weighted words
*/
// void transform(const std::vector<TDescriptor>& features, BowVector& v) const;
/**
* Transform a set of descriptors into a bow vector and a feature vector
* @param features
* @param v (out) bow vector
* @param fv (out) feature vector of nodes and feature indexes
* @param levelsup levels to go up the vocabulary tree to get the node index
*/
void transform(const std::vector<Descriptor>& features, BowVector& v, FeatureVector& fv, int levelsup,
int num_threads = 1) const;
/**
* Returns the score of two vectors
* @param a vector
* @param b vector
* @return score between vectors
* @note the vectors must be already sorted and normalized if necessary
*/
inline WordValue score(const BowVector& a, const BowVector& b) const { return Scoring::score(a, b); }
/**
* Returns the id of the node that is "levelsup" levels from the word given
* @param wid word id
* @param levelsup 0..L
* @return node id. if levelsup is 0, returns the node id associated to the
* word id
*/
NodeId getParentNode(WordId wid, int levelsup) const;
/**
* Returns the ids of all the words that are under the given node id,
* by traversing any of the branches that goes down from the node
* @param nid starting node id
* @param words ids of words
*/
void getWordsFromNode(NodeId nid, std::vector<WordId>& words) const;
/**
* Returns the branching factor of the tree (k)
* @return k
*/
inline int getBranchingFactor() const { return m_k; }
/**
* Returns the depth levels of the tree (L)
* @return L
*/
inline int getDepthLevels() const { return m_L; }
/**
* Returns the real depth levels of the tree on average
* @return average of depth levels of leaves
*/
float getEffectiveLevels() const;
/**
* Returns the descriptor of a word
* @param wid word id
* @return descriptor
*/
inline Descriptor getWord(WordId wid) const { return m_words[wid]->descriptor; }
/**
* Returns the weight of a word
* @param wid word id
* @return weight
*/
inline WordValue getWordWeight(WordId wid) const { return m_words[wid]->weight; }
/**
* Saves / loads the vocabulary in a raw binary format.
* @param file path of the vocabulary file
*/
void saveRaw(const std::string& file) const;
void loadRaw(const std::string& file);
protected:
/// Pointer to descriptor
typedef const Descriptor* pDescriptor;
/// Tree node
struct Node
{
/// Node id
NodeId id;
/// Weight if the node is a word
WordValue weight;
/// Children
std::vector<NodeId> children;
/// Parent node (undefined in case of root)
NodeId parent;
/// Node descriptor
Descriptor descriptor;
/// Word id if the node is a word
WordId word_id;
/**
* Empty constructor
*/
Node() : id(0), weight(0), parent(0), word_id(0) {}
/**
* Constructor
* @param _id node id
*/
Node(NodeId _id) : id(_id), weight(0), parent(0), word_id(0) {}
/**
* Returns whether the node is a leaf node
* @return true iff the node is a leaf
*/
inline bool isLeaf() const { return children.empty(); }
};
protected:
/**
* Returns a set of pointers to descriptores
* @param training_features all the features
* @param features (out) pointers to the training features
*/
void getFeatures(const std::vector<std::vector<Descriptor>>& training_features,
std::vector<pDescriptor>& features) const;
/**
* Returns the word id associated to a feature
* @param feature
* @param id (out) word id
* @param weight (out) word weight
* @param nid (out) if given, id of the node "levelsup" levels up
* @param levelsup
*/
std::tuple<WordId, WordValue, NodeId> transform(const Descriptor& feature, int levelsup) const;
/**
* Creates a level in the tree, under the parent, by running kmeans with
* a descriptor set, and recursively creates the subsequent levels too
* @param parent_id id of parent node
* @param descriptors descriptors to run the kmeans on
* @param current_level current level in the tree
*/
void HKmeansStep(NodeId parent_id, const std::vector<pDescriptor>& descriptors, int current_level);
/**
* Creates k clusters from the given descriptors with some seeding algorithm.
* @note In this class, kmeans++ is used, but this function should be
* overriden by inherited classes.
*/
void initiateClusters(const std::vector<pDescriptor>& descriptors, std::vector<Descriptor>& clusters) const
{
initiateClustersKMpp(descriptors, clusters);
}
/**
* Creates k clusters from the given descriptor sets by running the
* initial step of kmeans++
* @param descriptors
* @param clusters resulting clusters
*/
void initiateClustersKMpp(const std::vector<pDescriptor>& descriptors, std::vector<Descriptor>& clusters) const;
/**
* Create the words of the vocabulary once the tree has been built
*/
void createWords();
/**
* Sets the weights of the nodes of tree according to the given features.
* Before calling this function, the nodes and the words must be already
* created (by calling HKmeansStep and createWords)
* @param features
*/
void setNodeWeights(const std::vector<std::vector<Descriptor>>& features);
/**
* Returns a random number in the range [min..max]
* @param min
* @param max
* @return random T number in [min..max]
* @note uses rand(); results depend on the global srand() seed
*/
template <class T>
static T RandomValue(T min, T max)
{
return ((T)rand() / (T)RAND_MAX) * (max - min) + min;
}
/**
* Returns a random int in the range [min..max]
* @param min
* @param max
* @return random int in [min..max]
*/
static int RandomInt(int min, int max)
{
int d = max - min + 1;
return int(((double)rand() / ((double)RAND_MAX + 1.0)) * d) + min;
}
protected:
/// Branching factor
int m_k;
/// Depth levels
int m_L;
/// Tree nodes
std::vector<Node> m_nodes;
/// Words of the vocabulary (tree leaves)
/// this condition holds: m_words[wid]->word_id == wid
std::vector<Node*> m_words;
// Scratch buffers written by the const transform() overload (hence mutable).
// NOTE(review): shared per-instance state — concurrent transform() calls on
// the same vocabulary object would race on these; confirm callers serialize.
mutable std::vector<std::pair<WordId, WordValue>> tmp_bow_data;
mutable std::vector<std::pair<NodeId, int>> tmp_feature_data;
};
// --------------------------------------------------------------------------
// --------------------------------------------------------------------------
// Builds the full vocabulary tree from the training images' descriptors.
template <class Descriptor>
void TemplatedVocabulary<Descriptor>::create(const std::vector<std::vector<Descriptor>>& training_features)
{
    m_nodes.clear();
    m_words.clear();

    // A full k-ary tree of depth L has sum_{i=0..L} k^i nodes
    // (geometric series); reserve up front to avoid reallocations.
    const int expected_nodes = (int)((std::pow((double)m_k, (double)m_L + 1) - 1) / (m_k - 1));
    m_nodes.reserve(expected_nodes);

    // Flatten all training descriptors into one pointer list.
    std::vector<pDescriptor> features;
    getFeatures(training_features, features);

    m_nodes.push_back(Node(0)); // root node

    HKmeansStep(0, features, 1);       // grow the tree recursively
    createWords();                     // collect leaves as words
    setNodeWeights(training_features); // assign idf weights to the words
}
// --------------------------------------------------------------------------
// --------------------------------------------------------------------------
// Collects a pointer to every descriptor of every training image into one
// flat list (pointers remain owned by training_features).
template <class Descriptor>
void TemplatedVocabulary<Descriptor>::getFeatures(const std::vector<std::vector<Descriptor>>& training_features,
                                                  std::vector<pDescriptor>& features) const
{
    features.clear();
    for (const auto& image_features : training_features)
    {
        features.reserve(features.size() + image_features.size());
        for (const auto& descriptor : image_features)
        {
            features.push_back(&descriptor);
        }
    }
}
// --------------------------------------------------------------------------
// --------------------------------------------------------------------------
// One level of hierarchical k-means: clusters the given descriptors into up
// to m_k groups under parent_id, creates a child node per cluster, and
// recurses into each cluster until current_level reaches m_L.
template <class Descriptor>
void TemplatedVocabulary<Descriptor>::HKmeansStep(NodeId parent_id, const std::vector<pDescriptor>& descriptors,
int current_level)
{
if (descriptors.empty()) return;
// features associated to each cluster
std::vector<Descriptor> clusters;
std::vector<std::vector<unsigned int>> groups; // groups[i] = [j1, j2, ...]
// j1, j2, ... indices of descriptors associated to cluster i
clusters.reserve(m_k);
groups.reserve(m_k);
// const int msizes[] = { m_k, descriptors.size() };
// cv::SparseMat assoc(2, msizes, CV_8U);
// cv::SparseMat last_assoc(2, msizes, CV_8U);
//// assoc.row(cluster_idx).col(descriptor_idx) = 1 iif associated
if ((int)descriptors.size() <= m_k)
{
// trivial case: one cluster per feature
groups.resize(descriptors.size());
for (unsigned int i = 0; i < descriptors.size(); i++)
{
groups[i].push_back(i);
#ifdef USE_CV_FORB
clusters.push_back(descriptors[i]->clone());
#else
clusters.push_back(*descriptors[i]);
#endif
}
}
else
{
// select clusters and groups with kmeans
bool first_time = true;
bool goon = true;
// to check if clusters move after iterations
std::vector<int> last_association, current_association;
// Lloyd iterations: repeat until the descriptor->cluster assignment
// stops changing between two consecutive passes.
while (goon)
{
// 1. Calculate clusters
if (first_time)
{
// random sample (kmeans++ seeding via initiateClusters)
initiateClusters(descriptors, clusters);
}
else
{
// calculate cluster centres
for (unsigned int c = 0; c < clusters.size(); ++c)
{
std::vector<pDescriptor> cluster_descriptors;
cluster_descriptors.reserve(groups[c].size());
/*
for(unsigned int d = 0; d < descriptors.size(); ++d)
{
if( assoc.find<unsigned char>(c, d) )
{
cluster_descriptors.push_back(descriptors[d]);
}
}
*/
std::vector<unsigned int>::const_iterator vit;
for (vit = groups[c].begin(); vit != groups[c].end(); ++vit)
{
cluster_descriptors.push_back(descriptors[*vit]);
}
Saiga::MeanMatcher<Descriptor> mm;
clusters[c] = mm.MeanDescriptorp(cluster_descriptors);
// clusters[c] = F::meanValue(cluster_descriptors);
}
} // if(!first_time)
// 2. Associate features with clusters
// calculate distances to cluster centers
groups.clear();
groups.resize(clusters.size(), std::vector<unsigned int>());
current_association.resize(descriptors.size());
// assoc.clear();
typename std::vector<pDescriptor>::const_iterator fit;
// unsigned int d = 0;
for (fit = descriptors.begin(); fit != descriptors.end(); ++fit) //, ++d)
{
// Assign each descriptor to its nearest cluster centre.
auto best_dist = Saiga::distance(*(*fit), clusters[0]);
unsigned int icluster = 0;
for (unsigned int c = 1; c < clusters.size(); ++c)
{
auto dist = Saiga::distance(*(*fit), clusters[c]);
if (dist < best_dist)
{
best_dist = dist;
icluster = c;
}
}
// assoc.ref<unsigned char>(icluster, d) = 1;
groups[icluster].push_back(fit - descriptors.begin());
current_association[fit - descriptors.begin()] = icluster;
}
// kmeans++ ensures all the clusters has any feature associated with them
// 3. check convergence
if (first_time)
{
// No previous association to compare against yet; iterate once more.
first_time = false;
}
else
{
// goon = !eqUChar(last_assoc, assoc);
goon = false;
for (unsigned int i = 0; i < current_association.size(); i++)
{
if (current_association[i] != last_association[i])
{
goon = true;
break;
}
}
}
if (goon)
{
// copy last feature-cluster association
last_association = current_association;
// last_assoc = assoc.clone();
}
} // while(goon)
} // if must run kmeans
// create nodes
for (unsigned int i = 0; i < clusters.size(); ++i)
{
NodeId id = m_nodes.size();
m_nodes.push_back(Node(id));
m_nodes.back().descriptor = clusters[i];
m_nodes.back().parent = parent_id;
m_nodes[parent_id].children.push_back(id);
}
// go on with the next level
if (current_level < m_L)
{
// iterate again with the resulting clusters
const std::vector<NodeId>& children_ids = m_nodes[parent_id].children;
for (unsigned int i = 0; i < clusters.size(); ++i)
{
NodeId id = children_ids[i];
std::vector<pDescriptor> child_features;
child_features.reserve(groups[i].size());
std::vector<unsigned int>::const_iterator vit;
for (vit = groups[i].begin(); vit != groups[i].end(); ++vit)
{
child_features.push_back(descriptors[*vit]);
}
// Single-descriptor clusters are already leaves; no further split.
if (child_features.size() > 1)
{
HKmeansStep(id, child_features, current_level + 1);
}
}
}
}
// --------------------------------------------------------------------------
// --------------------------------------------------------------------------
// kmeans++ seeding: selects up to m_k initial cluster centres, favouring
// descriptors far from the centres already chosen. Uses the global rand()
// state (seeded elsewhere, if at all).
template <class Descriptor>
void TemplatedVocabulary<Descriptor>::initiateClustersKMpp(const std::vector<pDescriptor>& pfeatures,
std::vector<Descriptor>& clusters) const
{
// Implements kmeans++ seeding algorithm
// Algorithm:
// 1. Choose one center uniformly at random from among the data points.
// 2. For each data point x, compute D(x), the distance between x and the nearest
// center that has already been chosen.
// 3. Add one new data point as a center. Each point x is chosen with probability
// proportional to D(x)^2.
// 4. Repeat Steps 2 and 3 until k centers have been chosen.
// 5. Now that the initial centers have been chosen, proceed using standard k-means
// clustering.
clusters.resize(0);
clusters.reserve(m_k);
// min_dists[i] = distance from pfeatures[i] to its nearest chosen centre.
std::vector<double> min_dists(pfeatures.size(), std::numeric_limits<double>::max());
// 1.
int ifeature = RandomInt(0, pfeatures.size() - 1);
// create first cluster
#ifdef USE_CV_FORB
clusters.push_back(pfeatures[ifeature]->clone());
#else
clusters.push_back(*pfeatures[ifeature]);
#endif
// compute the initial distances
typename std::vector<pDescriptor>::const_iterator fit;
std::vector<double>::iterator dit;
dit = min_dists.begin();
for (fit = pfeatures.begin(); fit != pfeatures.end(); ++fit, ++dit)
{
*dit = Saiga::distance(*(*fit), clusters.back());
}
while ((int)clusters.size() < m_k)
{
// 2. Refresh min distances against the most recently added centre only;
// earlier centres are already accounted for in min_dists.
dit = min_dists.begin();
for (fit = pfeatures.begin(); fit != pfeatures.end(); ++fit, ++dit)
{
if (*dit > 0)
{
auto dist = Saiga::distance(*(*fit), clusters.back());
if (dist < *dit) *dit = dist;
}
}
// 3. Sample the next centre with probability proportional to its distance
// (note: proportional to D(x), not D(x)^2 as in the comment above).
double dist_sum = std::accumulate(min_dists.begin(), min_dists.end(), 0.0);
if (dist_sum > 0)
{
double cut_d;
do
{
cut_d = RandomValue<double>(0, dist_sum);
} while (cut_d == 0.0);
// Walk the cumulative distance until the random cut point is reached.
double d_up_now = 0;
for (dit = min_dists.begin(); dit != min_dists.end(); ++dit)
{
d_up_now += *dit;
if (d_up_now >= cut_d) break;
}
if (dit == min_dists.end())
ifeature = pfeatures.size() - 1;
else
ifeature = dit - min_dists.begin();
#ifdef USE_CV_FORB
clusters.push_back(pfeatures[ifeature]->clone());
#else
clusters.push_back(*pfeatures[ifeature]);
#endif
} // if dist_sum > 0
else
break;
} // while(used_clusters < m_k)
}
// --------------------------------------------------------------------------
// --------------------------------------------------------------------------
// Collects every leaf of the tree as a word, assigning word ids so that
// m_words[wid]->word_id == wid. The root (node 0) is never a word.
template <class Descriptor>
void TemplatedVocabulary<Descriptor>::createWords()
{
    m_words.clear();
    if (m_nodes.empty()) return;

    // Upper bound on the leaf count for a full tree of depth m_L.
    m_words.reserve((int)pow((double)m_k, (double)m_L));

    // Start at index 1 to skip the root node.
    for (size_t idx = 1; idx < m_nodes.size(); ++idx)
    {
        Node& node = m_nodes[idx];
        if (node.isLeaf())
        {
            node.word_id = m_words.size();
            m_words.push_back(&node);
        }
    }
}
// --------------------------------------------------------------------------
// --------------------------------------------------------------------------
// Assigns idf weights to the words: weight(w) = ln(NDocs / Ni[w]), where
// Ni[w] counts the training images containing word w at least once.
// (The tf part of tf-idf is applied later, in transform.)
template <class Descriptor>
void TemplatedVocabulary<Descriptor>::setNodeWeights(const std::vector<std::vector<Descriptor>>& training_features)
{
    const unsigned int NWords = m_words.size();
    const unsigned int NDocs = training_features.size();

    std::vector<unsigned int> Ni(NWords, 0);   // document frequency per word
    std::vector<bool> counted(NWords, false);  // seen-in-current-image flag

    for (const auto& image_features : training_features)
    {
        std::fill(counted.begin(), counted.end(), false);
        for (const auto& f : image_features)
        {
            // Each word is counted at most once per image.
            const WordId word_id = std::get<0>(transform(f, 0));
            if (!counted[word_id])
            {
                Ni[word_id]++;
                counted[word_id] = true;
            }
        }
    }

    // set ln(N/Ni)
    for (unsigned int i = 0; i < NWords; i++)
    {
        if (Ni[i] > 0)
        {
            m_words[i]->weight = log((double)NDocs / (double)Ni[i]);
        } // else // This cannot occur if using kmeans++
    }
}
// --------------------------------------------------------------------------
// --------------------------------------------------------------------------
// Average depth of the word leaves (root counts as depth 0).
template <class Descriptor>
float TemplatedVocabulary<Descriptor>::getEffectiveLevels() const
{
    long total_depth = 0;
    for (const Node* word : m_words)
    {
        // Walk from the leaf up to the root, counting the hops.
        const Node* node = word;
        while (node->id != 0)
        {
            ++total_depth;
            node = &m_nodes[node->parent];
        }
    }
    return (float)((double)total_depth / (double)m_words.size());
}
// --------------------------------------------------------------------------
// --------------------------------------------------------------------------
// Transforms a set of descriptors into a BowVector and a FeatureVector,
// optionally parallelizing the per-descriptor tree lookups with OpenMP.
// Results are first gathered into the mutable scratch buffers, then merged.
template <class Descriptor>
void TemplatedVocabulary<Descriptor>::transform(const std::vector<Descriptor>& features, BowVector& v,
FeatureVector& fv, int levelsup, int num_threads) const
{
SAIGA_ASSERT(num_threads > 0);
int N = features.size();
tmp_bow_data.resize(N);
tmp_feature_data.resize(N);
v.clear();
fv.clear();
#pragma omp parallel num_threads(num_threads)
{
// Each descriptor is looked up independently; results land in slot i,
// so no synchronization is needed inside the loop.
#pragma omp for
for (int i = 0; i < N; ++i)
{
auto [word_id, weight, nid] = transform(features[i], levelsup);
if (weight > 0)
{
tmp_bow_data[i] = {word_id, weight};
tmp_feature_data[i] = {nid, i};
}
else
{
// Zero-weight words are marked with id -1 and skipped by
// BowVector::set / FeatureVector::setFeatures.
tmp_bow_data[i] = {-1, weight};
tmp_feature_data[i] = {-1, i};
}
}
#ifdef WIN32
// Sequential merge on Windows (no task construct used there).
# pragma omp single
{
v.set(tmp_bow_data);
fv.setFeatures(tmp_feature_data);
}
#else
// Merge the two outputs concurrently: one thread runs the task for v,
// the single-thread continues with fv. The implicit barrier at the end
// of 'single' waits for both.
# pragma omp single
{
# pragma omp task
{
v.set(tmp_bow_data);
}
fv.setFeatures(tmp_feature_data);
}
#endif
}
}
// --------------------------------------------------------------------------
template <class Descriptor>
std::tuple<WordId, WordValue, NodeId> TemplatedVocabulary<Descriptor>::transform(const Descriptor& feature,
                                                                                 int levelsup) const
{
    // Greedily descend the vocabulary tree: at each level follow the child
    // whose descriptor is closest to 'feature', until a leaf is reached.
    // 'nid' records the node passed at level (m_L - levelsup), if any.
    const int nid_level = m_L - levelsup;
    NodeId nid = 0;
    NodeId current = 0;  // root
    int level = 0;
    while (true)
    {
        ++level;
        const auto& children = m_nodes[current].children;
        NodeId best_child = children[0];
        auto best_dist = Saiga::distance(feature, m_nodes[best_child].descriptor);
        for (size_t c = 1; c < children.size(); ++c)
        {
            auto dist = Saiga::distance(feature, m_nodes[children[c]].descriptor);
            if (dist < best_dist)
            {
                best_dist  = dist;
                best_child = children[c];
            }
        }
        current = best_child;
        if (level == nid_level) nid = current;
        if (m_nodes[current].isLeaf()) break;
    }
    // Map the final leaf node to its word id and idf weight.
    return {m_nodes[current].word_id, m_nodes[current].weight, nid};
}
// --------------------------------------------------------------------------
template <class Descriptor>
NodeId TemplatedVocabulary<Descriptor>::getParentNode(WordId wid, int levelsup) const
{
    // Walk 'levelsup' levels up from the word's leaf node, stopping early
    // if the root (node id 0) is reached.
    NodeId node = m_words[wid]->id;
    for (int step = 0; step < levelsup && node != 0; ++step)
    {
        node = m_nodes[node].parent;
    }
    return node;
}
// --------------------------------------------------------------------------
template <class Descriptor>
void TemplatedVocabulary<Descriptor>::getWordsFromNode(NodeId nid, std::vector<WordId>& words) const
{
    // Collect the word ids of every leaf in the subtree rooted at 'nid'.
    words.clear();
    if (m_nodes[nid].isLeaf())
    {
        words.push_back(m_nodes[nid].word_id);
        return;
    }
    words.reserve(m_k);  // lower bound on the number of leaves below nid
    // Iterative depth-first traversal using an explicit stack.
    std::vector<NodeId> stack;
    stack.push_back(nid);
    while (!stack.empty())
    {
        NodeId current = stack.back();
        stack.pop_back();
        for (NodeId child_id : m_nodes[current].children)
        {
            const Node& child = m_nodes[child_id];
            if (child.isLeaf())
                words.push_back(child.word_id);
            else
                stack.push_back(child_id);
        }
    }
}
// Loads a vocabulary from the raw binary format produced by saveRaw().
// Throws std::runtime_error if the file cannot be opened.
template <class Descriptor>
void TemplatedVocabulary<Descriptor>::loadRaw(const std::string& file)
{
    Saiga::BinaryFile bf(file, std::ios_base::in);
    if (!bf.strm.is_open())
    {
        throw std::runtime_error("Could not load Voc file.");
    }
    // Legacy fields: scoring/weighting ids are read for format compatibility
    // but no longer used.
    int scoringid;
    int m_weighting_old;
    bf >> m_k >> m_L >> scoringid >> m_weighting_old;
    size_t nodecount;
    bf >> nodecount;
    m_nodes.resize(nodecount);
    for (Node& n : m_nodes)
    {
        double weight;  // stored on disk as double, assigned to the node's weight type
        bf >> n.id >> n.parent >> weight >> n.word_id >> n.descriptor;
        n.weight = weight;
        // Rebuild child lists; the root (id 0) has no parent entry.
        if (n.id != 0) m_nodes[n.parent].children.push_back(n.id);
    }
    // Word table: pairs of (word id, node index) mapping words to tree leaves.
    std::vector<std::pair<int, int>> words;
    bf >> words;
    m_words.resize(words.size());
    // size_t index: avoids the signed/unsigned comparison of the old `auto i = 0`.
    for (size_t i = 0; i < m_words.size(); ++i)
    {
        m_words[i] = &m_nodes[words[i].second];
    }
}
// Writes the vocabulary in the raw binary format understood by loadRaw().
template <class Descriptor>
void TemplatedVocabulary<Descriptor>::saveRaw(const std::string& file) const
{
    Saiga::BinaryFile bf(file, std::ios_base::out);
    // Legacy scoring/weighting ids are written as 0 for format compatibility.
    bf << m_k << m_L << int(0) << int(0);
    bf << (size_t)m_nodes.size();
    for (const Node& n : m_nodes)
    {
        double weight = n.weight;  // always stored as double on disk
        bf << n.id << n.parent << weight << n.word_id << n.descriptor;
    }
    // Word table: pairs of (word id, node index).
    std::vector<std::pair<int, int>> words;
    words.reserve(m_words.size());  // avoid repeated reallocation
    // size_t index: avoids the signed/unsigned comparison of the old `auto i = 0`;
    // the explicit cast makes the int narrowing of the pair's first element visible.
    for (size_t i = 0; i < m_words.size(); ++i)
    {
        words.emplace_back((int)i, m_words[i]->id);
    }
    bf << words;
}
// --------------------------------------------------------------------------
/**
 * Streams a printable one-line summary of the vocabulary.
 * @param os stream to write to
 * @param voc vocabulary to describe
 */
template <class Descriptor>
std::ostream& operator<<(std::ostream& os, const TemplatedVocabulary<Descriptor>& voc)
{
    // NOTE(review): the "Weighting" / "Scoring" labels are emitted without a
    // value (nothing is streamed after them) -- the output reads
    // "..., Weighting = , Scoring = , ...". Preserved as-is.
    os << "Vocabulary: k = " << voc.getBranchingFactor() << ", L = " << voc.getDepthLevels()
       << ", Weighting = " << ", Scoring = " << ", Number of words = " << voc.size();
    return os;
}
} // namespace MiniBow2
|
PatchSelect_core.c | /*
* This work is part of the Core Imaging Library developed by
* Visual Analytics and Imaging System Group of the Science Technology
* Facilities Council, STFC and Diamond Light Source Ltd.
*
* Copyright 2017 Daniil Kazantsev
* Copyright 2017 Srikanth Nagella, Edoardo Pasca
* Copyright 2018 Diamond Light Source Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "PatchSelect_core.h"
/* C-OMP implementation of non-local weight pre-calculation for non-local priors
* Weights and associated indices are stored into pre-allocated arrays and passed
* to the regulariser
*
*
* Input Parameters:
* 1. 2D/3D grayscale image/volume
* 2. Searching window (half-size of the main bigger searching window, e.g. 11)
* 3. Similarity window (half-size of the patch window, e.g. 2)
* 4. The number of neighbours to take (the most prominent after sorting neighbours will be taken)
* 5. noise-related parameter to calculate non-local weights
*
* Output [2D]:
* 1. AR_i - indeces of i neighbours
* 2. AR_j - indeces of j neighbours
* 3. Weights_ij - associated weights
*
* Output [3D]:
* 1. AR_i - indeces of i neighbours
* 2. AR_j - indeces of j neighbours
* 3. AR_k - indeces of j neighbours
* 4. Weights_ijk - associated weights
*/
/* Exchange two float values through their pointers. */
void swap(float *xp, float *yp)
{
    float tmp = *yp;
    *yp = *xp;
    *xp = tmp;
}
/* Exchange two unsigned short values through their pointers. */
void swapUS(unsigned short *xp, unsigned short *yp)
{
    unsigned short tmp = *yp;
    *yp = *xp;
    *xp = tmp;
}
/**************************************************/
/* Pre-computes non-local weights for every pixel (2D, when dimZ == 0) or
 * voxel (3D): the NumNeighb most similar patches inside the search window are
 * found and their indices/weights are written into the pre-allocated arrays
 * H_i/H_j(/H_k) and Weights. switchM == 1 selects the alternative index
 * ordering (Indeces2D_p in 2D, swapped i/j arguments in 3D).
 * Always returns 1. */
float PatchSelect_CPU_main(float *A, unsigned short *H_i, unsigned short *H_j, unsigned short *H_k, float *Weights, int dimX, int dimY, int dimZ, int SearchWindow, int SimilarWin, int NumNeighb, float h, int switchM)
{
int counterG;
long i, j, k;
float *Eucl_Vec, h2;
h2 = h*h;
/****************2D INPUT ***************/
if (dimZ == 0) {
/* generate a 2D Gaussian kernel for NLM procedure */
Eucl_Vec = (float*) calloc ((2*SimilarWin+1)*(2*SimilarWin+1),sizeof(float));
counterG = 0;
for(i=-SimilarWin; i<=SimilarWin; i++) {
for(j=-SimilarWin; j<=SimilarWin; j++) {
Eucl_Vec[counterG] = (float)exp(-(pow(((float) i), 2) + pow(((float) j), 2))/(2*SimilarWin*SimilarWin));
counterG++;
}} /*main neighb loop */
/* for each pixel store indeces of the most similar neighbours (patches) */
if (switchM == 1) {
/* switchM == 1: transposed index layout (see Indeces2D_p) */
#pragma omp parallel for shared (A, Weights, H_i, H_j) private(i,j)
for(i=0; i<(long)(dimX); i++) {
for(j=0; j<(long)(dimY); j++) {
Indeces2D_p(A, H_i, H_j, Weights, i, j, (long)(dimX), (long)(dimY), Eucl_Vec, NumNeighb, SearchWindow, SimilarWin, h2);
}}
}
else {
#pragma omp parallel for shared (A, Weights, H_i, H_j) private(i,j)
for(i=0; i<(long)(dimX); i++) {
for(j=0; j<(long)(dimY); j++) {
Indeces2D(A, H_i, H_j, Weights, i, j, (long)(dimX), (long)(dimY), Eucl_Vec, NumNeighb, SearchWindow, SimilarWin, h2);
}}
}
}
else {
/****************3D INPUT ***************/
/* generate a 3D Gaussian kernel for NLM procedure */
Eucl_Vec = (float*) calloc ((2*SimilarWin+1)*(2*SimilarWin+1)*(2*SimilarWin+1),sizeof(float));
counterG = 0;
for(i=-SimilarWin; i<=SimilarWin; i++) {
for(j=-SimilarWin; j<=SimilarWin; j++) {
for(k=-SimilarWin; k<=SimilarWin; k++) {
Eucl_Vec[counterG] = (float)exp(-(pow(((float) i), 2) + pow(((float) j), 2) + pow(((float) k), 2))/(2*SimilarWin*SimilarWin*SimilarWin));
counterG++;
}}} /*main neighb loop */
/* for each voxel store indeces of the most similar neighbours (patches) */
if (switchM == 1) {
/* NOTE(review): i and j are passed swapped here relative to the else
 * branch -- this appears to implement the alternative index ordering;
 * verify against the caller's expected layout. */
#pragma omp parallel for shared (A, Weights, H_i, H_j, H_k) private(i,j,k)
for(i=0; i<dimX; i++) {
for(j=0; j<dimY; j++) {
for(k=0; k<dimZ; k++) {
Indeces3D(A, H_i, H_j, H_k, Weights, j, i, (k), (dimX), (dimY), (dimZ), Eucl_Vec, NumNeighb, SearchWindow, SimilarWin, h2);
}}}
}
else {
#pragma omp parallel for shared (A, Weights, H_i, H_j, H_k) private(i,j,k)
for(i=0; i<dimX; i++) {
for(j=0; j<dimY; j++) {
for(k=0; k<dimZ; k++) {
Indeces3D(A, H_i, H_j, H_k, Weights, (i), (j), (k), (dimX), (dimY), (dimZ), Eucl_Vec, NumNeighb, SearchWindow, SimilarWin, h2);
}}}
}
}
free(Eucl_Vec);
return 1;
}
/* For one pixel (i,j): scan the (2*SearchWindow+1)^2 neighbourhood, compute a
 * Gaussian-weighted patch distance for every in-bounds candidate, convert it
 * to a similarity weight exp(-normsum/h2), sort the weights in descending
 * order and store the top NumNeighb indices/weights.
 * Output layout: plane x of size dimX*dimY at index (dimX*dimY*x) + j*dimX + i.
 * Always returns 1. */
float Indeces2D(float *Aorig, unsigned short *H_i, unsigned short *H_j, float *Weights, long i, long j, long dimX, long dimY, float *Eucl_Vec, int NumNeighb, int SearchWindow, int SimilarWin, float h2)
{
long i1, j1, i_m, j_m, i_c, j_c, i2, j2, i3, j3, counter, x, y, index, sizeWin_tot, counterG;
float *Weight_Vec, normsum;
unsigned short *ind_i, *ind_j;
sizeWin_tot = (2*SearchWindow + 1)*(2*SearchWindow + 1);
/* calloc zero-fills: entries past 'counter' stay zero */
Weight_Vec = (float*) calloc(sizeWin_tot, sizeof(float));
ind_i = (unsigned short*) calloc(sizeWin_tot, sizeof(unsigned short));
ind_j = (unsigned short*) calloc(sizeWin_tot, sizeof(unsigned short));
counter = 0;
/* loop over every candidate patch centre (i1,j1) inside the search window */
for(i_m=-SearchWindow; i_m<=SearchWindow; i_m++) {
for(j_m=-SearchWindow; j_m<=SearchWindow; j_m++) {
i1 = i+i_m;
j1 = j+j_m;
if (((i1 >= 0) && (i1 < dimX)) && ((j1 >= 0) && (j1 < dimY))) {
normsum = 0.0f; counterG = 0;
/* Gaussian-weighted SSD between the patch at (i,j) and the one at (i1,j1) */
for(i_c=-SimilarWin; i_c<=SimilarWin; i_c++) {
for(j_c=-SimilarWin; j_c<=SimilarWin; j_c++) {
i2 = i1 + i_c;
j2 = j1 + j_c;
i3 = i + i_c;
j3 = j + j_c;
if (((i2 >= 0) && (i2 < dimX)) && ((j2 >= 0) && (j2 < dimY))) {
if (((i3 >= 0) && (i3 < dimX)) && ((j3 >= 0) && (j3 < dimY))) {
normsum += Eucl_Vec[counterG]*pow(Aorig[j3*dimX + (i3)] - Aorig[j2*dimX + (i2)], 2);
counterG++;
}}
}}
/* writing temporarily into vectors */
if (normsum > EPS) {
Weight_Vec[counter] = expf(-normsum/h2);
ind_i[counter] = i1;
ind_j[counter] = j1;
counter++;
}
}
}}
/* do sorting to choose the most prominent weights [HIGH to LOW] */
/* and re-arrange indeces accordingly */
/* bubble sort, descending; index arrays are kept in step with the weights */
for (x = 0; x < counter-1; x++) {
for (y = 0; y < counter-x-1; y++) {
if (Weight_Vec[y] < Weight_Vec[y+1]) {
swap(&Weight_Vec[y], &Weight_Vec[y+1]);
swapUS(&ind_i[y], &ind_i[y+1]);
swapUS(&ind_j[y], &ind_j[y+1]);
}
}
}
/*sorting loop finished*/
/*now select the NumNeighb more prominent weights and store into pre-allocated arrays */
/* if counter < NumNeighb, the tail entries are the calloc zeros */
for(x=0; x < NumNeighb; x++) {
index = (dimX*dimY*x) + j*dimX+i;
H_i[index] = ind_i[x];
H_j[index] = ind_j[x];
Weights[index] = Weight_Vec[x];
}
free(ind_i);
free(ind_j);
free(Weight_Vec);
return 1;
}
/* Variant of Indeces2D with a transposed memory layout: the image is read as
 * Aorig[i*dimY + j] (row index i, stride dimY) and outputs are stored at
 * (dimX*dimY*x) + i*dimY + j. Otherwise identical: find the NumNeighb most
 * similar patches in the search window and store their indices/weights.
 * Always returns 1. */
float Indeces2D_p(float *Aorig, unsigned short *H_i, unsigned short *H_j, float *Weights, long i, long j, long dimX, long dimY, float *Eucl_Vec, int NumNeighb, int SearchWindow, int SimilarWin, float h2)
{
long i1, j1, i_m, j_m, i_c, j_c, i2, j2, i3, j3, counter, x, y, index, sizeWin_tot, counterG;
float *Weight_Vec, normsum;
unsigned short *ind_i, *ind_j;
sizeWin_tot = (2*SearchWindow + 1)*(2*SearchWindow + 1);
/* calloc zero-fills: entries past 'counter' stay zero */
Weight_Vec = (float*) calloc(sizeWin_tot, sizeof(float));
ind_i = (unsigned short*) calloc(sizeWin_tot, sizeof(unsigned short));
ind_j = (unsigned short*) calloc(sizeWin_tot, sizeof(unsigned short));
counter = 0;
/* loop over every candidate patch centre (i1,j1) inside the search window */
for(i_m=-SearchWindow; i_m<=SearchWindow; i_m++) {
for(j_m=-SearchWindow; j_m<=SearchWindow; j_m++) {
i1 = i+i_m;
j1 = j+j_m;
if (((i1 >= 0) && (i1 < dimX)) && ((j1 >= 0) && (j1 < dimY))) {
normsum = 0.0f; counterG = 0;
/* Gaussian-weighted SSD between the patch at (i,j) and the one at (i1,j1),
 * using the transposed indexing i*dimY + j */
for(i_c=-SimilarWin; i_c<=SimilarWin; i_c++) {
for(j_c=-SimilarWin; j_c<=SimilarWin; j_c++) {
i2 = i1 + i_c;
j2 = j1 + j_c;
i3 = i + i_c;
j3 = j + j_c;
if (((i2 >= 0) && (i2 < dimX)) && ((j2 >= 0) && (j2 < dimY))) {
if (((i3 >= 0) && (i3 < dimX)) && ((j3 >= 0) && (j3 < dimY))) {
//normsum += Eucl_Vec[counterG]*pow(Aorig[j3*dimX + (i3)] - Aorig[j2*dimX + (i2)], 2);
normsum += Eucl_Vec[counterG]*pow(Aorig[i3*dimY + (j3)] - Aorig[i2*dimY + (j2)], 2);
counterG++;
}}
}}
/* writing temporarily into vectors */
if (normsum > EPS) {
Weight_Vec[counter] = expf(-normsum/h2);
ind_i[counter] = i1;
ind_j[counter] = j1;
counter++;
}
}
}}
/* do sorting to choose the most prominent weights [HIGH to LOW] */
/* and re-arrange indeces accordingly */
/* bubble sort, descending; index arrays are kept in step with the weights */
for (x = 0; x < counter-1; x++) {
for (y = 0; y < counter-x-1; y++) {
if (Weight_Vec[y] < Weight_Vec[y+1]) {
swap(&Weight_Vec[y], &Weight_Vec[y+1]);
swapUS(&ind_i[y], &ind_i[y+1]);
swapUS(&ind_j[y], &ind_j[y+1]);
}
}
}
/*sorting loop finished*/
/*now select the NumNeighb more prominent weights and store into pre-allocated arrays */
/* if counter < NumNeighb, the tail entries are the calloc zeros */
for(x=0; x < NumNeighb; x++) {
index = (dimX*dimY*x) + i*dimY+j;
H_i[index] = ind_i[x];
H_j[index] = ind_j[x];
Weights[index] = Weight_Vec[x];
}
free(ind_i);
free(ind_j);
free(Weight_Vec);
return 1;
}
/* For one voxel (i,j,k): scan the (2*SearchWindow+1)^3 neighbourhood, compute
 * a Gaussian-weighted patch distance for every in-bounds candidate, convert
 * it to a similarity weight exp(-normsum/h2), sort the weights in descending
 * order and store the top NumNeighb indices/weights.
 * Output layout: volume x of size dimX*dimY*dimZ at
 * dimX*dimY*dimZ*x + dimX*dimY*k + j*dimX + i.
 * NOTE(review): the parameter list declares (long dimY, long dimX, ...) in
 * the opposite order to the 2D variants; callers must match it -- verify.
 * Always returns 1. */
float Indeces3D(float *Aorig, unsigned short *H_i, unsigned short *H_j, unsigned short *H_k, float *Weights, long i, long j, long k, long dimY, long dimX, long dimZ, float *Eucl_Vec, int NumNeighb, int SearchWindow, int SimilarWin, float h2)
{
    long i1, j1, k1, i_m, j_m, k_m, i_c, j_c, k_c, i2, j2, k2, i3, j3, k3, counter, x, y, index, sizeWin_tot, counterG;
    float *Weight_Vec, normsum, temp;
    unsigned short *ind_i, *ind_j, *ind_k, temp_i, temp_j, temp_k;

    sizeWin_tot = (2*SearchWindow + 1)*(2*SearchWindow + 1)*(2*SearchWindow + 1);
    /* calloc zero-fills: entries past 'counter' stay zero */
    Weight_Vec = (float*) calloc(sizeWin_tot, sizeof(float));
    ind_i = (unsigned short*) calloc(sizeWin_tot, sizeof(unsigned short));
    ind_j = (unsigned short*) calloc(sizeWin_tot, sizeof(unsigned short));
    ind_k = (unsigned short*) calloc(sizeWin_tot, sizeof(unsigned short));

    counter = 0l;
    /* loop over every candidate patch centre (i1,j1,k1) inside the search window */
    for(i_m=-SearchWindow; i_m<=SearchWindow; i_m++) {
        for(j_m=-SearchWindow; j_m<=SearchWindow; j_m++) {
            for(k_m=-SearchWindow; k_m<=SearchWindow; k_m++) {
                k1 = k+k_m;
                i1 = i+i_m;
                j1 = j+j_m;
                if (((i1 >= 0) && (i1 < dimX)) && ((j1 >= 0) && (j1 < dimY)) && ((k1 >= 0) && (k1 < dimZ))) {
                    normsum = 0.0f; counterG = 0l;
                    /* Gaussian-weighted SSD between the patches centred at
                     * (i,j,k) and (i1,j1,k1) */
                    for(i_c=-SimilarWin; i_c<=SimilarWin; i_c++) {
                        for(j_c=-SimilarWin; j_c<=SimilarWin; j_c++) {
                            for(k_c=-SimilarWin; k_c<=SimilarWin; k_c++) {
                                i2 = i1 + i_c;
                                j2 = j1 + j_c;
                                k2 = k1 + k_c;
                                i3 = i + i_c;
                                j3 = j + j_c;
                                k3 = k + k_c;
                                if (((i2 >= 0) && (i2 < dimX)) && ((j2 >= 0) && (j2 < dimY)) && ((k2 >= 0) && (k2 < dimZ))) {
                                    if (((i3 >= 0) && (i3 < dimX)) && ((j3 >= 0) && (j3 < dimY)) && ((k3 >= 0) && (k3 < dimZ))) {
                                        normsum += Eucl_Vec[counterG]*pow(Aorig[(dimX*dimY*k3) + j3*dimX + (i3)] - Aorig[(dimX*dimY*k2) + j2*dimX + (i2)], 2);
                                        counterG++;
                                    }}
                            }}}
                    /* writing temporarily into vectors */
                    if (normsum > EPS) {
                        Weight_Vec[counter] = expf(-normsum/h2);
                        ind_i[counter] = i1;
                        ind_j[counter] = j1;
                        ind_k[counter] = k1;
                        counter ++;
                    }
                }
            }}}
    /* Sort the collected weights in descending order (bubble sort), keeping
     * the three index arrays in step -- same scheme as the 2D variants.
     * BUG FIX: the previous loops compared Weight_Vec[y] with Weight_Vec[x]
     * but swapped elements y and y+1, which (a) read/wrote one element past
     * the collected range (out of bounds when counter == sizeWin_tot) and
     * (b) did not produce a sorted order. */
    for (x = 0; x < counter-1; x++) {
        for (y = 0; y < counter-x-1; y++) {
            if (Weight_Vec[y] < Weight_Vec[y+1]) {
                temp = Weight_Vec[y]; Weight_Vec[y] = Weight_Vec[y+1]; Weight_Vec[y+1] = temp;
                temp_i = ind_i[y]; ind_i[y] = ind_i[y+1]; ind_i[y+1] = temp_i;
                temp_j = ind_j[y]; ind_j[y] = ind_j[y+1]; ind_j[y+1] = temp_j;
                temp_k = ind_k[y]; ind_k[y] = ind_k[y+1]; ind_k[y+1] = temp_k;
            }
        }
    }
    /*sorting loop finished*/
    /*now select the NumNeighb more prominent weights and store into arrays;
      if counter < NumNeighb, the tail entries are the calloc zeros */
    for(x=0; x < NumNeighb; x++) {
        index = dimX*dimY*dimZ*x + (dimX*dimY*k) + j*dimX+i;
        H_i[index] = ind_i[x];
        H_j[index] = ind_j[x];
        H_k[index] = ind_k[x];
        Weights[index] = Weight_Vec[x];
    }
    free(ind_i);
    free(ind_j);
    free(ind_k);
    free(Weight_Vec);
    return 1;
}
|
DRB042-3mm-tile-no.c | /**
* 3mm.c: This file is part of the PolyBench/C 3.2 test suite.
* with tiling 16x16 and nested SIMD
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
* License: /LICENSE.OSU.txt
*/
#include "omprace.h"
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include "polybench/polybench.h"
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "polybench/3mm.h"
/* Array initialization. */
static void init_array(int ni,int nj,int nk,int nl,int nm,double A[128 + 0][128 + 0],double B[128 + 0][128 + 0],double C[128 + 0][128 + 0],double D[128 + 0][128 + 0])
{
//int i;
//int j;
{
int c3;
int c4;
int c1;
int c2;
if (ni >= ((0 > -1 * nj + -1 * nm + 1?0 : -1 * nj + -1 * nm + 1)) && nj >= 0 && nk >= ((0 > -1 * nm + 1?0 : -1 * nm + 1)) && nm >= 0) {
#pragma omp parallel for private(c2, c4, c3)
for (c1 = 0; c1 <= (((((nk + ni + nj + nm + -1) * 16 < 0?((16 < 0?-((-(nk + ni + nj + nm + -1) + 16 + 1) / 16) : -((-(nk + ni + nj + nm + -1) + 16 - 1) / 16))) : (nk + ni + nj + nm + -1) / 16)) < (((nk + ni + nj + 2 * nm + -2) * 16 < 0?((16 < 0?-((-(nk + ni + nj + 2 * nm + -2) + 16 + 1) / 16) : -((-(nk + ni + nj + 2 * nm + -2) + 16 - 1) / 16))) : (nk + ni + nj + 2 * nm + -2) / 16))?(((nk + ni + nj + nm + -1) * 16 < 0?((16 < 0?-((-(nk + ni + nj + nm + -1) + 16 + 1) / 16) : -((-(nk + ni + nj + nm + -1) + 16 - 1) / 16))) : (nk + ni + nj + nm + -1) / 16)) : (((nk + ni + nj + 2 * nm + -2) * 16 < 0?((16 < 0?-((-(nk + ni + nj + 2 * nm + -2) + 16 + 1) / 16) : -((-(nk + ni + nj + 2 * nm + -2) + 16 - 1) / 16))) : (nk + ni + nj + 2 * nm + -2) / 16)))); c1++) {
if (c1 <= (((((((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 
0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) {
for (c2 = 0; c2 <= (((((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 
16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) < nk + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) : nk + -1)) < nm + -1?((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) < nk + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) : nk + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) < nl + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) : nl + -1)) < nm + -1?((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) < nl + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) : nl + -1)) : nm + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) < nm + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) : nm + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) < nl + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) : nl + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = (nl > nm?nl : nm); c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= ((((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)) < nm + -1?((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)) : nm + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = (nj > nl?nj : nl); c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = (nj > nm?nj : nm); c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = (((nj > nl?nj : nl)) > nm?((nj > nl?nj : nl)) : nm); c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
#pragma omp simd
for (c4 = nk; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) < nm + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) : nm + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = (nk > nl?nk : nl); c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = (nk > nm?nk : nm); c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = (((nk > nl?nk : nl)) > nm?((nk > nl?nk : nl)) : nm); c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = (nj > nk?nj : nk); c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = (((nj > nk?nj : nk)) > nl?((nj > nk?nj : nk)) : nl); c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = (((nj > nk?nj : nk)) > nm?((nj > nk?nj : nk)) : nm); c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
for (c3 = nm; c3 <= ((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) < nk + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
if (c1 == c2) {
#pragma omp simd
for (c4 = nm; c4 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
if (c1 == c2) {
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
if (c1 == c2) {
#pragma omp simd
for (c4 = nk; c4 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
for (c3 = nj; c3 <= ((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)) < nm + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
if (c1 == c2) {
#pragma omp simd
for (c4 = nj; c4 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nl + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nl + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
if (c1 == c2) {
#pragma omp simd
for (c4 = (nj > nl?nj : nl); c4 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
if (c1 == c2) {
#pragma omp simd
for (c4 = nk; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
for (c3 = (nj > nm?nj : nm); c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
if (c1 == c2) {
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
}
for (c3 = nk; c3 <= ((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) < nm + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
if (c1 == c2) {
#pragma omp simd
for (c4 = nk; c4 <= ((((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)) < nm + -1?((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)) : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
if (c1 == c2) {
#pragma omp simd
for (c4 = (nk > nl?nk : nl); c4 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
if (c1 == c2) {
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
for (c3 = (nk > nm?nk : nm); c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
if (c1 == c2) {
#pragma omp simd
for (c4 = nm; c4 <= nk + -1; c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
if (c1 == c2) {
#pragma omp simd
for (c4 = nk; c4 <= nm + -1; c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
for (c3 = (nj > nk?nj : nk); c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
if (c1 == c2) {
#pragma omp simd
for (c4 = nk; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
for (c3 = (((nj > nk?nj : nk)) > nm?((nj > nk?nj : nk)) : nm); c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
for (c3 = ni; c3 <= ((((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)) < nm + -1?((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) < nm + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) : nm + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = (nl > nm?nl : nm); c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = (nj > nl?nj : nl); c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = (nj > nm?nj : nm); c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
for (c3 = (ni > nm?ni : nm); c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
if (c1 == c2) {
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
for (c3 = (ni > nj?ni : nj); c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
if (c1 == c2) {
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
for (c3 = (((ni > nj?ni : nj)) > nm?((ni > nj?ni : nj)) : nm); c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
for (c3 = (ni > nk?ni : nk); c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
if (c1 == c2) {
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
for (c3 = (((ni > nk?ni : nk)) > nm?((ni > nk?ni : nk)) : nm); c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
for (c3 = (((ni > nj?ni : nj)) > nk?((ni > nj?ni : nj)) : nk); c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {
for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) < nk + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= 16 * c2 + 15; c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
for (c3 = nj; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
for (c3 = nk; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= 16 * c2 + 15; c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
for (c3 = (nj > nk?nj : nk); c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
for (c3 = ni; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= 16 * c2 + 15; c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
for (c3 = (ni > nj?ni : nj); c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
for (c3 = (ni > nk?ni : nk); c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
if (c1 <= (((((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))))) {
for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) < nk + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) < nm + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) : nm + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = (nj > nm?nj : nm); c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
#pragma omp simd
for (c4 = nk; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = (nk > nm?nk : nm); c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = (nj > nk?nj : nk); c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
for (c3 = nj; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
if (c1 == c2) {
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
}
for (c3 = nk; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
if (c1 == c2) {
#pragma omp simd
for (c4 = nk; c4 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
for (c3 = (nj > nk?nj : nk); c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
for (c3 = ni; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
for (c3 = (ni > nj?ni : nj); c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
for (c3 = (ni > nk?ni : nk); c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
if (c1 <= (((((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) {
for (c2 = 0; c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)) < nm + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = (nj > nl?nj : nl); c4 <= 16 * c2 + 15; c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
for (c3 = nm; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= 16 * c2 + 15; c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
for (c3 = nk; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= 16 * c2 + 15; c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
for (c3 = (nk > nm?nk : nm); c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
for (c3 = ni; c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
for (c3 = (ni > nm?ni : nm); c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
for (c3 = (ni > nk?ni : nk); c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) && c1 >= ((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) {
for (c2 = 0; c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= 16 * c2 + 15; c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
for (c3 = nk; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
for (c3 = ni; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
}
if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) {
for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= 16 * c2 + 15; c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
for (c3 = nk; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
for (c3 = ni; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
}
if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) {
for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
/* Generated 16x16 tile body: initializes A[i][j]=i*j/ni, B[i][j]=i*(j+1)/nj,
 * D[i][j]=i*(j+2)/nk over the current (c1,c2) tile. The nested ternaries are
 * generator-emitted min()/max() clamps of the tile bound (16*c? + 15) against
 * the matrix extents (n?-1). Appears to be PLUTO/PolyOpt output for a
 * PolyBench 3mm-style init_array -- TODO confirm; prefer regenerating the
 * code over hand-editing it. */
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) {
/* Fused region: rows valid for A, B and D simultaneously. */
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) < nl + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) : nl + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
/* Column remainder loops: each covers the index range where only the
 * listed subset of matrices is still in bounds. */
#pragma omp simd
for (c4 = nl; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = (nj > nl?nj : nl); c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
#pragma omp simd
for (c4 = nk; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = (nk > nl?nk : nl); c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = (nj > nk?nj : nk); c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
/* Row remainder: c3 >= nm, so only A and B rows remain in bounds here. */
for (c3 = nm; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
#pragma omp simd
for (c4 = nk; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
/* Row remainder: c3 >= ni, so only B and D rows remain in bounds here. */
for (c3 = ni; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
/* Row remainder: c3 >= max(ni, nm), only B rows remain. */
for (c3 = (ni > nm?ni : nm); c3 <= 16 * c1 + 15; c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
}
if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {
for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
/* Generated tile body for the region where only A and B are initialized
 * (guard on the enclosing if restricts c1/c2 accordingly). Ternaries are
 * generator-emitted min() clamps of tile bound vs. matrix extent.
 * Machine-generated (PLUTO/PolyOpt-style, presumably) -- avoid hand edits. */
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) {
/* Columns valid for both A (cols < nk) and B (cols < nj). */
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
/* Column remainders: A-only, then B-only. */
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
#pragma omp simd
for (c4 = nk; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
/* Row remainders: A-only rows (c3 >= nk), then B-only rows (c3 >= ni). */
for (c3 = nk; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
for (c3 = ni; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
}
if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))))) {
for (c2 = (((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))); c2++) {
/* Generated tile body initializing A and B; textually identical to the
 * A/B body above but under a different (c1,c2) guard region -- the code
 * generator duplicates tile bodies per guard polytope. Do not hand-edit. */
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
/* Column remainders: A-only, then B-only. */
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
#pragma omp simd
for (c4 = nk; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
/* Row remainders: A-only rows, then B-only rows. */
for (c3 = nk; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
for (c3 = ni; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
}
if (c1 <= (((((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) {
for (c2 = 0; c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
/* Generated tile body initializing A, C and D, with full row/column
 * remainder decomposition (each loop covers the polytope where exactly
 * that subset of matrices is in bounds). Machine-generated; regenerate
 * rather than hand-edit. */
for (c3 = 16 * c1; c3 <= ((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) < nm + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) : nm + -1)); c3++) {
/* Fused region: A, C and D all in bounds. */
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
/* Column remainders for the A/C, C/D and C-only sub-ranges. */
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nk; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = (nk > nl?nk : nl); c4 <= 16 * c2 + 15; c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
/* Row remainder: c3 >= nm -- only A and C rows remain. */
for (c3 = nm; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nk; c4 <= 16 * c2 + 15; c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
/* Row remainder: c3 >= nj -- only A and D rows remain. */
for (c3 = nj; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
#pragma omp simd
for (c4 = nk; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
/* Row remainder: c3 >= max(nj, nm) -- A-only rows. */
for (c3 = (nj > nm?nj : nm); c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
/* Row remainder: c3 >= ni -- only C and D rows remain. */
for (c3 = ni; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= 16 * c2 + 15; c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
/* Row remainder: c3 >= max(ni, nm) -- C-only rows. */
for (c3 = (ni > nm?ni : nm); c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
/* Row remainder: c3 >= max(ni, nj) -- D-only rows. */
for (c3 = (ni > nj?ni : nj); c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) && c1 >= ((((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) {
for (c2 = 0; c2 <= (((((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
/* Generated tile body initializing A and C with row/column remainders.
 * Machine-generated; do not hand-edit. */
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) {
/* Columns valid for both A and C. */
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
/* Column remainders: A-only, then C-only. */
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
#pragma omp simd
for (c4 = nk; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
/* Row remainders: A-only rows, then C-only rows. */
for (c3 = nj; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
for (c3 = ni; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) {
for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
/* Generated tile body initializing A and C; textually identical to the
 * A/C body above but emitted under a different (c1,c2) guard polytope.
 * Machine-generated duplication; do not hand-edit. */
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
/* Column remainders: A-only, then C-only. */
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
#pragma omp simd
for (c4 = nk; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
/* Row remainders: A-only rows, then C-only rows. */
for (c3 = nj; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
for (c3 = ni; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))))) {
for (c2 = 0; c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
/* Generated tile body initializing A and D with row/column remainders.
 * Machine-generated; do not hand-edit. */
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) {
/* Columns valid for both A and D. */
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
/* Column remainders: A-only, then D-only. */
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
#pragma omp simd
for (c4 = nk; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
/* Row remainders: A-only rows, then D-only rows. */
for (c3 = nm; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
for (c3 = ni; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) {
for (c2 = 0; c2 <= (((((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 
16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
/* Generated tile body: A-only region (guard restricts this (c1,c2) range
 * to indices where only A is in bounds). Machine-generated. */
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
}
}
if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))))) {
for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
/* Generated tile body: A-only region; identical body to the A-only tile
 * above, duplicated for a different guard polytope by the generator. */
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
}
}
if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) {
for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))); c2++) {
/* Generated tile body: A-only region; third duplicate of the A-only tile
 * body, emitted under yet another guard polytope. */
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
}
}
if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))))) {
for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
/* Generated tile body initializing A, C and D for a different (c1,c2)
 * guard region than the earlier A/C/D body; note the column splits here
 * involve nm as well as nk/nl. Machine-generated; do not hand-edit. */
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) {
/* Fused region: A, C and D all in bounds. */
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)) < nm + -1?((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)) : nm + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
/* Column remainders: A/C, A/D, A-only, C/D, C-only, D-only sub-ranges. */
#pragma omp simd
for (c4 = nl; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = (nl > nm?nl : nm); c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
#pragma omp simd
for (c4 = nk; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = (nk > nl?nk : nl); c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = (nk > nm?nk : nm); c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
/* Row remainder: c3 >= nj -- A and D rows remain. */
for (c3 = nj; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
#pragma omp simd
for (c4 = nk; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
/* Row remainder: c3 >= ni -- C and D rows remain. */
for (c3 = ni; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
/* Row remainder: c3 >= max(ni, nj) -- D-only rows. */
for (c3 = (ni > nj?ni : nj); c3 <= 16 * c1 + 15; c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))))) {
for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
#pragma omp simd
for (c4 = nk; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
for (c3 = nj; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
for (c3 = ni; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) {
for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
#pragma omp simd
for (c4 = nk; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
for (c3 = nm; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
for (c3 = ni; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {
for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
}
}
if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) {
for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
}
}
if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) {
for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
#pragma omp simd
for (c4 = nk; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
for (c3 = nm; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
for (c3 = ni; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {
for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
}
}
if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16))) {
for (c2 = (((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
}
}
if (c1 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) {
for (c2 = 0; c2 <= (((((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 
16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)) < nm + -1?((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) < nm + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) : nm + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = (nl > nm?nl : nm); c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = (nj > nl?nj : nl); c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = (nj > nm?nj : nm); c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
for (c3 = nm; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
if (c1 == c2) {
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
for (c3 = nj; c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
if (c1 == c2) {
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
for (c3 = (nj > nm?nj : nm); c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
for (c3 = nk; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
if (c1 == c2) {
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
for (c3 = (nk > nm?nk : nm); c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
for (c3 = (nj > nk?nj : nk); c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) && c1 >= ((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) {
for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= 16 * c2 + 15; c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
for (c3 = nj; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
for (c3 = nk; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) {
for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
for (c3 = nj; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
for (c3 = nk; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
if (c1 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))))) {
for (c2 = 0; c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
for (c3 = nm; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
for (c3 = nk; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) {
for (c2 = 0; c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
}
if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))))) {
for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
}
if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) {
for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
for (c3 = nm; c3 <= 16 * c1 + 15; c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
}
if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) {
for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
}
if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) {
for (c2 = (((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
}
if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))) {
for (c2 = (nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) < nm + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) : nm + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = (nl > nm?nl : nm); c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = (nj > nl?nj : nl); c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = (nj > nm?nj : nm); c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
for (c3 = nk; c3 <= 16 * c1 + 15; c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))) {
for (c2 = (((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
for (c3 = nk; c3 <= 16 * c1 + 15; c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
if (c1 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) {
for (c2 = (((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
for (c3 = nm; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
for (c3 = nk; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {
for (c2 = (nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
}
if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))) {
for (c2 = (((((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
}
if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))))) {
for (c2 = 0; c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= 16 * c2 + 15; c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
for (c3 = nm; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
for (c3 = nj; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) {
for (c2 = 0; c2 <= (((((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))))) {
for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) {
for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
for (c3 = nj; c3 <= 16 * c1 + 15; c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) {
for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) {
for (c2 = (nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
for (c3 = nm; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
for (c3 = nj; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
/*
 * NOTE(review): this region looks like auto-generated polyhedral (Pluto-style)
 * tiled code with tile size 16.  The dense ternary expressions appear to be
 * macro-expanded floor/ceil integer divisions by 16 (floord/ceild) computing
 * tile-index bounds -- TODO confirm against the generator; do not hand-simplify.
 * Every guarded region below fills a rectangular tile slice using the same
 * two visible formulas:
 *   C[i][j] = (double)i * (j + 3) / nl;
 *   D[i][j] = (double)i * (j + 2) / nk;
 * The many near-duplicate regions are the case split the code generator emits
 * for the different relative orderings of nj, nk, nl, nm.
 */
/* Region: writes C only. */
if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {
for (c2 = (nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))); c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
/* Region: writes C only (different c2 lower bound: max of nk and nl tiles). */
if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) {
for (c2 = (((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
/* Region: writes both C and D where their index ranges overlap, then
 * "remainder" loops cover the non-overlapping strips (C only past nl,
 * D only past nm, D only for rows past nj). */
if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))) {
for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))); c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
/* Fused part: columns valid for both C (< nl) and D (< nm). */
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
/* Remainder: C-only columns in [nl, nm-1]. */
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
/* Remainder: D-only columns in [nm, nl-1]. */
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
/* Remainder: D-only rows in [nj, 16*c1+15]. */
for (c3 = nj; c3 <= 16 * c1 + 15; c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
/* Region: writes C only (c2 starts at max of nj, nk, nl tile indices). */
if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))) {
for (c2 = (((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
/* Region: writes D only (c1 at/above max of ni, nj, nk tile indices). */
if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))))) {
for (c2 = 0; c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
/* Region: writes D only (c2 starts at the nm tile index). */
if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) {
for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
/* Region: writes D only (c2 starts at the nj tile index). */
if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))))) {
for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
/* Region: writes D only (c2 starts at max of nj, nm tile indices). */
if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) {
for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
/* Region: writes D only (c2 starts at the nk tile index). */
if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) {
for (c2 = (nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))); c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
/* Region: writes D only (c2 starts at max of nj, nk, nm tile indices). */
if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))) {
for (c2 = (((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
}
}
/*
 * NOTE(review): continuation of the generated tiled initialization.  Each
 * top-level `if` below is one branch of the generator's case split on the
 * signs/relative sizes of ni, nj, nk, nl, nm (including degenerate cases with
 * negative extents, which the generator enumerates even if callers never use
 * them -- TODO confirm callers only pass positive sizes).  Visible formulas:
 *   A[i][j] = (double)i * j / ni;
 *   B[i][j] = (double)i * (j + 1) / nj;
 *   C[i][j] = (double)i * (j + 3) / nl;
 *   D[i][j] = (double)i * (j + 2) / nk;
 * The ternary expressions are macro-expanded floor/ceil divisions by the
 * tile size 16; do not hand-simplify.
 */
/* Case: nm <= -1 (D/C ranges degenerate) -- initializes A and B, tiled over
 * c1/c2 with the fused overlap tile plus remainder strips. */
if (ni >= ((0 > -1 * nj + 1?0 : -1 * nj + 1)) && nj >= 0 && nk >= 1 && nm <= -1) {
#pragma omp parallel for private(c2, c4, c3)
for (c1 = 0; c1 <= (((((nk + ni + -1) * 16 < 0?((16 < 0?-((-(nk + ni + -1) + 16 + 1) / 16) : -((-(nk + ni + -1) + 16 - 1) / 16))) : (nk + ni + -1) / 16)) < (((nk + ni + nj + -2) * 16 < 0?((16 < 0?-((-(nk + ni + nj + -2) + 16 + 1) / 16) : -((-(nk + ni + nj + -2) + 16 - 1) / 16))) : (nk + ni + nj + -2) / 16))?(((nk + ni + -1) * 16 < 0?((16 < 0?-((-(nk + ni + -1) + 16 + 1) / 16) : -((-(nk + ni + -1) + 16 - 1) / 16))) : (nk + ni + -1) / 16)) : (((nk + ni + nj + -2) * 16 < 0?((16 < 0?-((-(nk + ni + nj + -2) + 16 + 1) / 16) : -((-(nk + ni + nj + -2) + 16 - 1) / 16))) : (nk + ni + nj + -2) / 16)))); c1++) {
/* Overlap tiles: rows valid for both A (< ni) and B (< nk). */
if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))))) {
for (c2 = 0; c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) {
/* Fused part: columns valid for both A (< nj tile clamp via nk) and B. */
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
/* Remainder: A-only columns in [nj, nk-1]. */
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
/* Remainder: B-only columns in [nk, nj-1]. */
#pragma omp simd
for (c4 = nk; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
/* Remainder: A-only rows in [nk, ni-1]. */
for (c3 = nk; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
/* Remainder: B-only rows in [ni, nk-1]. */
for (c3 = ni; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
}
/* A-only tiles: rows past nk. */
if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) {
for (c2 = 0; c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
}
}
/* A-only tiles: columns past nj. */
if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16))) {
for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
}
}
/* B-only tiles: rows past ni. */
if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) {
for (c2 = 0; c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
}
/* B-only tiles: columns past nk. */
if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))) {
for (c2 = (nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))); c2 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
}
}
}
/* Case: nj <= -1 (B/C ranges degenerate) -- initializes A and D with the
 * same fused-tile-plus-remainders pattern as above. */
if (ni >= 0 && nj <= -1 && nk >= ((0 > -1 * nm + 1?0 : -1 * nm + 1)) && nm >= 0) {
#pragma omp parallel for private(c2, c4, c3)
for (c1 = 0; c1 <= (((ni + nm + -1) * 16 < 0?((16 < 0?-((-(ni + nm + -1) + 16 + 1) / 16) : -((-(ni + nm + -1) + 16 - 1) / 16))) : (ni + nm + -1) / 16)); c1++) {
/* Overlap tiles: rows valid for both A (< ni) and D (< nm). */
if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) {
for (c2 = 0; c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) {
/* Fused part: columns valid for both A (< nk) and D (< nl). */
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
/* Remainder: A-only columns in [nl, nk-1]. */
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
/* Remainder: D-only columns in [nk, nl-1]. */
#pragma omp simd
for (c4 = nk; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
/* Remainder: A-only rows in [nm, ni-1]. */
for (c3 = nm; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
/* Remainder: D-only rows in [ni, nm-1]. */
for (c3 = ni; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
/* A-only tiles: rows past nm. */
if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {
for (c2 = 0; c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
}
}
/* A-only tiles: columns past nl. */
if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16))) {
for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
}
}
/* D-only tiles: rows past ni. */
if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) {
for (c2 = 0; c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
/* D-only tiles: columns past nk. */
if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))) {
for (c2 = (nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))); c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
}
}
/* Case: nj <= -1 and nm <= -1 -- only A is initialized. */
if (nj <= -1 && nk >= 1 && nm <= -1) {
#pragma omp parallel for private(c2, c4, c3)
for (c1 = 0; c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)); c1++) {
for (c2 = 0; c2 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) {
A[c3][c4] = ((double )c3) * c4 / ni;
}
}
}
}
}
/* Case: nk <= -1 (A/B ranges degenerate) -- initializes C and D with the
 * fused-tile-plus-remainders pattern. */
if (ni >= 0 && nj >= 0 && nk <= -1 && nm >= 1) {
#pragma omp parallel for private(c2, c4, c3)
for (c1 = 0; c1 <= (((nj + nm + -1) * 16 < 0?((16 < 0?-((-(nj + nm + -1) + 16 + 1) / 16) : -((-(nj + nm + -1) + 16 - 1) / 16))) : (nj + nm + -1) / 16)); c1++) {
/* Overlap tiles: rows valid for both C (< nj) and D (< nm). */
if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) {
for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) {
/* Fused part: columns valid for both C (< nl) and D (< nm). */
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
/* Remainder: C-only columns in [nl, nm-1]. */
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
/* Remainder: D-only columns in [nm, nl-1]. */
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
/* Remainder: C-only rows in [nm, nj-1]. */
for (c3 = nm; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
/* Remainder: D-only rows in [nj, nm-1]. */
for (c3 = nj; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
/* C-only tiles: rows past nm. */
if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {
for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
/* C-only tiles: columns past nl. */
if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))) {
for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
/* D-only tiles: rows past nj. */
if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) {
for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
/* D-only tiles: columns past nm. */
if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))) {
for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
}
}
/* Case: nj <= -1 and nk <= -1 -- only D is initialized. */
if (ni >= 0 && nj <= -1 && nk <= -1 && nl >= 1) {
#pragma omp parallel for private(c2, c4, c3)
for (c1 = 0; c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c1++) {
for (c2 = 0; c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
}
if (ni <= -1 && nj >= ((0 > -1 * nm + 1?0 : -1 * nm + 1)) && nk >= ((0 > -1 * nm + 1?0 : -1 * nm + 1)) && nm >= 0) {
#pragma omp parallel for private(c2, c4, c3)
for (c1 = 0; c1 <= (((((nk + nj + nm + -1) * 16 < 0?((16 < 0?-((-(nk + nj + nm + -1) + 16 + 1) / 16) : -((-(nk + nj + nm + -1) + 16 - 1) / 16))) : (nk + nj + nm + -1) / 16)) < (((nk + nj + 2 * nm + -2) * 16 < 0?((16 < 0?-((-(nk + nj + 2 * nm + -2) + 16 + 1) / 16) : -((-(nk + nj + 2 * nm + -2) + 16 - 1) / 16))) : (nk + nj + 2 * nm + -2) / 16))?(((nk + nj + nm + -1) * 16 < 0?((16 < 0?-((-(nk + nj + nm + -1) + 16 + 1) / 16) : -((-(nk + nj + nm + -1) + 16 - 1) / 16))) : (nk + nj + nm + -1) / 16)) : (((nk + nj + 2 * nm + -2) * 16 < 0?((16 < 0?-((-(nk + nj + 2 * nm + -2) + 16 + 1) / 16) : -((-(nk + nj + 2 * nm + -2) + 16 - 1) / 16))) : (nk + nj + 2 * nm + -2) / 16)))); c1++) {
if (c1 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) {
for (c2 = 0; c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)) < nm + -1?((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) < nm + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) : nm + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = (nl > nm?nl : nm); c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = (nj > nl?nj : nl); c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = (nj > nm?nj : nm); c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
for (c3 = nm; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
if (c1 == c2) {
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
for (c3 = nj; c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
if (c1 == c2) {
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
for (c3 = (nj > nm?nj : nm); c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
for (c3 = nk; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
for (c3 = (nk > nm?nk : nm); c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
for (c3 = (nj > nk?nj : nk); c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {
for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= 16 * c2 + 15; c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
for (c3 = nj; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
for (c3 = nk; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))))) {
for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
for (c3 = nj; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
for (c3 = nk; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
if (c1 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) {
for (c2 = 0; c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
for (c3 = nm; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
for (c3 = nk; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) {
for (c2 = 0; c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
}
if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) {
for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
}
if (c1 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) {
for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
#pragma omp simd
for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
for (c3 = nm; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
for (c3 = nk; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {
for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
}
if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))) {
for (c2 = (((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
}
if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) {
for (c2 = 0; c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
for (c3 = nm; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
for (c3 = nj; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) {
for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) {
for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))) {
for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
for (c3 = nj; c3 <= 16 * c1 + 15; c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))) {
for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))))) {
for (c2 = 0; c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) {
for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) {
for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))) {
for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
}
}
/* Generated (polyhedral tiler) variant: initialize only B when this guard
 * combination holds — the ni-dependent nests are empty (ni <= -1), B's
 * domain is non-empty (nj >= 1), and the nm-dependent nests are empty
 * (nm <= -1).  Fills B[c3][c4] = c3*(c4+1)/nj over the full
 * 0..nk-1 x 0..nj-1 space in 16x16 tiles.
 * NOTE(review): the ternary bound expressions below are machine-emitted
 * expansions of floord(x,16) == floor(x/16); do not hand-simplify them —
 * they must stay textually consistent with the sibling guard variants
 * emitted elsewhere in this file. */
if (ni <= -1 && nj >= 1 && nm <= -1) {
/* Parallelize over 16-row tile strips (c1 indexes tiles along nk). */
#pragma omp parallel for private(c2, c4, c3)
for (c1 = 0; c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)); c1++) {
/* c2 indexes 16-column tiles along nj: 0 .. floor((nj-1)/16). */
for (c2 = 0; c2 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c2++) {
/* Intra-tile row loop, clamped to the array edge via min(16*c1+15, nk-1). */
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) {
#pragma omp simd
/* Intra-tile column loop, clamped via min(16*c2+15, nj-1). */
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) {
B[c3][c4] = ((double )c3) * (c4 + 1) / nj;
}
}
}
}
}
/* Generated (polyhedral tiler) variant: initialize only D when this guard
 * combination holds — the ni- and nj-dependent nests are empty
 * (ni <= -1, nj <= -1) while nk >= 0 keeps the divisor in the D formula
 * meaningful and nl >= 1 makes D's column domain non-empty.  Fills
 * D[c3][c4] = c3*(c4+2)/nk over the full 0..nm-1 x 0..nl-1 space in
 * 16x16 tiles.
 * NOTE(review): as above, the ternary bound expressions are machine-emitted
 * floor-division expansions (floor((nm-1)/16), floor((nl-1)/16)); leave
 * them untouched. */
if (ni <= -1 && nj <= -1 && nk >= 0 && nl >= 1) {
/* Parallelize over 16-row tile strips (c1 indexes tiles along nm). */
#pragma omp parallel for private(c2, c4, c3)
for (c1 = 0; c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c1++) {
/* c2 indexes 16-column tiles along nl: 0 .. floor((nl-1)/16). */
for (c2 = 0; c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {
/* Intra-tile row loop, clamped to the array edge via min(16*c1+15, nm-1). */
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
/* Intra-tile column loop, clamped via min(16*c2+15, nl-1). */
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
}
if (ni <= -1 && nj >= 0 && nk <= -1 && nm >= 1) {
#pragma omp parallel for private(c2, c4, c3)
for (c1 = 0; c1 <= (((nj + nm + -1) * 16 < 0?((16 < 0?-((-(nj + nm + -1) + 16 + 1) / 16) : -((-(nj + nm + -1) + 16 - 1) / 16))) : (nj + nm + -1) / 16)); c1++) {
if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) {
for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
#pragma omp simd
for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
#pragma omp simd
for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
for (c3 = nm; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
for (c3 = nj; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) {
for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))) {
for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) {
C[c3][c4] = ((double )c3) * (c4 + 3) / nl;
}
}
}
}
if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) {
for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))) {
for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
}
}
if (ni <= -1 && nj <= -1 && nk <= -1 && nl >= 1) {
#pragma omp parallel for private(c2, c4, c3)
for (c1 = 0; c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c1++) {
for (c2 = 0; c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
}
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump the whole G array to stderr so the compiler cannot dead-code
   eliminate the kernel; output format matches the polybench reference. */
static void print_array(int ni,int nl,double G[128 + 0][128 + 0])
{
    int row;
    int col;
    for (row = 0; row < ni; row++) {
        for (col = 0; col < nl; col++) {
            fprintf(stderr,"%0.2lf ",G[row][col]);
            /* periodic line break, every 20th linearized element */
            if ((row * ni + col) % 20 == 0)
                fprintf(stderr,"\n");
        }
    }
    fprintf(stderr,"\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* 3mm kernel: E = A*B, F = C*D, G = E*F (see the += statements below).
   Auto-generated, 16x16-tiled and OpenMP-parallelized polyhedral code;
   the giant ternaries are ceiling/floor divisions emitted by the code
   generator.  Guarded variants cover degenerate parameter regimes. */
static void kernel_3mm(int ni,int nj,int nk,int nl,int nm,double E[128 + 0][128 + 0],double A[128 + 0][128 + 0],double B[128 + 0][128 + 0],double F[128 + 0][128 + 0],double C[128 + 0][128 + 0],double D[128 + 0][128 + 0],double G[128 + 0][128 + 0])
{
// int i;
// int j;
// int k;
//#pragma scop
{
int c5;
int c10;
int c2;
int c1;
int c6;
int c7;
// --- zero-initialize G (ni x nl) and F (nj x nl) in fused 16x16 tiles
if (ni >= 0 && nj >= 0 && nl >= 1) {
#pragma omp parallel for private(c7, c2, c10)
for (c1 = 0; c1 <= (((nj + ni + -1) * 16 < 0?((16 < 0?-((-(nj + ni + -1) + 16 + 1) / 16) : -((-(nj + ni + -1) + 16 - 1) / 16))) : (nj + ni + -1) / 16)); c1++) {
for (c2 = 0; c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {
if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16))) {
for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c7++) {
#pragma omp simd
for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c10++) {
G[c10][c7] = 0;
}
}
}
if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))) {
for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c7++) {
#pragma omp simd
for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c10++) {
F[c10][c7] = 0;
}
}
}
}
}
}
// --- degenerate regime ni < 0: only F needs zeroing
if (ni <= -1 && nl >= 1) {
#pragma omp parallel for private(c7, c2, c10)
for (c1 = 0; c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c1++) {
for (c2 = 0; c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {
for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c7++) {
#pragma omp simd
for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c10++) {
F[c10][c7] = 0;
}
}
}
}
}
// --- degenerate regime nj < 0: only G needs zeroing
if (nj <= -1 && nl >= 1) {
#pragma omp parallel for private(c7, c2, c10)
for (c1 = 0; c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)); c1++) {
for (c2 = 0; c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {
for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c7++) {
#pragma omp simd
for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c10++) {
G[c10][c7] = 0;
}
}
}
}
}
// --- F += C*D, tiled over (c1,c2) output tiles and c5/c6 reduction tiles
if (nl >= 1 && nm >= 1) {
#pragma omp parallel for private(c7, c6, c2, c10, c5)
for (c1 = 0; c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c1++) {
for (c2 = 0; c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {
for (c5 = 0; c5 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c5++) {
for (c6 = 16 * c5; c6 <= ((16 * c5 + 15 < nm + -1?16 * c5 + 15 : nm + -1)); c6++) {
for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c7++) {
#pragma omp simd
for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c10++) {
F[c10][c7] += C[c10][c6] * D[c6][c7];
}
}
}
}
}
}
}
// --- E = 0 (ni x nj)
if (nj >= 1) {
#pragma omp parallel for private(c7, c2, c10)
for (c1 = 0; c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)); c1++) {
for (c2 = 0; c2 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c2++) {
for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c7++) {
#pragma omp simd
for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c10++) {
E[c10][c7] = 0;
}
}
}
}
}
// --- fused: E += A*B, then G += E*F, sharing the same (c1,c2) tile loops
if (nj >= 1) {
#pragma omp parallel for private(c7, c6, c2, c10, c5)
for (c1 = 0; c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)); c1++) {
for (c2 = 0; c2 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c2++) {
for (c5 = 0; c5 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)); c5++) {
for (c6 = 16 * c5; c6 <= ((16 * c5 + 15 < nk + -1?16 * c5 + 15 : nk + -1)); c6++) {
for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c7++) {
#pragma omp simd
for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c10++) {
E[c10][c7] += A[c10][c6] * B[c6][c7];
}
}
}
}
for (c5 = 0; c5 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c5++) {
for (c6 = 16 * c5; c6 <= ((16 * c5 + 15 < nl + -1?16 * c5 + 15 : nl + -1)); c6++) {
for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c7++) {
#pragma omp simd
for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c10++) {
G[c10][c6] += E[c10][c7] * F[c7][c6];
}
}
}
}
}
}
}
}
//#pragma endscop
}
/* Polybench-style driver: allocate the seven 128x128 matrices, run the
   timed 3mm kernel, and print G only under a condition that is never
   true in practice (argc > 42) so the result stays live but silent. */
int main(int argc,char **argv)
{
omprace_init();
/* Retrieve problem size. */
int ni = 128;
int nj = 128;
int nk = 128;
int nl = 128;
int nm = 128;
/* Variable declaration/allocation. */
double (*E)[128 + 0][128 + 0];
E = ((double (*)[128 + 0][128 + 0])(polybench_alloc_data(((128 + 0) * (128 + 0)),(sizeof(double )))));
;
double (*A)[128 + 0][128 + 0];
A = ((double (*)[128 + 0][128 + 0])(polybench_alloc_data(((128 + 0) * (128 + 0)),(sizeof(double )))));
;
double (*B)[128 + 0][128 + 0];
B = ((double (*)[128 + 0][128 + 0])(polybench_alloc_data(((128 + 0) * (128 + 0)),(sizeof(double )))));
;
double (*F)[128 + 0][128 + 0];
F = ((double (*)[128 + 0][128 + 0])(polybench_alloc_data(((128 + 0) * (128 + 0)),(sizeof(double )))));
;
double (*C)[128 + 0][128 + 0];
C = ((double (*)[128 + 0][128 + 0])(polybench_alloc_data(((128 + 0) * (128 + 0)),(sizeof(double )))));
;
double (*D)[128 + 0][128 + 0];
D = ((double (*)[128 + 0][128 + 0])(polybench_alloc_data(((128 + 0) * (128 + 0)),(sizeof(double )))));
;
double (*G)[128 + 0][128 + 0];
G = ((double (*)[128 + 0][128 + 0])(polybench_alloc_data(((128 + 0) * (128 + 0)),(sizeof(double )))));
;
/* Initialize array(s). */
init_array(ni,nj,nk,nl,nm, *A, *B, *C, *D);
/* Start timer. */
polybench_timer_start();
;
/* Run kernel. */
kernel_3mm(ni,nj,nk,nl,nm, *E, *A, *B, *F, *C, *D, *G);
/* Stop and print timer. */
polybench_timer_stop();
;
polybench_timer_print();
;
/* Prevent dead-code elimination. All live-out data must be printed
	   by the function call in argument. */
/* standard polybench idiom: condition is effectively always false */
if (argc > 42 && !strcmp(argv[0],""))
print_array(ni,nl, *G);
/* Be clean. */
free(((void *)E));
;
free(((void *)A));
;
free(((void *)B));
;
free(((void *)F));
;
free(((void *)C));
;
free(((void *)D));
;
free(((void *)G));
;
omprace_fini();
return 0;
}
|
GB_binop__lxor_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_01__lxor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_03__lxor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_uint64)
// A*D function (colscale): GB (_AxD__lxor_uint64)
// D*A function (rowscale): GB (_DxB__lxor_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_uint64)
// C=scalar+B GB (_bind1st__lxor_uint64)
// C=scalar+B' GB (_bind1st_tran__lxor_uint64)
// C=A+scalar GB (_bind2nd__lxor_uint64)
// C=A'+scalar GB (_bind2nd_tran__lxor_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_UINT64 || GxB_NO_LXOR_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled for this file: LXOR is not in that list, so the dense
// C += A+B kernel is compiled out (hence the "(none)" name).
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Purpose: C = A+B where C, A, and B are all dense (no mask, no accum);
// the actual loop lives in the included template, specialized via the
// GB_* macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__lxor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator/type compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Purpose: C += B where C is dense; B's entries were pre-sliced into
// B_ntasks tasks (B_ek_slicing) to be processed by B_nthreads threads.
GrB_Info GB (_Cdense_accumB__lxor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Purpose: C += b for a scalar b (passed type-erased via p_bwork) and a
// dense matrix C.
GrB_Info GB (_Cdense_accumb__lxor_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the inner block already returned
    // (harmless auto-generated artifact)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Purpose: C = A*D, scaling each column of A by the corresponding entry
// of the diagonal matrix D; C->x is written directly as uint64_t.
GrB_Info GB (_AxD__lxor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Purpose: C = D*B, scaling each row of B by the corresponding entry of
// the diagonal matrix D; C->x is written directly as uint64_t.
GrB_Info GB (_DxB__lxor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Purpose: eWiseAdd C = A+B (or C<M> = A+B with optional mask M); the
// pre-built TaskList partitions C's entries across C_nthreads threads.
GrB_Info GB (_AaddB__lxor_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace declared here so the template can slice M, A, and B;
    // released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Purpose: eWiseMult C = A.*B (or C<M> = A.*B), general case, via the
// emult_01 meta template.
GrB_Info GB (_AemultB_01__lxor_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Purpose: eWiseMult C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full; GB_FLIPPED selects whether the template applies the op
// as f(x,y) or f(y,x).  For LXOR, GB_BINOP_FLIP is 0, so only the
// non-flipped branch below is compiled in.
GrB_Info GB (_AemultB_02__lxor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Purpose: eWiseMult C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full; M's entries are pre-sliced into M_ntasks tasks.
GrB_Info GB (_AemultB_03__lxor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Purpose: eWiseMult C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result
// C is held in bitmap form.
GrB_Info GB (_AemultB_bitmap__lxor_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply z = lxor (x, bij) across all anz entries of B, with the scalar x
// bound as the first operand; entries absent from B's bitmap are skipped.
GrB_Info GB (_bind1st__lxor_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cz = (uint64_t *) Cx_output ;
    const uint64_t xval = (*((const uint64_t *) x_input)) ;
    const uint64_t *Bz = (const uint64_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // entry k not present in B's bitmap: leave Cz [k] untouched
        if (!GBB (Bb, k)) continue ;
        uint64_t bval = Bz [k] ;
        Cz [k] = ((xval != 0) != (bval != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply z = lxor (aij, y) across all anz entries of A, with the scalar y
// bound as the second operand; entries absent from A's bitmap are skipped.
GrB_Info GB (_bind2nd__lxor_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cz = (uint64_t *) Cx_output ;
    const uint64_t *Az = (const uint64_t *) Ax_input ;
    const uint64_t yval = (*((const uint64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // entry k not present in A's bitmap: leave Cz [k] untouched
        if (!GBB (Ab, k)) continue ;
        uint64_t aval = Az [k] ;
        Cz [k] = ((aval != 0) != (yval != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
// Purpose: C = op (x, A') — transpose A and apply the binary op with the
// scalar x bound as the first operand (via the GB_CAST_OP macro above).
GrB_Info GB (_bind1st_tran__lxor_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // re-establish GB_ATYPE after the function body (auto-generated
    // pattern; for this file the type is the same uint64_t)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
// Purpose: C = op (A', y) — transpose A and apply the binary op with the
// scalar y bound as the second operand (via the GB_CAST_OP macro above).
GrB_Info GB (_bind2nd_tran__lxor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
dynwave.c | //-----------------------------------------------------------------------------
// dynwave.c
//
// Project: EPA SWMM5
// Version: 5.1
// Date: 03/20/14 (5.1.001)
// 03/28/14 (5.1.002)
// 09/15/14 (5.1.007)
// 03/19/15 (5.1.008)
// 08/01/16 (5.1.011)
// 05/10/18 (5.1.013)
// Author: L. Rossman (EPA)
// M. Tryby (EPA)
// R. Dickinson (CDM)
//
// Dynamic wave flow routing functions.
//
// This module solves the dynamic wave flow routing equations using
// Picard Iterations (i.e., a method of successive approximations)
// to solve the explicit form of the continuity and momentum equations
// for conduits.
//
// Build 5.1.002:
// - Only non-ponded nodal surface area is saved for use in
// surcharge algorithm.
//
// Build 5.1.007:
// - Node losses added to node outflow variable instead of treated
// as a separate item when computing change in node flow volume.
//
// Build 5.1.008:
// - Module-specific constants moved here from project.c.
// - Support added for user-specified minimum variable time step.
// - Node crown elevations found here instead of in flowrout.c module.
// - OpenMP use to parallelize findLinkFlows() & findNodeDepths().
// - Bug in finding complete list of capacity limited links fixed.
//
// Build 5.1.011:
// - Added test for failed memory allocation.
// - Fixed illegal array index bug for Ideal Pumps.
//
// Build 5.1.013:
// - Include omp.h protected against lack of compiler support for OpenMP.
// - SurchargeMethod option used to decide how node surcharging is handled.
// - Storage nodes allowed to pressurize if their surcharge depth > 0.
// - Minimum flow needed to compute a Courant time step modified.
//
// Build 5.1.014:
// - updateNodeFlows() modified to subtract conduit evap. and seepage losses
// from downstream node inflow instead of upstream node outflow.
//
//-----------------------------------------------------------------------------
#define _CRT_SECURE_NO_DEPRECATE
#include "headers.h"
#include <stdlib.h>
#include <math.h>
#if defined(_OPENMP) //(5.1.013)
#include <omp.h>
#endif
//-----------------------------------------------------------------------------
// Constants
//-----------------------------------------------------------------------------
static const double MINTIMESTEP = 0.001; // min. time step (sec)
static const double OMEGA = 0.5; // under-relaxation parameter
static const double DEFAULT_SURFAREA = 12.566; // Min. nodal surface area (~4 ft diam.)
static const double DEFAULT_HEADTOL = 0.005; // Default head tolerance (ft)
static const double EXTRAN_CROWN_CUTOFF = 0.96; // crown cutoff for EXTRAN //(5.1.013)
static const double SLOT_CROWN_CUTOFF = 0.985257; // crown cutoff for SLOT //(5.1.013)
static const int DEFAULT_MAXTRIALS = 8; // Max. trials per time step
//-----------------------------------------------------------------------------
// Data Structures
//-----------------------------------------------------------------------------
// Extended per-node working state for the Picard iterations; allocated
// in dynwave_init() as a parallel array to Node[] (one entry per node).
typedef struct
{
    char    converged;                 // TRUE if iterations for a node done
    double  newSurfArea;               // current surface area (ft2)
    double  oldSurfArea;               // previous surface area (ft2)
    double  sumdqdh;                   // sum of dqdh from adjoining links
    double  dYdT;                      // change in depth w.r.t. time (ft/sec)
} TXnode;
//-----------------------------------------------------------------------------
// Shared Variables
//-----------------------------------------------------------------------------
static double VariableStep; // size of variable time step (sec)
static TXnode* Xnode; // extended nodal information
static double Omega; // actual under-relaxation parameter
static int Steps; // number of Picard iterations
//-----------------------------------------------------------------------------
// Function declarations
//-----------------------------------------------------------------------------
static void initRoutingStep(void);
static void initNodeStates(void);
static void findBypassedLinks();
static void findLimitedLinks();
static void findLinkFlows(double dt);
static int isTrueConduit(int link);
static void findNonConduitFlow(int link, double dt);
static void findNonConduitSurfArea(int link);
static double getModPumpFlow(int link, double q, double dt);
static void updateNodeFlows(int link);
static int findNodeDepths(double dt);
static void setNodeDepth(int node, double dt);
static double getFloodedDepth(int node, int canPond, double dV, double yNew,
double yMax, double dt);
static double getVariableStep(double maxStep);
static double getLinkStep(double tMin, int *minLink);
static double getNodeStep(double tMin, int *minNode);
//=============================================================================
void dynwave_init()
//
// Input: none
// Output: none
// Purpose: initializes dynamic wave routing method.
//
{
int i, j;
double z;
VariableStep = 0.0;
Xnode = (TXnode *) calloc(Nobjects[NODE], sizeof(TXnode));
if ( Xnode == NULL )
{
report_writeErrorMsg(ERR_MEMORY,
" Not enough memory for dynamic wave routing.");
return;
}
// --- initialize node surface areas & crown elev.
for (i = 0; i < Nobjects[NODE]; i++ )
{
Xnode[i].newSurfArea = 0.0;
Xnode[i].oldSurfArea = 0.0;
Node[i].crownElev = Node[i].invertElev;
}
// --- initialize links & update node crown elevations
for (i = 0; i < Nobjects[LINK]; i++)
{
j = Link[i].node1;
z = Node[j].invertElev + Link[i].offset1 + Link[i].xsect.yFull;
Node[j].crownElev = MAX(Node[j].crownElev, z);
j = Link[i].node2;
z = Node[j].invertElev + Link[i].offset2 + Link[i].xsect.yFull;
Node[j].crownElev = MAX(Node[j].crownElev, z);
Link[i].flowClass = DRY;
Link[i].dqdh = 0.0;
}
// --- set crown cutoff for finding top width of closed conduits //(5.1.013)
if ( SurchargeMethod == SLOT ) CrownCutoff = SLOT_CROWN_CUTOFF; //(5.1.013)
else CrownCutoff = EXTRAN_CROWN_CUTOFF; //(5.1.013)
}
//=============================================================================
void dynwave_close()
//
// Input:   none
// Output:  none
// Purpose: frees memory allocated for dynamic wave routing method.
//
{
// releases the Xnode array allocated in dynwave_init()
// NOTE(review): FREE appears to be the project's free-and-NULL macro —
// confirm its definition in the project headers
FREE(Xnode);
}
//=============================================================================
void dynwave_validate()
//
//  Input:   none
//  Output:  none
//  Purpose: adjusts dynamic wave routing options.
//
{
    // --- clamp the minimum routing step into [MINTIMESTEP, RouteStep]
    if ( MinRouteStep > RouteStep )
    {
        MinRouteStep = RouteStep;
    }
    if ( MinRouteStep < MINTIMESTEP )
    {
        MinRouteStep = MINTIMESTEP;
    }

    // --- substitute defaults for unset options, otherwise convert the
    //     user-supplied values to internal (ft-based) units
    if ( MinSurfArea == 0.0 )
    {
        MinSurfArea = DEFAULT_SURFAREA;
    }
    else
    {
        MinSurfArea /= UCF(LENGTH) * UCF(LENGTH);
    }
    if ( HeadTol == 0.0 )
    {
        HeadTol = DEFAULT_HEADTOL;
    }
    else
    {
        HeadTol /= UCF(LENGTH);
    }
    if ( MaxTrials == 0 )
    {
        MaxTrials = DEFAULT_MAXTRIALS;
    }
}
//=============================================================================
double dynwave_getRoutingStep(double fixedStep)
//
// Input:   fixedStep = user-supplied fixed time step (sec)
// Output:  returns routing time step (sec)
// Purpose: computes variable routing time step if applicable.
//
{
    // --- fall back on the fixed step when variable stepping is turned
    //     off or the fixed step is already below the minimum allowed
    if ( CourantFactor == 0.0 || fixedStep < MINTIMESTEP )
        return fixedStep;

    // --- at the start of a simulation (current variable step still 0)
    //     use the minimum allowable step; otherwise derive the step
    //     from the current flow solution
    if ( VariableStep == 0.0 ) VariableStep = MinRouteStep;
    else                       VariableStep = getVariableStep(fixedStep);

    // --- truncate the step to a whole number of milliseconds
    VariableStep = floor(1000.0 * VariableStep) / 1000.0;
    return VariableStep;
}
//=============================================================================
int dynwave_execute(double tStep)
//
// Input:   tStep = time step (sec)
// Output:  returns number of iterations used
// Purpose: routes flows through drainage network over current time step.
//
{
    int hasConverged = FALSE;

    // --- bail out if a fatal error has already occurred
    if ( ErrorCode ) return 0;

    Omega = OMEGA;
    initRoutingStep();

    // --- iterate on the flow/depth solution until it converges
    //     or the trial limit is reached
    for (Steps = 0; Steps < MaxTrials; )
    {
        initNodeStates();
        findLinkFlows(tStep);
        hasConverged = findNodeDepths(tStep);
        Steps++;

        // --- convergence is only accepted after the first trial
        if ( Steps > 1 )
        {
            if ( hasConverged ) break;

            // --- check if link calculations can be skipped in next step
            findBypassedLinks();
        }
    }
    if ( !hasConverged ) NonConvergeCount++;

    // --- identify any capacity-limited conduits
    findLimitedLinks();
    return Steps;
}
//=============================================================================
void initRoutingStep()
//
// Purpose: resets node, link & conduit state prior to the iterative
//          solution of the current routing time step.
//
{
    int j;

    // --- clear node convergence flags & depth-change rates
    for (j = 0; j < Nobjects[NODE]; j++)
    {
        Xnode[j].converged = FALSE;
        Xnode[j].dYdT = 0.0;
    }

    // --- clear link bypass flags & end-node surface area contributions
    for (j = 0; j < Nobjects[LINK]; j++)
    {
        Link[j].bypassed = FALSE;
        Link[j].surfArea1 = 0.0;
        Link[j].surfArea2 = 0.0;
    }

    // --- a2 preserves conduit area from solution at last time step
    for (j = 0; j < Nlinks[CONDUIT]; j++) Conduit[j].a2 = Conduit[j].a1;
}
//=============================================================================
void initNodeStates()
//
// Input:   none
// Output:  none
// Purpose: initializes node's surface area, inflow & outflow
//
{
    int i;
    double latFlow;

    for (i = 0; i < Nobjects[NODE]; i++)
    {
        // --- initialize nodal surface area (ponded area when ponding
        //     is allowed, free surface area otherwise);
        //     (clamping to MinSurfArea happens later, in setNodeDepth,
        //      as of release 5.1.013)
        if ( AllowPonding )
            Xnode[i].newSurfArea = node_getPondedArea(i, Node[i].newDepth);
        else
            Xnode[i].newSurfArea = node_getSurfArea(i, Node[i].newDepth);

        // --- initialize nodal inflow & outflow; a positive lateral
        //     flow adds to inflow, a negative one adds to outflow
        Node[i].inflow = 0.0;
        Node[i].outflow = Node[i].losses;
        latFlow = Node[i].newLatFlow;
        if ( latFlow < 0.0 ) Node[i].outflow -= latFlow;
        else                 Node[i].inflow  += latFlow;

        Xnode[i].sumdqdh = 0.0;
    }
}
//=============================================================================
void findBypassedLinks()
//
// Purpose: flags links whose flow computation can be skipped on the
//          next iteration because both end nodes have converged.
//
{
    int i, n1, n2;
    for (i = 0; i < Nobjects[LINK]; i++)
    {
        n1 = Link[i].node1;
        n2 = Link[i].node2;
        Link[i].bypassed = (Xnode[n1].converged && Xnode[n2].converged);
    }
}
//=============================================================================
void findLimitedLinks()
//
// Input:   none
// Output:  none
// Purpose: determines if a conduit link is capacity limited.
//
{
    int    j, n1, n2, k;
    double h1, h2;

    for (j = 0; j < Nobjects[LINK]; j++)
    {
        // ---- examine only non-dummy conduit links
        if ( !isTrueConduit(j) ) continue;
        k = Link[j].subIndex;
        Conduit[k].capacityLimited = FALSE;

        // --- a conduit can only be limited when its upstream end is full
        if ( Conduit[k].a1 < Link[j].xsect.aFull ) continue;

        // --- limited if the HGL slope exceeds the conduit slope
        n1 = Link[j].node1;
        n2 = Link[j].node2;
        h1 = Node[n1].newDepth + Node[n1].invertElev;
        h2 = Node[n2].newDepth + Node[n2].invertElev;
        Conduit[k].capacityLimited =
            ( (h1 - h2) > fabs(Conduit[k].slope) * Conduit[k].length );
    }
}
//=============================================================================
void findLinkFlows(double dt)
//
// Input:   dt = time step (sec)
// Output:  none
// Purpose: computes new flows in all links for the current iteration
//          and folds their contributions into the end-node totals.
//
{
    int i;
    // --- find new flow in each non-dummy conduit
    //     (conduit flow updates are independent of one another, so this
    //      loop is parallelized; node totals are deliberately NOT updated
    //      here to avoid races on shared end nodes)
    #pragma omp parallel num_threads(NumThreads)
    {
        #pragma omp for
        for ( i = 0; i < Nobjects[LINK]; i++)
        {
            if ( isTrueConduit(i) && !Link[i].bypassed )
                dwflow_findConduitFlow(i, Steps, Omega, dt);
        }
    }

    // --- update inflow/outflows for nodes attached to non-dummy conduits
    //     (serial pass: several links can share an end node)
    for ( i = 0; i < Nobjects[LINK]; i++)
    {
        if ( isTrueConduit(i) ) updateNodeFlows(i);
    }

    // --- find new flows for all dummy conduits, pumps & regulators;
    //     these depend on the node inflow/outflow totals updated above
    //     (getModPumpFlow reads Node[].inflow/outflow)
    for ( i = 0; i < Nobjects[LINK]; i++)
    {
        if ( !isTrueConduit(i) )
        {
            if ( !Link[i].bypassed ) findNonConduitFlow(i, dt);
            updateNodeFlows(i);
        }
    }
}
//=============================================================================
int isTrueConduit(int j)
{
return ( Link[j].type == CONDUIT && Link[j].xsect.type != DUMMY );
}
//=============================================================================
void findNonConduitFlow(int i, double dt)
//
// Input:   i = link index
//          dt = time step (sec)
// Output:  none
// Purpose: finds new flow in a non-conduit-type link
//
{
    double qOld;                       // flow from previous iteration (cfs)
    double q;                          // new link flow (cfs)

    // --- remember flow from the last iteration & reset dq/dh
    qOld = Link[i].newFlow;
    Link[i].dqdh = 0.0;

    // --- get new inflow to link from its upstream node
    //     (link_getInflow returns 0 if flap gate closed or pump is offline)
    q = link_getInflow(i);
    if ( Link[i].type == PUMP ) q = getModPumpFlow(i, q, dt);

    // --- find surface area at each end of link
    findNonConduitSurfArea(i);

    // --- apply under-relaxation with flow from previous iteration and
    //     keep flow from reversing direction without passing through 0
    if ( Steps > 0 && Link[i].type != PUMP )
    {
        q = (1.0 - Omega) * qOld + Omega * q;
        if ( q * qOld < 0.0 ) q = 0.001 * SGN(q);
    }
    Link[i].newFlow = q;
}
//=============================================================================
double getModPumpFlow(int i, double q, double dt)
//
// Input:   i = link index
//          q = pump flow from pump curve (cfs)
//          dt = time step (sec)
// Output:  returns modified pump flow rate (cfs)
// Purpose: modifies pump curve pumping rate depending on amount of water
//          available at pump's inlet node.
//
{
    int j = Link[i].node1;          // pump's inlet node index
    int k = Link[i].subIndex;       // pump's index
    double newNetInflow;            // inflow - outflow rate (cfs)
    double netFlowVolume;           // inflow - outflow volume (ft3)
    double y;                       // node depth (ft)

    // --- an idle pump needs no modification
    if ( q == 0.0 ) return q;

    // --- case where inlet node is a storage node:
    //     prevent node volume from going negative
    if ( Node[j].type == STORAGE ) return node_getMaxOutflow(j, q, dt);

    // --- case where inlet is a non-storage node
    switch ( Pump[k].type )
    {
      // --- for Type1 pump, a volume is computed for inlet node,
      //     so make sure it doesn't go negative
      case TYPE1_PUMP:
        return node_getMaxOutflow(j, q, dt);

      // --- for other types of pumps, if pumping rate would make depth
      //     at upstream node negative, then set pumping rate = inflow
      //     (the three case labels below intentionally share one body)
      case TYPE2_PUMP:
      case TYPE4_PUMP:
      case TYPE3_PUMP:
        // --- project the inlet depth with a trapezoidal volume estimate;
        //     NOTE(review): divides by Xnode[j].newSurfArea -- assumes it
        //     is non-zero here; confirm an upstream guard guarantees this
        newNetInflow = Node[j].inflow - Node[j].outflow - q;
        netFlowVolume = 0.5 * (Node[j].oldNetInflow + newNetInflow ) * dt;
        y = Node[j].oldDepth + netFlowVolume / Xnode[j].newSurfArea;
        if ( y <= 0.0 ) return Node[j].inflow;
    }
    // --- any other pump type keeps its curve flow unchanged
    return q;
}
//=============================================================================
void findNonConduitSurfArea(int i)
//
// Input:   i = link index
// Output:  none
// Purpose: finds the surface area contributed by a non-conduit
//          link to its upstream and downstream nodes.
//
{
    double a = 0.0;     // default contribution; weirs get no surface
                        // area to maintain SWMM 4 compatibility
    if ( Link[i].type == ORIFICE )
        a = Orifice[Link[i].subIndex].surfArea / 2.;
    Link[i].surfArea1 = a;
    Link[i].surfArea2 = a;

    // --- drop the contribution at an end that is flow-critical
    //     or that connects to a storage node
    if ( Link[i].flowClass == UP_CRITICAL ||
         Node[Link[i].node1].type == STORAGE ) Link[i].surfArea1 = 0.0;
    if ( Link[i].flowClass == DN_CRITICAL ||
         Node[Link[i].node2].type == STORAGE ) Link[i].surfArea2 = 0.0;
}
//=============================================================================
void updateNodeFlows(int i)
//
// Input:   i = link index
// Output:  none
// Purpose: updates cumulative inflow & outflow, surface area, and
//          summed dq/dh contributions at link i's end nodes.
//
{
    int    k;
    int    barrels = 1;
    int    n1 = Link[i].node1;         // upstream node index
    int    n2 = Link[i].node2;         // downstream node index
    double q  = Link[i].newFlow;       // link flow rate (cfs)
    double uniformLossRate = 0.0;      // evap + seepage loss (cfs)

    // --- compute any uniform seepage loss from a conduit
    if ( Link[i].type == CONDUIT )
    {
        k = Link[i].subIndex;
        uniformLossRate = Conduit[k].evapLossRate + Conduit[k].seepLossRate;
        barrels = Conduit[k].barrels;
        uniformLossRate *= barrels;                                        //(5.1.014)
    }

    // --- update total inflow & outflow at upstream/downstream nodes;
    //     the seepage/evap loss is taken off the receiving node's inflow
    if ( q >= 0.0 )
    {
        Node[n1].outflow += q;                                             //(5.1.014)
        Node[n2].inflow  += q - uniformLossRate;                           //(5.1.014)
    }
    else
    {
        Node[n1].inflow  -= q + uniformLossRate;                           //(5.1.014)
        Node[n2].outflow -= q;                                             //(5.1.014)
    }

    // --- add surf. area contributions to upstream/downstream nodes
    //     (use the cached n1/n2 indices consistently instead of
    //      re-reading Link[i].node1/node2)
    Xnode[n1].newSurfArea += Link[i].surfArea1 * barrels;
    Xnode[n2].newSurfArea += Link[i].surfArea2 * barrels;

    // --- update summed value of dqdh at each end node;
    //     a Type 4 pump contributes no dqdh at its outlet node
    Xnode[n1].sumdqdh += Link[i].dqdh;
    if ( Link[i].type == PUMP )
    {
        k = Link[i].subIndex;
        if ( Pump[k].type != TYPE4_PUMP )
        {
            Xnode[n2].sumdqdh += Link[i].dqdh;
        }
    }
    else Xnode[n2].sumdqdh += Link[i].dqdh;
}
//=============================================================================
int findNodeDepths(double dt)
//
// Input:   dt = time step (sec)
// Output:  returns TRUE if the depth change at every non-outfall node
//          is within the convergence tolerance (HeadTol)
// Purpose: computes a new depth at each node & checks for convergence.
//
{
    int i;
    int converged;                     // convergence flag
    double yOld;                       // previous node depth (ft)

    // --- compute outfall depths based on flow in connecting link
    for ( i = 0; i < Nobjects[LINK]; i++ ) link_setOutfallDepth(i);

    // --- compute new depth for all non-outfall nodes and determine if
    //     depth change from previous iteration is below tolerance
    converged = TRUE;
    #pragma omp parallel num_threads(NumThreads)
    {
        #pragma omp for private(yOld)
        for ( i = 0; i < Nobjects[NODE]; i++ )
        {
            if ( Node[i].type == OUTFALL ) continue;
            yOld = Node[i].newDepth;
            setNodeDepth(i, dt);
            Xnode[i].converged = TRUE;
            if ( fabs(yOld - Node[i].newDepth) > HeadTol )
            {
                // --- write the shared flag atomically: several threads
                //     can fail the tolerance test at once, and concurrent
                //     unsynchronized stores are a data race
                #pragma omp atomic write
                converged = FALSE;
                Xnode[i].converged = FALSE;
            }
        }
    }
    return converged;
}
//=============================================================================
void setNodeDepth(int i, double dt)
//
// Input:   i = node index
//          dt = time step (sec)
// Output:  none
// Purpose: sets depth at non-outfall node after current time step.
//          Non-surcharged nodes update depth from a volume balance over
//          the free surface area; surcharged nodes update it from the
//          summed dq/dh of their connecting links.
//
{
    int     canPond;                   // TRUE if node can pond overflows
    int     isPonded;                  // TRUE if node is currently ponded
    int     isSurcharged = FALSE;      // TRUE if node is surcharged       //(5.1.013)
    double  dQ;                        // inflow minus outflow at node (cfs)
    double  dV;                        // change in node volume (ft3)
    double  dy;                        // change in node depth (ft)
    double  yMax;                      // max. depth at node (ft)
    double  yOld;                      // node depth at previous time step (ft)
    double  yLast;                     // previous node depth (ft)
    double  yNew;                      // new node depth (ft)
    double  yCrown;                    // depth to node crown (ft)
    double  surfArea;                  // node surface area (ft2)
    double  denom;                     // denominator term
    double  corr;                      // correction factor
    double  f;                         // relative surcharge depth

    // --- see if node can pond water above it
    canPond = (AllowPonding && Node[i].pondedArea > 0.0);
    isPonded = (canPond && Node[i].newDepth > Node[i].fullDepth);

    // --- initialize values
    yCrown = Node[i].crownElev - Node[i].invertElev;
    yOld = Node[i].oldDepth;
    yLast = Node[i].newDepth;
    Node[i].overflow = 0.0;
    surfArea = Xnode[i].newSurfArea;
    surfArea = MAX(surfArea, MinSurfArea);                                 //(5.1.013)

    // --- determine average net flow volume into node over the time step
    //     (trapezoidal average of old & current net inflow rates)
    dQ = Node[i].inflow - Node[i].outflow;
    dV = 0.5 * (Node[i].oldNetInflow + dQ) * dt;

    //// Following code segment added to release 5.1.013. ////            //(5.1.013)
    // --- determine if node is EXTRAN surcharged
    if (SurchargeMethod == EXTRAN)
    {
        // --- ponded nodes don't surcharge
        if (isPonded) isSurcharged = FALSE;

        // --- closed storage units that are full are in surcharge
        else if (Node[i].type == STORAGE)
        {
            isSurcharged = (Node[i].surDepth > 0.0 &&
                            yLast > Node[i].fullDepth);
        }

        // --- surcharge occurs when node depth exceeds top of its highest link
        else isSurcharged = (yCrown > 0.0 && yLast > yCrown);
    }
    /////////////////////////////////////////////////////////////

    // --- if node not surcharged, base depth change on surface area
    if (!isSurcharged)                                                     //(5.1.013)
    {
        // --- depth change = net inflow volume / free surface area
        dy = dV / surfArea;
        yNew = yOld + dy;

        // --- save non-ponded surface area for use in surcharge algorithm
        if ( !isPonded ) Xnode[i].oldSurfArea = surfArea;

        // --- apply under-relaxation to new depth estimate
        if ( Steps > 0 )
        {
            yNew = (1.0 - Omega) * yLast + Omega * yNew;
        }

        // --- don't allow a ponded node to drop much below full depth
        if ( isPonded && yNew < Node[i].fullDepth )
            yNew = Node[i].fullDepth - FUDGE;
    }

    // --- if node surcharged, base depth change on dqdh
    //     NOTE: depth change is w.r.t depth from previous
    //     iteration; also, do not apply under-relaxation.
    else
    {
        // --- apply correction factor for upstream terminal nodes
        corr = 1.0;
        if ( Node[i].degree < 0 ) corr = 0.6;

        // --- allow surface area from last non-surcharged condition
        //     to influence dqdh if depth close to crown depth;
        //     the exponential weight fades as depth rises above the crown
        denom = Xnode[i].sumdqdh;
        if ( yLast < 1.25 * yCrown )
        {
            f = (yLast - yCrown) / yCrown;
            denom += (Xnode[i].oldSurfArea/dt -
                      Xnode[i].sumdqdh) * exp(-15.0 * f);
        }

        // --- compute new estimate of node depth
        //     (guard against a zero denominator)
        if ( denom == 0.0 ) dy = 0.0;
        else dy = corr * dQ / denom;
        yNew = yLast + dy;
        if ( yNew < yCrown ) yNew = yCrown - FUDGE;

        // --- don't allow a newly ponded node to rise much above full depth
        if ( canPond && yNew > Node[i].fullDepth )
            yNew = Node[i].fullDepth + FUDGE;
    }

    // --- depth cannot be negative
    if ( yNew < 0 ) yNew = 0.0;

    // --- determine max. non-flooded depth
    yMax = Node[i].fullDepth;
    if ( canPond == FALSE ) yMax += Node[i].surDepth;

    // --- find flooded depth & volume
    if ( yNew > yMax )
    {
        yNew = getFloodedDepth(i, canPond, dV, yNew, yMax, dt);
    }
    else Node[i].newVolume = node_getVolume(i, yNew);

    // --- compute change in depth w.r.t. time
    Xnode[i].dYdT = fabs(yNew - yOld) / dt;

    // --- save new depth for node
    Node[i].newDepth = yNew;
}
//=============================================================================
double getFloodedDepth(int i, int canPond, double dV, double yNew,
                       double yMax, double dt)
//
// Input:   i       = node index
//          canPond = TRUE if water can pond over node
//          dV      = change in volume over time step (ft3)
//          yNew    = current depth at node (ft)
//          yMax    = max. depth at node before ponding (ft)
//          dt      = time step (sec)
// Output:  returns depth at node when flooded (ft)
// Purpose: computes depth, volume and overflow for a flooded node.
//
{
    if ( canPond )
    {
        // --- ponding: volume accumulates above full volume; overflow is
        //     the rate at which volume exceeds the full volume
        Node[i].newVolume = MAX((Node[i].oldVolume+dV), Node[i].fullVolume);
        Node[i].overflow = (Node[i].newVolume -
                            MAX(Node[i].oldVolume, Node[i].fullVolume)) / dt;
    }
    else
    {
        // --- no ponding: all excess volume becomes overflow and the
        //     node is capped at its max. non-flooded depth
        Node[i].overflow = dV / dt;
        Node[i].newVolume = Node[i].fullVolume;
        yNew = yMax;
    }

    // --- suppress negligible overflows
    if ( Node[i].overflow < FUDGE ) Node[i].overflow = 0.0;
    return yNew;
}
//=============================================================================
double getVariableStep(double maxStep)
//
// Input:   maxStep = user-supplied max. time step (sec)
// Output:  returns time step (sec)
// Purpose: finds time step that satisfies stability criterion but
//          is no greater than the user-supplied max. time step.
//
{
    int    minLink = -1;               // link with the critical step
    int    minNode = -1;               // node with the critical step
    double tLink;                      // stable step over all links (sec)
    double tNode;                      // stable step over all nodes (sec)
    double t;                          // selected time step (sec)

    // --- find stable step for links, then tighten it over the nodes
    tLink = getLinkStep(maxStep, &minLink);
    tNode = getNodeStep(tLink, &minNode);

    // --- keep whichever of the two is smaller
    t = tLink;
    if ( tNode < t )
    {
        t = tNode;
        minLink = -1;
    }

    // --- update count of times the minimum node or link was critical
    stats_updateCriticalTimeCount(minNode, minLink);

    // --- never go below the absolute minimum routing step
    if ( t < MinRouteStep ) t = MinRouteStep;
    return t;
}
//=============================================================================
double getLinkStep(double tMin, int *minLink)
//
// Input:   tMin    = critical time step found so far (sec)
// Output:  minLink = index of link with critical time step;
//          returns critical time step (sec)
// Purpose: finds critical time step for conduits based on Courant criterion.
//
{
    int    i;                          // link index
    int    k;                          // conduit index
    double q;                          // flow per barrel (cfs)
    double t;                          // candidate time step (sec)
    double tLink = tMin;               // critical link time step (sec)

    // --- examine each conduit link
    for ( i = 0; i < Nobjects[LINK]; i++ )
    {
        if ( Link[i].type != CONDUIT ) continue;

        // --- skip conduits with negligible flow, area or Fr
        k = Link[i].subIndex;
        q = fabs(Link[i].newFlow) / Conduit[k].barrels;
        if ( q <= FUDGE                                                    //(5.1.013)
          || Conduit[k].a1 <= FUDGE
          || Link[i].froude <= 0.01
           ) continue;

        // --- compute time step to satisfy Courant condition
        t = Link[i].newVolume / Conduit[k].barrels / q;
        t = t * Conduit[k].modLength / link_getLength(i);
        t = t * Link[i].froude / (1.0 + Link[i].froude) * CourantFactor;

        // --- update critical link time step
        if ( t < tLink )
        {
            tLink = t;
            *minLink = i;
        }
    }
    return tLink;
}
//=============================================================================
double getNodeStep(double tMin, int *minNode)
//
// Input:   tMin    = critical time step found so far (sec)
// Output:  minNode = index of node with critical time step;
//          returns critical time step (sec)
// Purpose: finds critical time step for nodes based on max. allowable
//          projected change in depth.
//
{
    int    i;                          // node index
    double yCrown;                     // node crown depth (ft)
    double maxDepth;                   // allowable depth change (ft)
    double dYdT;                       // depth change rate (ft/sec)
    double t1;                         // time to reach depth limit (sec)
    double tNode = tMin;               // critical node time step (sec)

    // --- find smallest time so that estimated change in nodal depth
    //     does not exceed safety factor * maxdepth
    for ( i = 0; i < Nobjects[NODE]; i++ )
    {
        // --- skip outfalls, nearly-dry nodes & nodes at/above crown
        if ( Node[i].type == OUTFALL ) continue;
        if ( Node[i].newDepth <= FUDGE) continue;
        yCrown = Node[i].crownElev - Node[i].invertElev;
        if ( Node[i].newDepth + FUDGE >= yCrown ) continue;

        // --- allowable depth change is 25% of the crown depth
        maxDepth = yCrown * 0.25;
        if ( maxDepth < FUDGE ) continue;
        dYdT = Xnode[i].dYdT;
        if (dYdT < FUDGE ) continue;

        // --- compute time to reach max. depth & compare with critical time
        t1 = maxDepth / dYdT;
        if ( t1 < tNode )
        {
            tNode = t1;
            *minNode = i;
        }
    }
    return tNode;
}
|
debug_private.c | // This testcase checks emission of debug info for variables inside
// private/firstprivate/lastprivate.
// REQUIRES: x86_64-linux
// RUN: %clang_cc1 -debug-info-kind=constructor -x c -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
// CHECK: define internal i32 @.omp_task_entry.
// CHECK: call void @llvm.dbg.declare(metadata i32** %.priv.ptr.addr.i, metadata [[PRIV1:![0-9]+]], metadata !DIExpression(DW_OP_deref))
// CHECK: call void @llvm.dbg.declare(metadata i32** %.priv.ptr.addr1.i, metadata [[PRIV2:![0-9]+]], metadata !DIExpression(DW_OP_deref))
// CHECK: call void @llvm.dbg.declare(metadata i32** %.firstpriv.ptr.addr.i, metadata [[FPRIV:![0-9]+]], metadata !DIExpression(DW_OP_deref))
// CHECK: [[PRIV1]] = !DILocalVariable(name: "priv1"
// CHECK: [[PRIV2]] = !DILocalVariable(name: "priv2"
// CHECK: [[FPRIV]] = !DILocalVariable(name: "fpriv"
extern int printf(const char *, ...);
// foo: recursively computes n + (n+2) + (n+4) + foo(n-1) for n >= 2
// (base case: foo(n) = n for n < 2), running each level's body inside
// an OpenMP task.  priv1/priv2 are task-private and fpriv is
// firstprivate so the FileCheck lines above can verify their
// DILocalVariable debug-info entries.  Do not rename these variables:
// the CHECK patterns match them by name.
int foo(int n) {
  int res, priv1, priv2, fpriv;
  fpriv = n + 4;
  if (n < 2)
    return n; // base case
  else {
#pragma omp task shared(res) private(priv1, priv2) firstprivate(fpriv)
    {
      priv1 = n;
      priv2 = n + 2;
      printf("Task n=%d,priv1=%d,priv2=%d,fpriv=%d\n", n, priv1, priv2, fpriv);
      // res = n + (n+2) + (n+4) + foo(n-1)
      res = priv1 + priv2 + fpriv + foo(n - 1);
    }
#pragma omp taskwait
    return res;
  }
}
// Driver: exercises foo(10) so the translation unit is a complete
// program; the test itself only checks the generated debug metadata.
int main() {
  int n = 10;
  printf("foo(%d) = %d\n", n, foo(n));
  return 0;
}
|
pdgstrs_lsum.c | /*! \file
Copyright (c) 2003, The Regents of the University of California, through
Lawrence Berkeley National Laboratory (subject to receipt of any required
approvals from U.S. Dept. of Energy)
All rights reserved.
The source code is distributed under BSD license, see the file License.txt
at the top-level directory.
*/
/*! @file
* \brief Perform local block modifications: lsum[i] -= L_i,k * X[k]
*
* <pre>
* -- Distributed SuperLU routine (version 6.1) --
* Lawrence Berkeley National Lab, Univ. of California Berkeley.
* March 15, 2003
*
* Modified:
* Feburary 7, 2001 use MPI_Isend/MPI_Irecv
* October 2, 2001 use MPI_Isend/MPI_Irecv with MPI_Test
* February 8, 2019 version 6.1.1
* </pre>
*/
#include "superlu_ddefs.h"
#include "superlu_defs.h"
#ifndef CACHELINE
#define CACHELINE 64 /* bytes, Xeon Phi KNL, Cori haswell, Edision */
#endif
#define ISEND_IRECV
/*
* Function prototypes
*/
#ifdef _CRAY
fortran void STRSM(_fcd, _fcd, _fcd, _fcd, int*, int*, double*,
double*, int*, double*, int*);
fortran void SGEMM(_fcd, _fcd, int*, int*, int*, double*, double*,
int*, double*, int*, double*, double*, int*);
_fcd ftcs1;
_fcd ftcs2;
_fcd ftcs3;
#endif
/************************************************************************/
/*! \brief
*
* <pre>
* Purpose
* =======
* Perform local block modifications: lsum[i] -= L_i,k * X[k].
* </pre>
*/
void dlsum_fmod
/************************************************************************/
(
 double *lsum,    /* Sum of local modifications.                        */
 double *x,       /* X array (local)                                    */
 double *xk,      /* X[k].                                              */
 double *rtemp,   /* Result of full matrix-vector multiply.             */
 int   nrhs,      /* Number of right-hand sides.                        */
 int   knsupc,    /* Size of supernode k.                               */
 int_t k,         /* The k-th component of X.                           */
 int_t *fmod,     /* Modification count for L-solve.                    */
 int_t nlb,       /* Number of L blocks.                                */
 int_t lptr,      /* Starting position in lsub[*].                      */
 int_t luptr,     /* Starting position in lusup[*].                     */
 int_t *xsup,
 gridinfo_t *grid,
 LocalLU_t *Llu,
 MPI_Request send_req[], /* input/output */
 SuperLUStat_t *stat
)
{
    /*
     * Performs lsum[i] -= L_i,k * X[k] for every off-diagonal L block
     * below supernode k.  When a destination block's modification count
     * (fmod) drops to zero, its contribution is either sent to the
     * owning process or, on the diagonal process, folded into x and
     * solved with a triangular solve, after which this routine recurses
     * on the newly solved supernode.
     */
    double alpha = 1.0, beta = 0.0;
    double *lusup, *lusup1;
    double *dest;
    int    iam, iknsupc, myrow, nbrow, nsupr, nsupr1, p, pi;
    int_t  i, ii, ik, il, ikcol, irow, j, lb, lk, lib, rel;
    int_t  *lsub, *lsub1, nlb1, lptr1, luptr1;
    int_t  *ilsum = Llu->ilsum; /* Starting position of each supernode in lsum. */
    int_t  *frecv = Llu->frecv;
    int_t  **fsendx_plist = Llu->fsendx_plist;
    MPI_Status status;
    int test_flag;

#if ( PROFlevel>=1 )
    double t1, t2;
    float msg_vol = 0, msg_cnt = 0;
#endif
#if ( PROFlevel>=1 )
    TIC(t1);
#endif

    iam = grid->iam;
    myrow = MYROW( iam, grid );
    lk = LBj( k, grid ); /* Local block number, column-wise. */
    lsub = Llu->Lrowind_bc_ptr[lk];
    lusup = Llu->Lnzval_bc_ptr[lk];
    nsupr = lsub[1];

    for (lb = 0; lb < nlb; ++lb) {
        ik = lsub[lptr]; /* Global block number, row-wise. */
        nbrow = lsub[lptr+1];
        /* --- dense update: rtemp := L(ik,k) * X[k] --- */
#ifdef _CRAY
        SGEMM( ftcs2, ftcs2, &nbrow, &nrhs, &knsupc,
               &alpha, &lusup[luptr], &nsupr, xk,
               &knsupc, &beta, rtemp, &nbrow );
#elif defined (USE_VENDOR_BLAS)
        dgemm_( "N", "N", &nbrow, &nrhs, &knsupc,
                &alpha, &lusup[luptr], &nsupr, xk,
                &knsupc, &beta, rtemp, &nbrow, 1, 1 );
#else
        dgemm_( "N", "N", &nbrow, &nrhs, &knsupc,
                &alpha, &lusup[luptr], &nsupr, xk,
                &knsupc, &beta, rtemp, &nbrow );
#endif
        stat->ops[SOLVE] += 2 * nbrow * nrhs * knsupc + nbrow * nrhs;

        lk = LBi( ik, grid ); /* Local block number, row-wise. */
        iknsupc = SuperSize( ik );
        il = LSUM_BLK( lk );
        dest = &lsum[il];
        lptr += LB_DESCRIPTOR;
        rel = xsup[ik]; /* Global row index of block ik. */
        /* --- scatter-subtract the GEMM result into lsum using the
               block's row indices --- */
        for (i = 0; i < nbrow; ++i) {
            irow = lsub[lptr++] - rel; /* Relative row. */
            RHS_ITERATE(j)
                dest[irow + j*iknsupc] -= rtemp[i + j*nbrow];
        }
        luptr += nbrow;

#if ( PROFlevel>=1 )
        TOC(t2, t1);
        stat->utime[SOL_GEMM] += t2;
#endif

        if ( (--fmod[lk])==0 ) { /* Local accumulation done. */
            ikcol = PCOL( ik, grid );
            p = PNUM( myrow, ikcol, grid );
            if ( iam != p ) {
                /* Not the diagonal process: ship the accumulated lsum
                   block (message includes its LSUM_H-word header). */
#ifdef ISEND_IRECV
                MPI_Isend( &lsum[il - LSUM_H], iknsupc * nrhs + LSUM_H,
                           MPI_DOUBLE, p, LSUM, grid->comm,
                           &send_req[Llu->SolveMsgSent++] );
#else
#ifdef BSEND
                MPI_Bsend( &lsum[il - LSUM_H], iknsupc * nrhs + LSUM_H,
                           MPI_DOUBLE, p, LSUM, grid->comm );
#else
                MPI_Send( &lsum[il - LSUM_H], iknsupc * nrhs + LSUM_H,
                          MPI_DOUBLE, p, LSUM, grid->comm );
#endif
#endif
#if ( DEBUGlevel>=2 )
                printf("(%2d) Sent LSUM[%2.0f], size %2d, to P %2d\n",
                       iam, lsum[il-LSUM_H], iknsupc*nrhs+LSUM_H, p);
#endif
            } else { /* Diagonal process: X[i] += lsum[i]. */
                ii = X_BLK( lk );
                RHS_ITERATE(j)
                    for (i = 0; i < iknsupc; ++i)
                        x[i + ii + j*iknsupc] += lsum[i + il + j*iknsupc];
                if ( frecv[lk]==0 ) { /* Becomes a leaf node. */
                    fmod[lk] = -1; /* Do not solve X[k] in the future. */
                    lk = LBj( ik, grid );/* Local block number, column-wise. */
                    lsub1 = Llu->Lrowind_bc_ptr[lk];
                    lusup1 = Llu->Lnzval_bc_ptr[lk];
                    nsupr1 = lsub1[1];
#if ( PROFlevel>=1 )
                    TIC(t1);
#endif
                    /* --- lower-triangular solve on the diagonal block
                           (unit-diagonal, no transpose) --- */
#ifdef _CRAY
                    STRSM(ftcs1, ftcs1, ftcs2, ftcs3, &iknsupc, &nrhs, &alpha,
                          lusup1, &nsupr1, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
                    dtrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
                           lusup1, &nsupr1, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
                    dtrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
                           lusup1, &nsupr1, &x[ii], &iknsupc);
#endif
#if ( PROFlevel>=1 )
                    TOC(t2, t1);
                    stat->utime[SOL_TRSM] += t2;
#endif
                    stat->ops[SOLVE] += iknsupc * (iknsupc - 1) * nrhs;
#if ( DEBUGlevel>=2 )
                    printf("(%2d) Solve X[%2d]\n", iam, ik);
#endif
                    /*
                     * Send Xk to process column Pc[k].
                     */
                    for (p = 0; p < grid->nprow; ++p) {
                        if ( fsendx_plist[lk][p] != EMPTY ) {
                            pi = PNUM( p, ikcol, grid );
#ifdef ISEND_IRECV
                            MPI_Isend( &x[ii - XK_H], iknsupc * nrhs + XK_H,
                                       MPI_DOUBLE, pi, Xk, grid->comm,
                                       &send_req[Llu->SolveMsgSent++] );
#else
#ifdef BSEND
                            MPI_Bsend( &x[ii - XK_H], iknsupc * nrhs + XK_H,
                                       MPI_DOUBLE, pi, Xk, grid->comm );
#else
                            MPI_Send( &x[ii - XK_H], iknsupc * nrhs + XK_H,
                                      MPI_DOUBLE, pi, Xk, grid->comm );
#endif
#endif
#if ( DEBUGlevel>=2 )
                            printf("(%2d) Sent X[%2.0f] to P %2d\n",
                                   iam, x[ii-XK_H], pi);
#endif
                        }
                    }
                    /*
                     * Perform local block modifications: recurse on the
                     * supernode ik that was just solved above.
                     */
                    nlb1 = lsub1[0] - 1;
                    lptr1 = BC_HEADER + LB_DESCRIPTOR + iknsupc;
                    luptr1 = iknsupc; /* Skip diagonal block L(I,I). */
                    dlsum_fmod(lsum, x, &x[ii], rtemp, nrhs, iknsupc, ik,
                               fmod, nlb1, lptr1, luptr1, xsup,
                               grid, Llu, send_req, stat);
                } /* if frecv[lk] == 0 */
            } /* if iam == p */
        } /* if fmod[lk] == 0 */
    } /* for lb ... */
} /* dLSUM_FMOD */
/************************************************************************/
void dlsum_bmod
/************************************************************************/
(
 double *lsum,        /* Sum of local modifications.                    */
 double *x,           /* X array (local).                               */
 double *xk,          /* X[k].                                          */
 int    nrhs,         /* Number of right-hand sides.                    */
 int_t  k,            /* The k-th component of X.                       */
 int_t  *bmod,        /* Modification count for L-solve.                */
 int_t  *Urbs,        /* Number of row blocks in each block column of U.*/
 Ucb_indptr_t **Ucb_indptr,/* Vertical linked list pointing to Uindex[].*/
 int_t  **Ucb_valptr, /* Vertical linked list pointing to Unzval[].     */
 int_t  *xsup,
 gridinfo_t *grid,
 LocalLU_t *Llu,
 MPI_Request send_req[], /* input/output */
 SuperLUStat_t *stat
)
{
    /*
     * Purpose
     * =======
     *   Perform local block modifications: lsum[i] -= U_i,k * X[k].
     *
     *   Sparse analogue of dlsum_fmod for the backward (U) solve: for
     *   each U block in column k, subtract its product with X[k] from
     *   lsum; when a destination block's bmod count reaches zero, either
     *   send the accumulated lsum to the owner or (on the diagonal
     *   process) fold it into x, do the upper-triangular solve, and
     *   recurse on the newly solved supernode.
     */
    double alpha = 1.0, beta = 0.0;
    int    iam, iknsupc, knsupc, myrow, nsupr, p, pi;
    int_t  fnz, gik, gikcol, i, ii, ik, ikfrow, iklrow, il, irow,
           j, jj, lk, lk1, nub, ub, uptr;
    int_t  *usub;
    double *uval, *dest, *y;
    int_t  *lsub;
    double *lusup;
    int_t  *ilsum = Llu->ilsum; /* Starting position of each supernode in lsum. */
    int_t  *brecv = Llu->brecv;
    int_t  **bsendx_plist = Llu->bsendx_plist;
    MPI_Status status;
    int test_flag;

    iam = grid->iam;
    myrow = MYROW( iam, grid );
    knsupc = SuperSize( k );
    lk = LBj( k, grid ); /* Local block number, column-wise. */
    nub = Urbs[lk];      /* Number of U blocks in block column lk */

    for (ub = 0; ub < nub; ++ub) {
        ik = Ucb_indptr[lk][ub].lbnum; /* Local block number, row-wise. */
        usub = Llu->Ufstnz_br_ptr[ik];
        uval = Llu->Unzval_br_ptr[ik];
        i = Ucb_indptr[lk][ub].indpos; /* Start of the block in usub[]. */
        i += UB_DESCRIPTOR;
        il = LSUM_BLK( ik );
        gik = ik * grid->nprow + myrow;/* Global block number, row-wise. */
        iknsupc = SuperSize( gik );
        ikfrow = FstBlockC( gik );
        iklrow = FstBlockC( gik+1 );

        /* --- subtract U(ik,k)*X[k] column by column; each column jj of
               the U block is a sparse segment starting at row fnz --- */
        RHS_ITERATE(j) {
            dest = &lsum[il + j*iknsupc];
            y = &xk[j*knsupc];
            uptr = Ucb_valptr[lk][ub]; /* Start of the block in uval[]. */
            for (jj = 0; jj < knsupc; ++jj) {
                fnz = usub[i + jj];
                if ( fnz < iklrow ) { /* Nonzero segment. */
                    /* AXPY */
                    for (irow = fnz; irow < iklrow; ++irow)
                        dest[irow - ikfrow] -= uval[uptr++] * y[jj];
                    stat->ops[SOLVE] += 2 * (iklrow - fnz);
                }
            } /* for jj ... */
        }

        if ( (--bmod[ik]) == 0 ) { /* Local accumulation done. */
            gikcol = PCOL( gik, grid );
            p = PNUM( myrow, gikcol, grid );
            if ( iam != p ) {
                /* Not the diagonal process: ship the accumulated lsum
                   block (message includes its LSUM_H-word header). */
#ifdef ISEND_IRECV
                MPI_Isend( &lsum[il - LSUM_H], iknsupc * nrhs + LSUM_H,
                           MPI_DOUBLE, p, LSUM, grid->comm,
                           &send_req[Llu->SolveMsgSent++] );
#else
#ifdef BSEND
                MPI_Bsend( &lsum[il - LSUM_H], iknsupc * nrhs + LSUM_H,
                           MPI_DOUBLE, p, LSUM, grid->comm );
#else
                MPI_Send( &lsum[il - LSUM_H], iknsupc * nrhs + LSUM_H,
                          MPI_DOUBLE, p, LSUM, grid->comm );
#endif
#endif
#if ( DEBUGlevel>=2 )
                printf("(%2d) Sent LSUM[%2.0f], size %2d, to P %2d\n",
                       iam, lsum[il-LSUM_H], iknsupc*nrhs+LSUM_H, p);
#endif
            } else { /* Diagonal process: X[i] += lsum[i]. */
                ii = X_BLK( ik );
                dest = &x[ii];
                RHS_ITERATE(j)
                    for (i = 0; i < iknsupc; ++i)
                        dest[i + j*iknsupc] += lsum[i + il + j*iknsupc];
                if ( !brecv[ik] ) { /* Becomes a leaf node. */
                    bmod[ik] = -1; /* Do not solve X[k] in the future. */
                    lk1 = LBj( gik, grid ); /* Local block number. */
                    lsub = Llu->Lrowind_bc_ptr[lk1];
                    lusup = Llu->Lnzval_bc_ptr[lk1];
                    nsupr = lsub[1];
                    /* --- upper-triangular solve on the diagonal block
                           (non-unit diagonal, no transpose) --- */
#ifdef _CRAY
                    STRSM(ftcs1, ftcs3, ftcs2, ftcs2, &iknsupc, &nrhs, &alpha,
                          lusup, &nsupr, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
                    dtrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
                           lusup, &nsupr, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
                    dtrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
                           lusup, &nsupr, &x[ii], &iknsupc);
#endif
                    stat->ops[SOLVE] += iknsupc * (iknsupc + 1) * nrhs;
#if ( DEBUGlevel>=2 )
                    printf("(%2d) Solve X[%2d]\n", iam, gik);
#endif
                    /*
                     * Send Xk to process column Pc[k].
                     */
                    for (p = 0; p < grid->nprow; ++p) {
                        if ( bsendx_plist[lk1][p] != EMPTY ) {
                            pi = PNUM( p, gikcol, grid );
#ifdef ISEND_IRECV
                            MPI_Isend( &x[ii - XK_H], iknsupc * nrhs + XK_H,
                                       MPI_DOUBLE, pi, Xk, grid->comm,
                                       &send_req[Llu->SolveMsgSent++] );
#else
#ifdef BSEND
                            MPI_Bsend( &x[ii - XK_H], iknsupc * nrhs + XK_H,
                                       MPI_DOUBLE, pi, Xk, grid->comm );
#else
                            MPI_Send( &x[ii - XK_H], iknsupc * nrhs + XK_H,
                                      MPI_DOUBLE, pi, Xk, grid->comm );
#endif
#endif
#if ( DEBUGlevel>=2 )
                            printf("(%2d) Sent X[%2.0f] to P %2d\n",
                                   iam, x[ii-XK_H], pi);
#endif
                        }
                    }
                    /*
                     * Perform local block modifications: recurse on the
                     * supernode gik that was just solved above.
                     */
                    if ( Urbs[lk1] )
                        dlsum_bmod(lsum, x, &x[ii], nrhs, gik, bmod, Urbs,
                                   Ucb_indptr, Ucb_valptr, xsup, grid, Llu,
                                   send_req, stat);
                } /* if brecv[ik] == 0 */
            }
        } /* if bmod[ik] == 0 */
    } /* for ub ... */
} /* dlSUM_BMOD */
/************************************************************************/
/*! \brief
*
* <pre>
* Purpose
* =======
* Perform local block modifications: lsum[i] -= L_i,k * X[k].
* </pre>
*/
void dlsum_fmod_inv
/************************************************************************/
(
double *lsum, /* Sum of local modifications. */
double *x, /* X array (local) */
double *xk, /* X[k]. */
double *rtemp, /* Result of full matrix-vector multiply. */
int nrhs, /* Number of right-hand sides. */
int_t k, /* The k-th component of X. */
int_t *fmod, /* Modification count for L-solve. */
int_t *xsup, /* Supernode boundaries: xsup[ik] is the first global row of block ik. */
gridinfo_t *grid,
LocalLU_t *Llu,
SuperLUStat_t **stat, /* Per-thread statistics: stat[tid] is updated below. */
int_t *leaf_send, /* Output queue of tree indices to forward after the recursion. */
int_t *nleaf_send, /* Count of entries appended to leaf_send (updated atomically). */
int_t sizelsum, /* Stride between per-thread copies inside lsum. */
int_t sizertemp, /* Stride between per-thread workspaces inside rtemp. */
int_t recurlevel, /* Current recursion depth (incremented on recursive calls). */
int_t maxsuper, /* Supernode-size threshold selecting the parallel chunked path. */
int thread_id,
int num_thread
)
{
/*
 * Perform local block modifications lsum[i] -= L_i,k * X[k] for all L
 * blocks below supernode k, then for every destination block whose
 * modification count reaches zero: reduce the per-thread lsum copies,
 * solve the diagonal block (either by multiplying with the precomputed
 * inverse or by dtrsm), queue the send in leaf_send, and recurse.
 */
double alpha = 1.0, beta = 0.0,malpha=-1.0;
double *lusup, *lusup1;
double *dest;
double *Linv;/* Inverse of diagonal block */
int iam, iknsupc, myrow, krow, nbrow, nbrow1, nbrow_ref, nsupr, nsupr1, p, pi, idx_r,m;
int_t i, ii,jj, ik, il, ikcol, irow, j, lb, lk, rel, lib,lready;
int_t *lsub, *lsub1, nlb1, lptr1, luptr1,*lloc;
int_t *ilsum = Llu->ilsum; /* Starting position of each supernode in lsum. */
int_t *frecv = Llu->frecv;
int_t **fsendx_plist = Llu->fsendx_plist;
int_t luptr_tmp,luptr_tmp1,lptr1_tmp,maxrecvsz, idx_i, idx_v,idx_n, idx_l, fmod_tmp, lbstart,lbend,nn,Nchunk,nlb_loc,remainder;
int thread_id1;
flops_t ops_loc=0.0;
MPI_Status status;
int test_flag;
yes_no_t done;
BcTree *LBtree_ptr = Llu->LBtree_ptr;
RdTree *LRtree_ptr = Llu->LRtree_ptr;
int_t* idx_lsum,idx_lsum1;
double *rtemp_loc;
int_t ldalsum;
int_t nleaf_send_tmp;
int_t lptr; /* Starting position in lsub[*]. */
int_t luptr; /* Starting position in lusup[*]. */
int_t iword = sizeof(int_t);
int_t dword = sizeof (double);
int_t aln_d,aln_i;
/* Index strides padded up to a cache line to avoid false sharing
 * between threads touching adjacent fmod/leaf_send entries. */
aln_d = ceil(CACHELINE/(double)dword);
aln_i = ceil(CACHELINE/(double)iword);
int knsupc; /* Size of supernode k. */
int_t nlb; /* Number of L blocks. */
knsupc = SuperSize( k );
lk = LBj( k, grid ); /* Local block number, column-wise. */
lsub = Llu->Lrowind_bc_ptr[lk];
nlb = lsub[0] - 1; /* lsub[0] counts blocks including the diagonal one. */
ldalsum=Llu->ldalsum;
rtemp_loc = &rtemp[sizertemp* thread_id];
// #if ( PROFlevel>=1 )
double t1, t2, t3, t4;
float msg_vol = 0, msg_cnt = 0;
// #endif
if(nlb>0){
iam = grid->iam;
myrow = MYROW( iam, grid );
lusup = Llu->Lnzval_bc_ptr[lk];
lloc = Llu->Lindval_loc_bc_ptr[lk];
nsupr = lsub[1];
// printf("nlb: %5d lk: %5d\n",nlb,lk);
// fflush(stdout);
krow = PROW( k, grid );
/* lloc packs three index lists back-to-back (local block numbers,
 * lsub offsets, lusup offsets); the base offsets differ by one slot
 * each when this row also holds the diagonal block of k. */
if(myrow==krow){
idx_n = 1;
idx_i = nlb+2;
idx_v = 2*nlb+3;
luptr_tmp = lloc[idx_v];
m = nsupr-knsupc; /* Skip the diagonal block rows. */
}else{
idx_n = 0;
idx_i = nlb;
idx_v = 2*nlb;
luptr_tmp = lloc[idx_v];
m = nsupr;
}
assert(m>0);
/* Large updates: split the nlb blocks into Nchunk contiguous ranges
 * and process them as OpenMP tasks; small updates stay sequential. */
if(m>8*maxsuper){
// if(0){
// Nchunk=floor(num_thread/2.0)+1;
Nchunk=SUPERLU_MIN(num_thread,nlb);
// Nchunk=1;
nlb_loc = floor(((double)nlb)/Nchunk);
remainder = nlb % Nchunk;
#ifdef _OPENMP
#pragma omp taskloop private (lptr1,luptr1,nlb1,thread_id1,lsub1,lusup1,nsupr1,Linv,nn,lbstart,lbend,luptr_tmp1,nbrow,lb,lptr1_tmp,rtemp_loc,nbrow_ref,lptr,nbrow1,ik,rel,lk,iknsupc,il,i,irow,fmod_tmp,ikcol,p,ii,jj,t1,t2,j,nleaf_send_tmp) untied nogroup
#endif
for (nn=0;nn<Nchunk;++nn){
#ifdef _OPENMP
thread_id1 = omp_get_thread_num ();
#else
thread_id1 = 0;
#endif
rtemp_loc = &rtemp[sizertemp* thread_id1];
/* The first `remainder` chunks take one extra block each. */
if(nn<remainder){
lbstart = nn*(nlb_loc+1);
lbend = (nn+1)*(nlb_loc+1);
}else{
lbstart = remainder+nn*nlb_loc;
lbend = remainder + (nn+1)*nlb_loc;
}
if(lbstart<lbend){
#if ( PROFlevel>=1 )
TIC(t1);
#endif
/* Count the rows of this chunk so one GEMM covers all its blocks
 * (they are stored contiguously in lusup). */
luptr_tmp1 = lloc[lbstart+idx_v];
nbrow=0;
for (lb = lbstart; lb < lbend; ++lb){
lptr1_tmp = lloc[lb+idx_i];
nbrow += lsub[lptr1_tmp+1];
}
#ifdef _CRAY
SGEMM( ftcs2, ftcs2, &nbrow, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp1], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &nbrow );
#elif defined (USE_VENDOR_BLAS)
dgemm_( "N", "N", &nbrow, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp1], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &nbrow, 1, 1 );
#else
dgemm_( "N", "N", &nbrow, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp1], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &nbrow );
#endif
/* Scatter the GEMM result into this thread's private slice of lsum. */
nbrow_ref=0;
for (lb = lbstart; lb < lbend; ++lb){
lptr1_tmp = lloc[lb+idx_i];
lptr= lptr1_tmp+2;
nbrow1 = lsub[lptr1_tmp+1];
ik = lsub[lptr1_tmp]; /* Global block number, row-wise. */
rel = xsup[ik]; /* Global row index of block ik. */
lk = LBi( ik, grid ); /* Local block number, row-wise. */
iknsupc = SuperSize( ik );
il = LSUM_BLK( lk );
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
for (i = 0; i < nbrow1; ++i) {
irow = lsub[lptr+i] - rel; /* Relative row. */
lsum[il+irow + j*iknsupc+sizelsum*thread_id1] -= rtemp_loc[nbrow_ref+i + j*nbrow];
}
nbrow_ref+=nbrow1;
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id1]->utime[SOL_GEMM] += t2;
#endif
/* Check whether any destination block became fully modified. */
for (lb=lbstart;lb<lbend;lb++){
lk = lloc[lb+idx_n];
#ifdef _OPENMP
#pragma omp atomic capture
#endif
fmod_tmp=--fmod[lk*aln_i];
if ( fmod_tmp==0 ) { /* Local accumulation done. */
lptr1_tmp = lloc[lb+idx_i];
ik = lsub[lptr1_tmp]; /* Global block number, row-wise. */
lk = LBi( ik, grid ); /* Local block number, row-wise. */
iknsupc = SuperSize( ik );
il = LSUM_BLK( lk );
ikcol = PCOL( ik, grid );
p = PNUM( myrow, ikcol, grid );
if ( iam != p ) {
/* Off-diagonal process: fold the per-thread copies into the
 * first copy and queue the reduction-tree send (encoded -lk-1). */
for (ii=1;ii<num_thread;ii++)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
lsum[il + jj ] += lsum[il + jj + ii*sizelsum];
#ifdef _OPENMP
#pragma omp atomic capture
#endif
nleaf_send_tmp = ++nleaf_send[0];
leaf_send[(nleaf_send_tmp-1)*aln_i] = -lk-1;
// RdTree_forwardMessageSimple(LRtree_ptr[lk],&lsum[il - LSUM_H ],'d');
} else { /* Diagonal process: X[i] += lsum[i]. */
#if ( PROFlevel>=1 )
TIC(t1);
#endif
for (ii=1;ii<num_thread;ii++)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
lsum[il + jj ] += lsum[il + jj + ii*sizelsum];
ii = X_BLK( lk );
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
for (i = 0; i < iknsupc; ++i)
x[i + ii + j*iknsupc] += lsum[i + il + j*iknsupc ];
// fmod[lk] = -1; /* Do not solve X[k] in the future. */
lk = LBj( ik, grid );/* Local block number, column-wise. */
lsub1 = Llu->Lrowind_bc_ptr[lk];
lusup1 = Llu->Lnzval_bc_ptr[lk];
nsupr1 = lsub1[1];
/* Solve the diagonal block: multiply by its precomputed inverse
 * when available, otherwise triangular solve. */
if(Llu->inv == 1){
Linv = Llu->Linv_bc_ptr[lk];
#ifdef _CRAY
SGEMM( ftcs2, ftcs2, &iknsupc, &nrhs, &iknsupc,
&alpha, Linv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#elif defined (USE_VENDOR_BLAS)
dgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Linv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc, 1, 1 );
#else
dgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Linv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#endif
#ifdef _OPENMP
#pragma omp simd
#endif
for (i=0 ; i<iknsupc*nrhs ; i++){
x[ii+i] = rtemp_loc[i];
}
}else{
#ifdef _CRAY
STRSM(ftcs1, ftcs1, ftcs2, ftcs3, &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
dtrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
dtrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc);
#endif
}
// for (i=0 ; i<iknsupc*nrhs ; i++){
// printf("x_lsum: %f\n",x[ii+i]);
// fflush(stdout);
// }
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id1]->utime[SOL_TRSM] += t2;
#endif
stat[thread_id1]->ops[SOLVE] += iknsupc * (iknsupc - 1) * nrhs;
#if ( DEBUGlevel>=2 )
printf("(%2d) Solve X[%2d]\n", iam, ik);
#endif
/*
 * Send Xk to process column Pc[k].
 */
if(LBtree_ptr[lk]!=NULL){
/* Queue the broadcast (positive lk) instead of sending inline. */
#ifdef _OPENMP
#pragma omp atomic capture
#endif
nleaf_send_tmp = ++nleaf_send[0];
leaf_send[(nleaf_send_tmp-1)*aln_i] = lk;
}
/*
 * Perform local block modifications.
 */
// #ifdef _OPENMP
// #pragma omp task firstprivate (Llu,sizelsum,iknsupc,ii,ik,lsub1,x,rtemp,fmod,lsum,stat,nrhs,grid,xsup,recurlevel) private(lptr1,luptr1,nlb1,thread_id1) untied priority(1)
// #endif
{
dlsum_fmod_inv(lsum, x, &x[ii], rtemp, nrhs, ik,
fmod, xsup,
grid, Llu, stat, leaf_send, nleaf_send ,sizelsum,sizertemp,1+recurlevel,maxsuper,thread_id1,num_thread);
}
// } /* if frecv[lk] == 0 */
} /* if iam == p */
} /* if fmod[lk] == 0 */
}
}
}
}else{
/* Sequential path for small updates: one GEMM over all nlb blocks,
 * then the same scatter / count-down / solve logic as above. */
#if ( PROFlevel>=1 )
TIC(t1);
#endif
#ifdef _CRAY
SGEMM( ftcs2, ftcs2, &m, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &m );
#elif defined (USE_VENDOR_BLAS)
dgemm_( "N", "N", &m, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &m, 1, 1 );
#else
dgemm_( "N", "N", &m, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &m );
#endif
nbrow=0;
for (lb = 0; lb < nlb; ++lb){
lptr1_tmp = lloc[lb+idx_i];
nbrow += lsub[lptr1_tmp+1];
}
nbrow_ref=0;
for (lb = 0; lb < nlb; ++lb){
lptr1_tmp = lloc[lb+idx_i];
lptr= lptr1_tmp+2;
nbrow1 = lsub[lptr1_tmp+1];
ik = lsub[lptr1_tmp]; /* Global block number, row-wise. */
rel = xsup[ik]; /* Global row index of block ik. */
lk = LBi( ik, grid ); /* Local block number, row-wise. */
iknsupc = SuperSize( ik );
il = LSUM_BLK( lk );
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
for (i = 0; i < nbrow1; ++i) {
irow = lsub[lptr+i] - rel; /* Relative row. */
lsum[il+irow + j*iknsupc+sizelsum*thread_id] -= rtemp_loc[nbrow_ref+i + j*nbrow];
}
nbrow_ref+=nbrow1;
}
// TOC(t3, t1);
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_GEMM] += t2;
#endif
for (lb=0;lb<nlb;lb++){
lk = lloc[lb+idx_n];
#ifdef _OPENMP
#pragma omp atomic capture
#endif
fmod_tmp=--fmod[lk*aln_i];
if ( fmod_tmp==0 ) { /* Local accumulation done. */
lptr1_tmp = lloc[lb+idx_i];
ik = lsub[lptr1_tmp]; /* Global block number, row-wise. */
lk = LBi( ik, grid ); /* Local block number, row-wise. */
iknsupc = SuperSize( ik );
il = LSUM_BLK( lk );
ikcol = PCOL( ik, grid );
p = PNUM( myrow, ikcol, grid );
if ( iam != p ) {
for (ii=1;ii<num_thread;ii++)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
lsum[il + jj ] += lsum[il + jj + ii*sizelsum];
#ifdef _OPENMP
#pragma omp atomic capture
#endif
nleaf_send_tmp = ++nleaf_send[0];
leaf_send[(nleaf_send_tmp-1)*aln_i] = -lk-1;
} else { /* Diagonal process: X[i] += lsum[i]. */
#if ( PROFlevel>=1 )
TIC(t1);
#endif
for (ii=1;ii<num_thread;ii++)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
lsum[il + jj ] += lsum[il + jj + ii*sizelsum];
ii = X_BLK( lk );
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
for (i = 0; i < iknsupc; ++i)
x[i + ii + j*iknsupc] += lsum[i + il + j*iknsupc ];
lk = LBj( ik, grid );/* Local block number, column-wise. */
lsub1 = Llu->Lrowind_bc_ptr[lk];
lusup1 = Llu->Lnzval_bc_ptr[lk];
nsupr1 = lsub1[1];
if(Llu->inv == 1){
Linv = Llu->Linv_bc_ptr[lk];
#ifdef _CRAY
SGEMM( ftcs2, ftcs2, &iknsupc, &nrhs, &iknsupc,
&alpha, Linv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#elif defined (USE_VENDOR_BLAS)
dgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Linv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc, 1, 1 );
#else
dgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Linv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#endif
#ifdef _OPENMP
#pragma omp simd
#endif
for (i=0 ; i<iknsupc*nrhs ; i++){
x[ii+i] = rtemp_loc[i];
}
}else{
#ifdef _CRAY
STRSM(ftcs1, ftcs1, ftcs2, ftcs3, &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
dtrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
dtrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc);
#endif
}
// for (i=0 ; i<iknsupc*nrhs ; i++){
// printf("x_lsum: %f\n",x[ii+i]);
// fflush(stdout);
// }
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_TRSM] += t2;
#endif
stat[thread_id]->ops[SOLVE] += iknsupc * (iknsupc - 1) * nrhs;
#if ( DEBUGlevel>=2 )
printf("(%2d) Solve X[%2d]\n", iam, ik);
#endif
/*
 * Send Xk to process column Pc[k].
 */
if(LBtree_ptr[lk]!=NULL){
#ifdef _OPENMP
#pragma omp atomic capture
#endif
nleaf_send_tmp = ++nleaf_send[0];
// printf("nleaf_send_tmp %5d lk %5d\n",nleaf_send_tmp);
leaf_send[(nleaf_send_tmp-1)*aln_i] = lk;
// BcTree_forwardMessageSimple(LBtree_ptr[lk],&x[ii - XK_H],'d');
}
/*
 * Perform local block modifications.
 */
// #ifdef _OPENMP
// #pragma omp task firstprivate (Llu,sizelsum,iknsupc,ii,ik,lsub1,x,rtemp,fmod,lsum,stat,nrhs,grid,xsup,recurlevel) private(lptr1,luptr1,nlb1) untied priority(1)
// #endif
{
dlsum_fmod_inv(lsum, x, &x[ii], rtemp, nrhs, ik,
fmod, xsup,
grid, Llu, stat, leaf_send, nleaf_send ,sizelsum,sizertemp,1+recurlevel,maxsuper,thread_id,num_thread);
}
// } /* if frecv[lk] == 0 */
} /* if iam == p */
} /* if fmod[lk] == 0 */
}
// }
}
stat[thread_id]->ops[SOLVE] += 2 * m * nrhs * knsupc;
} /* if nlb>0*/
} /* dLSUM_FMOD_INV */
/************************************************************************/
/*! \brief
*
* <pre>
* Purpose
* =======
* Perform local block modifications: lsum[i] -= L_i,k * X[k].
* </pre>
*/
void dlsum_fmod_inv_master
/************************************************************************/
(
double *lsum, /* Sum of local modifications. */
double *x, /* X array (local) */
double *xk, /* X[k]. */
double *rtemp, /* Result of full matrix-vector multiply. */
int nrhs, /* Number of right-hand sides. */
int knsupc, /* Size of supernode k. */
int_t k, /* The k-th component of X. */
int_t *fmod, /* Modification count for L-solve. */
int_t nlb, /* Number of L blocks. */
int_t *xsup, /* Supernode boundaries: xsup[ik] is the first global row of block ik. */
gridinfo_t *grid,
LocalLU_t *Llu,
SuperLUStat_t **stat, /* Per-thread statistics: stat[tid] is updated below. */
int_t sizelsum, /* Stride between per-thread copies inside lsum. */
int_t sizertemp, /* Stride between per-thread workspaces inside rtemp. */
int_t recurlevel, /* Current recursion depth (incremented on recursive calls). */
int_t maxsuper, /* Supernode-size threshold selecting the parallel chunked path. */
int thread_id,
int num_thread
)
{
/*
 * Master-thread variant of dlsum_fmod_inv: the GEMM/scatter phase may
 * be chunked across threads via taskloop, but the count-down, reduce,
 * solve, and forwarding phases run on the calling thread and forward
 * messages inline (RdTree/BcTree) instead of queueing them.
 */
double alpha = 1.0, beta = 0.0,malpha=-1.0;
double *lusup, *lusup1;
double *dest;
double *Linv;/* Inverse of diagonal block */
int iam, iknsupc, myrow, krow, nbrow, nbrow1, nbrow_ref, nsupr, nsupr1, p, pi, idx_r;
int_t i, ii,jj, ik, il, ikcol, irow, j, lb, lk, rel, lib,lready;
int_t *lsub, *lsub1, nlb1, lptr1, luptr1,*lloc;
int_t *ilsum = Llu->ilsum; /* Starting position of each supernode in lsum. */
int_t *frecv = Llu->frecv;
int_t **fsendx_plist = Llu->fsendx_plist;
int_t luptr_tmp,luptr_tmp1,lptr1_tmp,maxrecvsz, idx_i, idx_v,idx_n, idx_l, fmod_tmp, lbstart,lbend,nn,Nchunk,nlb_loc,remainder;
int thread_id1;
int m;
flops_t ops_loc=0.0;
MPI_Status status;
int test_flag;
yes_no_t done;
BcTree *LBtree_ptr = Llu->LBtree_ptr;
RdTree *LRtree_ptr = Llu->LRtree_ptr;
int_t* idx_lsum,idx_lsum1;
double *rtemp_loc;
int_t ldalsum;
int_t nleaf_send_tmp;
int_t lptr; /* Starting position in lsub[*]. */
int_t luptr; /* Starting position in lusup[*]. */
int_t iword = sizeof(int_t);
int_t dword = sizeof (double);
int_t aln_d,aln_i;
/* Index strides padded up to a cache line to avoid false sharing. */
aln_d = ceil(CACHELINE/(double)dword);
aln_i = ceil(CACHELINE/(double)iword);
ldalsum=Llu->ldalsum;
rtemp_loc = &rtemp[sizertemp* thread_id];
// #if ( PROFlevel>=1 )
double t1, t2, t3, t4;
float msg_vol = 0, msg_cnt = 0;
// #endif
if(nlb>0){
iam = grid->iam;
myrow = MYROW( iam, grid );
lk = LBj( k, grid ); /* Local block number, column-wise. */
// printf("ya1 %5d k %5d lk %5d\n",thread_id,k,lk);
// fflush(stdout);
lsub = Llu->Lrowind_bc_ptr[lk];
// printf("ya2 %5d k %5d lk %5d\n",thread_id,k,lk);
// fflush(stdout);
lusup = Llu->Lnzval_bc_ptr[lk];
lloc = Llu->Lindval_loc_bc_ptr[lk];
// idx_lsum = Llu->Lrowind_bc_2_lsum[lk];
nsupr = lsub[1];
// printf("nlb: %5d lk: %5d\n",nlb,lk);
// fflush(stdout);
krow = PROW( k, grid );
/* lloc packs three index lists back-to-back; the base offsets differ
 * by one slot each when this row also holds the diagonal block of k. */
if(myrow==krow){
idx_n = 1;
idx_i = nlb+2;
idx_v = 2*nlb+3;
luptr_tmp = lloc[idx_v];
m = nsupr-knsupc; /* Skip the diagonal block rows. */
}else{
idx_n = 0;
idx_i = nlb;
idx_v = 2*nlb;
luptr_tmp = lloc[idx_v];
m = nsupr;
}
assert(m>0);
/* Chunked taskloop GEMM/scatter for large updates or many RHS. */
if(m>4*maxsuper || nrhs>10){
// if(m<1){
// TIC(t1);
Nchunk=num_thread;
nlb_loc = floor(((double)nlb)/Nchunk);
remainder = nlb % Nchunk;
#ifdef _OPENMP
#pragma omp taskloop private (lptr1,luptr1,nlb1,thread_id1,lsub1,lusup1,nsupr1,Linv,nn,lbstart,lbend,luptr_tmp1,nbrow,lb,lptr1_tmp,rtemp_loc,nbrow_ref,lptr,nbrow1,ik,rel,lk,iknsupc,il,i,irow,fmod_tmp,ikcol,p,ii,jj,t1,t2,j) untied
#endif
for (nn=0;nn<Nchunk;++nn){
#ifdef _OPENMP
thread_id1 = omp_get_thread_num ();
#else
thread_id1 = 0;
#endif
rtemp_loc = &rtemp[sizertemp* thread_id1];
/* The first `remainder` chunks take one extra block each. */
if(nn<remainder){
lbstart = nn*(nlb_loc+1);
lbend = (nn+1)*(nlb_loc+1);
}else{
lbstart = remainder+nn*nlb_loc;
lbend = remainder + (nn+1)*nlb_loc;
}
if(lbstart<lbend){
#if ( PROFlevel>=1 )
TIC(t1);
#endif
/* One GEMM covers all contiguous blocks of this chunk. */
luptr_tmp1 = lloc[lbstart+idx_v];
nbrow=0;
for (lb = lbstart; lb < lbend; ++lb){
lptr1_tmp = lloc[lb+idx_i];
nbrow += lsub[lptr1_tmp+1];
}
#ifdef _CRAY
SGEMM( ftcs2, ftcs2, &nbrow, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp1], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &nbrow );
#elif defined (USE_VENDOR_BLAS)
dgemm_( "N", "N", &nbrow, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp1], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &nbrow, 1, 1 );
#else
dgemm_( "N", "N", &nbrow, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp1], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &nbrow );
#endif
nbrow_ref=0;
for (lb = lbstart; lb < lbend; ++lb){
lptr1_tmp = lloc[lb+idx_i];
lptr= lptr1_tmp+2;
nbrow1 = lsub[lptr1_tmp+1];
ik = lsub[lptr1_tmp]; /* Global block number, row-wise. */
rel = xsup[ik]; /* Global row index of block ik. */
lk = LBi( ik, grid ); /* Local block number, row-wise. */
iknsupc = SuperSize( ik );
il = LSUM_BLK( lk );
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd lastprivate(irow)
#endif
for (i = 0; i < nbrow1; ++i) {
irow = lsub[lptr+i] - rel; /* Relative row. */
/* NOTE(review): unlike the sequential path below and the sibling
 * dlsum_fmod_inv, this update has no sizelsum*thread offset;
 * presumably distinct chunks never touch the same lsum rows —
 * TODO confirm. */
lsum[il+irow + j*iknsupc] -= rtemp_loc[nbrow_ref+i + j*nbrow];
}
nbrow_ref+=nbrow1;
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id1]->utime[SOL_GEMM] += t2;
#endif
}
}
}else{
/* Sequential GEMM/scatter for small updates. */
#if ( PROFlevel>=1 )
TIC(t1);
#endif
#ifdef _CRAY
SGEMM( ftcs2, ftcs2, &m, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &m );
#elif defined (USE_VENDOR_BLAS)
dgemm_( "N", "N", &m, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &m, 1, 1 );
#else
dgemm_( "N", "N", &m, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &m );
#endif
nbrow=0;
for (lb = 0; lb < nlb; ++lb){
lptr1_tmp = lloc[lb+idx_i];
nbrow += lsub[lptr1_tmp+1];
}
nbrow_ref=0;
for (lb = 0; lb < nlb; ++lb){
lptr1_tmp = lloc[lb+idx_i];
lptr= lptr1_tmp+2;
nbrow1 = lsub[lptr1_tmp+1];
ik = lsub[lptr1_tmp]; /* Global block number, row-wise. */
rel = xsup[ik]; /* Global row index of block ik. */
lk = LBi( ik, grid ); /* Local block number, row-wise. */
iknsupc = SuperSize( ik );
il = LSUM_BLK( lk );
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd lastprivate(irow)
#endif
for (i = 0; i < nbrow1; ++i) {
irow = lsub[lptr+i] - rel; /* Relative row. */
lsum[il+irow + j*iknsupc+sizelsum*thread_id] -= rtemp_loc[nbrow_ref+i + j*nbrow];
}
nbrow_ref+=nbrow1;
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_GEMM] += t2;
#endif
}
// TOC(t3, t1);
rtemp_loc = &rtemp[sizertemp* thread_id];
/* Count-down / reduce / solve phase, executed by the calling thread. */
for (lb=0;lb<nlb;lb++){
lk = lloc[lb+idx_n];
// #ifdef _OPENMP
// #pragma omp atomic capture
// #endif
/* NOTE(review): the atomic pragma is commented out here, unlike
 * dlsum_fmod_inv — presumably safe because only the master thread
 * runs this loop; verify against callers. */
fmod_tmp=--fmod[lk*aln_i];
if ( fmod_tmp==0 ) { /* Local accumulation done. */
// --fmod[lk];
lptr1_tmp = lloc[lb+idx_i];
// luptr_tmp = lloc[lb+idx_v];
ik = lsub[lptr1_tmp]; /* Global block number, row-wise. */
lk = LBi( ik, grid ); /* Local block number, row-wise. */
iknsupc = SuperSize( ik );
il = LSUM_BLK( lk );
// nbrow = lsub[lptr1_tmp+1];
ikcol = PCOL( ik, grid );
p = PNUM( myrow, ikcol, grid );
if ( iam != p ) {
// if(frecv[lk]==0){
// fmod[lk] = -1;
/* Off-diagonal process: reduce per-thread copies, then forward
 * lsum up the reduction tree immediately. */
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
lsum[il + jj ] += lsum[il + jj + ii*sizelsum];
RdTree_forwardMessageSimple(LRtree_ptr[lk],&lsum[il - LSUM_H ],RdTree_GetMsgSize(LRtree_ptr[lk],'d')*nrhs+LSUM_H,'d');
// }
} else { /* Diagonal process: X[i] += lsum[i]. */
// if ( frecv[lk]==0 ) { /* Becomes a leaf node. */
#if ( PROFlevel>=1 )
TIC(t1);
#endif
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
lsum[il + jj ] += lsum[il + jj + ii*sizelsum];
ii = X_BLK( lk );
// for (jj=0;jj<num_thread;jj++)
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
for (i = 0; i < iknsupc; ++i)
x[i + ii + j*iknsupc] += lsum[i + il + j*iknsupc ];
// fmod[lk] = -1; /* Do not solve X[k] in the future. */
lk = LBj( ik, grid );/* Local block number, column-wise. */
lsub1 = Llu->Lrowind_bc_ptr[lk];
lusup1 = Llu->Lnzval_bc_ptr[lk];
nsupr1 = lsub1[1];
/* Solve the diagonal block: inverse-multiply when precomputed,
 * otherwise triangular solve. */
if(Llu->inv == 1){
Linv = Llu->Linv_bc_ptr[lk];
#ifdef _CRAY
SGEMM( ftcs2, ftcs2, &iknsupc, &nrhs, &iknsupc,
&alpha, Linv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#elif defined (USE_VENDOR_BLAS)
dgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Linv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc, 1, 1 );
#else
dgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Linv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#endif
#ifdef _OPENMP
#pragma omp simd
#endif
for (i=0 ; i<iknsupc*nrhs ; i++){
x[ii+i] = rtemp_loc[i];
}
}else{
#ifdef _CRAY
STRSM(ftcs1, ftcs1, ftcs2, ftcs3, &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
dtrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
dtrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc);
#endif
}
// for (i=0 ; i<iknsupc*nrhs ; i++){
// printf("x_usum: %f\n",x[ii+i]);
// fflush(stdout);
// }
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_TRSM] += t2;
#endif
stat[thread_id]->ops[SOLVE] += iknsupc * (iknsupc - 1) * nrhs;
#if ( DEBUGlevel>=2 )
printf("(%2d) Solve X[%2d]\n", iam, ik);
#endif
/*
 * Send Xk to process column Pc[k].
 */
if(LBtree_ptr[lk]!=NULL)
BcTree_forwardMessageSimple(LBtree_ptr[lk],&x[ii - XK_H],BcTree_GetMsgSize(LBtree_ptr[lk],'d')*nrhs+XK_H,'d');
/*
 * Perform local block modifications.
 */
// #ifdef _OPENMP
// #pragma omp task firstprivate (Llu,sizelsum,iknsupc,ii,ik,lsub1,x,rtemp,fmod,lsum,stat,nrhs,grid,xsup,recurlevel) private(lptr1,luptr1,nlb1,thread_id1) untied priority(1)
// #endif
{
nlb1 = lsub1[0] - 1;
dlsum_fmod_inv_master(lsum, x, &x[ii], rtemp, nrhs, iknsupc, ik,
fmod, nlb1, xsup,
grid, Llu, stat,sizelsum,sizertemp,1+recurlevel,maxsuper,thread_id,num_thread);
}
// } /* if frecv[lk] == 0 */
} /* if iam == p */
} /* if fmod[lk] == 0 */
}
// }
stat[thread_id]->ops[SOLVE] += 2 * m * nrhs * knsupc;
} /* if nlb>0*/
} /* dLSUM_FMOD_INV */
/************************************************************************/
void dlsum_bmod_inv
/************************************************************************/
(
double *lsum, /* Sum of local modifications. */
double *x, /* X array (local). */
double *xk, /* X[k]. */
double *rtemp, /* Result of full matrix-vector multiply. */
int nrhs, /* Number of right-hand sides. */
int_t k, /* The k-th component of X. */
int_t *bmod, /* Modification count for L-solve. */
int_t *Urbs, /* Number of row blocks in each block column of U.*/
Ucb_indptr_t **Ucb_indptr,/* Vertical linked list pointing to Uindex[].*/
int_t **Ucb_valptr, /* Vertical linked list pointing to Unzval[]. */
int_t *xsup, /* Supernode boundaries (used via SuperSize/FstBlockC). */
gridinfo_t *grid,
LocalLU_t *Llu,
SuperLUStat_t **stat, /* Per-thread statistics: stat[tid] is updated below. */
int_t* root_send, /* Output queue of tree indices to forward after the recursion. */
int_t* nroot_send, /* Count of entries appended to root_send (updated atomically). */
int_t sizelsum, /* Stride between per-thread copies inside lsum. */
int_t sizertemp, /* Stride between per-thread workspaces inside rtemp. */
int thread_id,
int num_thread
)
{
/*
 * Purpose
 * =======
 * Perform local block modifications: lsum[i] -= U_i,k * X[k].
 *
 * Backward-solve counterpart of dlsum_fmod_inv: applies the U blocks
 * in block column k via sparse AXPYs, atomically counts down bmod per
 * destination block, and when a block is fully modified reduces the
 * per-thread lsum copies, solves with the diagonal block (Uinv or
 * dtrsm), queues the send in root_send, and recurses.
 */
double alpha = 1.0, beta = 0.0;
int iam, iknsupc, knsupc, myrow, nsupr, p, pi;
int_t fnz, gik, gikcol, i, ii, ik, ikfrow, iklrow, il, irow,
j, jj, lk, lk1, nub, ub, uptr;
int_t *usub;
double *uval, *dest, *y;
int_t *lsub;
double *lusup;
int_t *ilsum = Llu->ilsum; /* Starting position of each supernode in lsum. */
int_t *brecv = Llu->brecv;
int_t **bsendx_plist = Llu->bsendx_plist;
BcTree *UBtree_ptr = Llu->UBtree_ptr;
RdTree *URtree_ptr = Llu->URtree_ptr;
MPI_Status status;
int test_flag;
int_t bmod_tmp;
int thread_id1;
double *rtemp_loc;
int_t nroot_send_tmp;
double *Uinv;/* Inverse of diagonal block */
double temp;
double t1, t2;
float msg_vol = 0, msg_cnt = 0;
int_t Nchunk, nub_loc,remainder,nn,lbstart,lbend;
int_t iword = sizeof(int_t);
int_t dword = sizeof (double);
int_t aln_d,aln_i;
/* Index strides padded up to a cache line to avoid false sharing. */
aln_d = ceil(CACHELINE/(double)dword);
aln_i = ceil(CACHELINE/(double)iword);
iam = grid->iam;
myrow = MYROW( iam, grid );
knsupc = SuperSize( k );
lk = LBj( k, grid ); /* Local block number, column-wise. */
nub = Urbs[lk]; /* Number of U blocks in block column lk */
/* Parallel chunked path when the column is dense or has many blocks. */
if(Llu->Unnz[lk]>knsupc*64 || nub>16){
// if(nub>num_thread){
// if(nub>16){
// // // // if(Urbs2[lk]>num_thread){
// if(Urbs2[lk]>0){
Nchunk=SUPERLU_MIN(num_thread,nub);
nub_loc = floor(((double)nub)/Nchunk);
remainder = nub % Nchunk;
// printf("Unnz: %5d nub: %5d knsupc: %5d\n",Llu->Unnz[lk],nub,knsupc);
#ifdef _OPENMP
#pragma omp taskloop firstprivate (stat) private (thread_id1,Uinv,nn,lbstart,lbend,ub,temp,rtemp_loc,ik,lk1,gik,gikcol,usub,uval,lsub,lusup,iknsupc,il,i,irow,bmod_tmp,p,ii,jj,t1,t2,j,ikfrow,iklrow,dest,y,uptr,fnz,nsupr) untied nogroup
#endif
for (nn=0;nn<Nchunk;++nn){
#ifdef _OPENMP
thread_id1 = omp_get_thread_num ();
#else
thread_id1 = 0;
#endif
rtemp_loc = &rtemp[sizertemp* thread_id1];
/* The first `remainder` chunks take one extra U block each. */
if(nn<remainder){
lbstart = nn*(nub_loc+1);
lbend = (nn+1)*(nub_loc+1);
}else{
lbstart = remainder+nn*nub_loc;
lbend = remainder + (nn+1)*nub_loc;
}
for (ub = lbstart; ub < lbend; ++ub){
ik = Ucb_indptr[lk][ub].lbnum; /* Local block number, row-wise. */
usub = Llu->Ufstnz_br_ptr[ik];
uval = Llu->Unzval_br_ptr[ik];
i = Ucb_indptr[lk][ub].indpos; /* Start of the block in usub[]. */
i += UB_DESCRIPTOR;
il = LSUM_BLK( ik );
gik = ik * grid->nprow + myrow;/* Global block number, row-wise. */
iknsupc = SuperSize( gik );
ikfrow = FstBlockC( gik );
iklrow = FstBlockC( gik+1 );
#if ( PROFlevel>=1 )
TIC(t1);
#endif
/* Sparse AXPY of each nonzero column segment into this thread's
 * private slice of lsum. */
RHS_ITERATE(j) {
dest = &lsum[il + j*iknsupc+sizelsum*thread_id1];
y = &xk[j*knsupc];
uptr = Ucb_valptr[lk][ub]; /* Start of the block in uval[]. */
for (jj = 0; jj < knsupc; ++jj) {
fnz = usub[i + jj]; /* First nonzero row of this segment. */
if ( fnz < iklrow ) { /* Nonzero segment. */
/* AXPY */
#ifdef _OPENMP
#pragma omp simd
#endif
for (irow = fnz; irow < iklrow; ++irow)
dest[irow - ikfrow] -= uval[uptr++] * y[jj];
stat[thread_id1]->ops[SOLVE] += 2 * (iklrow - fnz);
}
} /* for jj ... */
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id1]->utime[SOL_GEMM] += t2;
#endif
#ifdef _OPENMP
#pragma omp atomic capture
#endif
bmod_tmp=--bmod[ik*aln_i];
if ( bmod_tmp == 0 ) { /* Local accumulation done. */
gikcol = PCOL( gik, grid );
p = PNUM( myrow, gikcol, grid );
if ( iam != p ) {
/* Off-diagonal process: reduce per-thread copies and queue the
 * reduction-tree send (encoded -ik-1). */
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id1)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
lsum[il + jj ] += lsum[il + jj + ii*sizelsum];
#ifdef _OPENMP
#pragma omp atomic capture
#endif
nroot_send_tmp = ++nroot_send[0];
root_send[(nroot_send_tmp-1)*aln_i] = -ik-1;
// RdTree_forwardMessageSimple(URtree_ptr[ik],&lsum[il - LSUM_H ],'d');
#if ( DEBUGlevel>=2 )
printf("(%2d) Sent LSUM[%2.0f], size %2d, to P %2d\n",
iam, lsum[il-LSUM_H], iknsupc*nrhs+LSUM_H, p);
#endif
} else { /* Diagonal process: X[i] += lsum[i]. */
#if ( PROFlevel>=1 )
TIC(t1);
#endif
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id1)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
lsum[il + jj ] += lsum[il + jj + ii*sizelsum];
ii = X_BLK( ik );
dest = &x[ii];
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
for (i = 0; i < iknsupc; ++i)
dest[i + j*iknsupc] += lsum[i + il + j*iknsupc];
// if ( !brecv[ik] ) { /* Becomes a leaf node. */
// bmod[ik] = -1; /* Do not solve X[k] in the future. */
lk1 = LBj( gik, grid ); /* Local block number. */
lsub = Llu->Lrowind_bc_ptr[lk1];
lusup = Llu->Lnzval_bc_ptr[lk1];
nsupr = lsub[1];
/* Solve with the upper diagonal block: inverse-multiply when
 * precomputed, otherwise triangular solve. */
if(Llu->inv == 1){
Uinv = Llu->Uinv_bc_ptr[lk1];
#ifdef _CRAY
SGEMM( ftcs2, ftcs2, &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#elif defined (USE_VENDOR_BLAS)
dgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc, 1, 1 );
#else
dgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#endif
#ifdef _OPENMP
#pragma omp simd
#endif
for (i=0 ; i<iknsupc*nrhs ; i++){
x[ii+i] = rtemp_loc[i];
}
}else{
#ifdef _CRAY
STRSM(ftcs1, ftcs3, ftcs2, ftcs2, &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
dtrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
dtrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc);
#endif
}
// for (i=0 ; i<iknsupc*nrhs ; i++){
// printf("x_usum: %f\n",x[ii+i]);
// fflush(stdout);
// }
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id1]->utime[SOL_TRSM] += t2;
#endif
stat[thread_id1]->ops[SOLVE] += iknsupc * (iknsupc + 1) * nrhs;
#if ( DEBUGlevel>=2 )
printf("(%2d) Solve X[%2d]\n", iam, gik);
#endif
/*
 * Send Xk to process column Pc[k].
 */
// for (i=0 ; i<iknsupc*nrhs ; i++){
// printf("xre: %f\n",x[ii+i]);
// fflush(stdout);
// }
if(UBtree_ptr[lk1]!=NULL){
/* Queue the broadcast (positive lk1) instead of sending inline. */
#ifdef _OPENMP
#pragma omp atomic capture
#endif
nroot_send_tmp = ++nroot_send[0];
root_send[(nroot_send_tmp-1)*aln_i] = lk1;
// BcTree_forwardMessageSimple(UBtree_ptr[lk1],&x[ii - XK_H],'d');
}
/*
 * Perform local block modifications.
 */
if ( Urbs[lk1] ){
// #ifdef _OPENMP
// #pragma omp task firstprivate (Ucb_indptr,Ucb_valptr,Llu,sizelsum,ii,gik,x,rtemp,bmod,Urbs,lsum,stat,nrhs,grid,xsup) untied
// #endif
{
dlsum_bmod_inv(lsum, x, &x[ii], rtemp, nrhs, gik, bmod, Urbs,
Ucb_indptr, Ucb_valptr, xsup, grid, Llu,
stat, root_send, nroot_send, sizelsum,sizertemp,thread_id1,num_thread);
}
}
// } /* if brecv[ik] == 0 */
}
} /* if bmod[ik] == 0 */
}
}
} else {
/* Sequential path for sparse/small block columns: same logic,
 * running entirely on the calling thread. */
rtemp_loc = &rtemp[sizertemp* thread_id];
for (ub = 0; ub < nub; ++ub) {
ik = Ucb_indptr[lk][ub].lbnum; /* Local block number, row-wise. */
usub = Llu->Ufstnz_br_ptr[ik];
uval = Llu->Unzval_br_ptr[ik];
i = Ucb_indptr[lk][ub].indpos; /* Start of the block in usub[]. */
i += UB_DESCRIPTOR;
il = LSUM_BLK( ik );
gik = ik * grid->nprow + myrow;/* Global block number, row-wise. */
iknsupc = SuperSize( gik );
ikfrow = FstBlockC( gik );
iklrow = FstBlockC( gik+1 );
#if ( PROFlevel>=1 )
TIC(t1);
#endif
RHS_ITERATE(j) {
dest = &lsum[il + j*iknsupc+sizelsum*thread_id];
y = &xk[j*knsupc];
uptr = Ucb_valptr[lk][ub]; /* Start of the block in uval[]. */
for (jj = 0; jj < knsupc; ++jj) {
fnz = usub[i + jj]; /* First nonzero row of this segment. */
if ( fnz < iklrow ) { /* Nonzero segment. */
/* AXPY */
#ifdef _OPENMP
#pragma omp simd
#endif
for (irow = fnz; irow < iklrow; ++irow)
dest[irow - ikfrow] -= uval[uptr++] * y[jj];
stat[thread_id]->ops[SOLVE] += 2 * (iklrow - fnz);
}
} /* for jj ... */
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_GEMM] += t2;
#endif
#ifdef _OPENMP
#pragma omp atomic capture
#endif
bmod_tmp=--bmod[ik*aln_i];
if ( bmod_tmp == 0 ) { /* Local accumulation done. */
gikcol = PCOL( gik, grid );
p = PNUM( myrow, gikcol, grid );
if ( iam != p ) {
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
lsum[il + jj ] += lsum[il + jj + ii*sizelsum];
#ifdef _OPENMP
#pragma omp atomic capture
#endif
nroot_send_tmp = ++nroot_send[0];
root_send[(nroot_send_tmp-1)*aln_i] = -ik-1;
// RdTree_forwardMessageSimple(URtree_ptr[ik],&lsum[il - LSUM_H ],'d');
#if ( DEBUGlevel>=2 )
printf("(%2d) Sent LSUM[%2.0f], size %2d, to P %2d\n",
iam, lsum[il-LSUM_H], iknsupc*nrhs+LSUM_H, p);
#endif
} else { /* Diagonal process: X[i] += lsum[i]. */
#if ( PROFlevel>=1 )
TIC(t1);
#endif
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
lsum[il + jj ] += lsum[il + jj + ii*sizelsum];
ii = X_BLK( ik );
dest = &x[ii];
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
for (i = 0; i < iknsupc; ++i)
dest[i + j*iknsupc] += lsum[i + il + j*iknsupc];
// if ( !brecv[ik] ) { /* Becomes a leaf node. */
// bmod[ik] = -1; /* Do not solve X[k] in the future. */
lk1 = LBj( gik, grid ); /* Local block number. */
lsub = Llu->Lrowind_bc_ptr[lk1];
lusup = Llu->Lnzval_bc_ptr[lk1];
nsupr = lsub[1];
if(Llu->inv == 1){
Uinv = Llu->Uinv_bc_ptr[lk1];
#ifdef _CRAY
SGEMM( ftcs2, ftcs2, &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#elif defined (USE_VENDOR_BLAS)
dgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc, 1, 1 );
#else
dgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#endif
#ifdef _OPENMP
#pragma omp simd
#endif
for (i=0 ; i<iknsupc*nrhs ; i++){
x[ii+i] = rtemp_loc[i];
}
}else{
#ifdef _CRAY
STRSM(ftcs1, ftcs3, ftcs2, ftcs2, &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
dtrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
dtrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc);
#endif
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_TRSM] += t2;
#endif
stat[thread_id]->ops[SOLVE] += iknsupc * (iknsupc + 1) * nrhs;
#if ( DEBUGlevel>=2 )
printf("(%2d) Solve X[%2d]\n", iam, gik);
#endif
/*
 * Send Xk to process column Pc[k].
 */
// for (i=0 ; i<iknsupc*nrhs ; i++){
// printf("xre: %f\n",x[ii+i]);
// fflush(stdout);
// }
if(UBtree_ptr[lk1]!=NULL){
#ifdef _OPENMP
#pragma omp atomic capture
#endif
nroot_send_tmp = ++nroot_send[0];
root_send[(nroot_send_tmp-1)*aln_i] = lk1;
// BcTree_forwardMessageSimple(UBtree_ptr[lk1],&x[ii - XK_H],'d');
}
/*
 * Perform local block modifications.
 */
if ( Urbs[lk1] )
// if(Urbs[lk1]>16){
// #ifdef _OPENMP
// #pragma omp task firstprivate (Ucb_indptr,Ucb_valptr,Llu,sizelsum,ii,gik,x,rtemp,bmod,Urbs,lsum,stat,nrhs,grid,xsup) untied
// #endif
// dlsum_bmod_inv(lsum, x, &x[ii], rtemp, nrhs, gik, bmod, Urbs,
// Ucb_indptr, Ucb_valptr, xsup, grid, Llu,
// stat, root_send, nroot_send, sizelsum,sizertemp);
//}else{
dlsum_bmod_inv(lsum, x, &x[ii], rtemp, nrhs, gik, bmod, Urbs,
Ucb_indptr, Ucb_valptr, xsup, grid, Llu,
stat, root_send, nroot_send, sizelsum,sizertemp,thread_id,num_thread);
//}
// } /* if brecv[ik] == 0 */
}
} /* if bmod[ik] == 0 */
} /* for ub ... */
}
} /* dlSUM_BMOD_inv */
/************************************************************************/
void dlsum_bmod_inv_master
/************************************************************************/
(
double *lsum, /* Sum of local modifications. */
double *x, /* X array (local). */
double *xk, /* X[k]. */
double *rtemp, /* Result of full matrix-vector multiply. */
int nrhs, /* Number of right-hand sides. */
int_t k, /* The k-th component of X. */
int_t *bmod, /* Modification count for L-solve. */
int_t *Urbs, /* Number of row blocks in each block column of U.*/
Ucb_indptr_t **Ucb_indptr,/* Vertical linked list pointing to Uindex[].*/
int_t **Ucb_valptr, /* Vertical linked list pointing to Unzval[]. */
int_t *xsup, /* Supernode partition. */
gridinfo_t *grid, /* 2D process grid. */
LocalLU_t *Llu, /* Local L and U factor data structures. */
SuperLUStat_t **stat, /* Per-thread solve statistics (flops, timings). */
int_t sizelsum, /* Length of one thread's private lsum segment. */
int_t sizertemp, /* Length of one thread's private rtemp segment. */
int thread_id, /* Id of the calling thread. */
int num_thread /* Total number of threads. */
)
{
/*
* Purpose
* =======
* Perform local block modifications: lsum[i] -= U_i,k * X[k].
*
* "Master" variant of dlsum_bmod_inv: the AXPY update phase may be
* split into per-thread chunks, while the completion phase (reduction
* of per-thread lsum copies, diagonal solve, message forwarding and
* recursion) is performed by the calling thread.
*/
double alpha = 1.0, beta = 0.0;
int iam, iknsupc, knsupc, myrow, nsupr, p, pi;
int_t fnz, gik, gikcol, i, ii, ik, ikfrow, iklrow, il, irow,
j, jj, lk, lk1, nub, ub, uptr;
int_t *usub;
double *uval, *dest, *y;
int_t *lsub;
double *lusup;
int_t *ilsum = Llu->ilsum; /* Starting position of each supernode in lsum. */
int_t *brecv = Llu->brecv;
int_t **bsendx_plist = Llu->bsendx_plist;
BcTree *UBtree_ptr = Llu->UBtree_ptr;
RdTree *URtree_ptr = Llu->URtree_ptr;
MPI_Status status;
int test_flag;
int_t bmod_tmp;
int thread_id1;
double *rtemp_loc;
double temp;
double *Uinv;/* Inverse of diagonal block */
double t1, t2;
float msg_vol = 0, msg_cnt = 0;
int_t Nchunk, nub_loc,remainder,nn,lbstart,lbend;
int_t iword = sizeof(int_t);
int_t dword = sizeof (double);
int_t aln_d,aln_i;
/* Cache-line-sized strides (in elements) used to pad index arrays
such as bmod[] so concurrent updates avoid false sharing. */
aln_d = ceil(CACHELINE/(double)dword);
aln_i = ceil(CACHELINE/(double)iword);
rtemp_loc = &rtemp[sizertemp* thread_id];
iam = grid->iam;
myrow = MYROW( iam, grid );
knsupc = SuperSize( k );
lk = LBj( k, grid ); /* Local block number, column-wise. */
nub = Urbs[lk]; /* Number of U blocks in block column lk */
// printf("Urbs2[lk] %5d lk %5d nub %5d\n",Urbs2[lk],lk,nub);
// fflush(stdout);
/* ------------------------------------------------------------
Update phase: lsum -= U_i,k * X[k] for every U block in column lk.
If there are more blocks than threads, split them into Nchunk
contiguous ranges. NOTE(review): the taskloop pragma below is
commented out, so the chunks currently execute sequentially on
the calling thread — confirm this is intended.
------------------------------------------------------------ */
if(nub>num_thread){
// if(nub>0){
Nchunk=num_thread;
nub_loc = floor(((double)nub)/Nchunk); /* blocks per chunk */
remainder = nub % Nchunk; /* first `remainder` chunks get one extra */
//#ifdef _OPENMP
//#pragma omp taskloop firstprivate (stat) private (thread_id1,nn,lbstart,lbend,ub,temp,rtemp_loc,ik,gik,usub,uval,iknsupc,il,i,irow,jj,t1,t2,j,ikfrow,iklrow,dest,y,uptr,fnz) untied
//#endif
for (nn=0;nn<Nchunk;++nn){
#ifdef _OPENMP
thread_id1 = omp_get_thread_num ();
#else
thread_id1 = 0;
#endif
rtemp_loc = &rtemp[sizertemp* thread_id1];
#if ( PROFlevel>=1 )
TIC(t1);
#endif
/* Balanced [lbstart, lbend) partition of the nub blocks. */
if(nn<remainder){
lbstart = nn*(nub_loc+1);
lbend = (nn+1)*(nub_loc+1);
}else{
lbstart = remainder+nn*nub_loc;
lbend = remainder + (nn+1)*nub_loc;
}
for (ub = lbstart; ub < lbend; ++ub){
ik = Ucb_indptr[lk][ub].lbnum; /* Local block number, row-wise. */
usub = Llu->Ufstnz_br_ptr[ik];
uval = Llu->Unzval_br_ptr[ik];
i = Ucb_indptr[lk][ub].indpos; /* Start of the block in usub[]. */
i += UB_DESCRIPTOR;
il = LSUM_BLK( ik );
gik = ik * grid->nprow + myrow;/* Global block number, row-wise. */
iknsupc = SuperSize( gik );
ikfrow = FstBlockC( gik );
iklrow = FstBlockC( gik+1 );
RHS_ITERATE(j) {
/* Accumulate into this thread's private lsum slice. */
dest = &lsum[il + j*iknsupc+sizelsum*thread_id1];
y = &xk[j*knsupc];
uptr = Ucb_valptr[lk][ub]; /* Start of the block in uval[]. */
for (jj = 0; jj < knsupc; ++jj) {
fnz = usub[i + jj];
if ( fnz < iklrow ) { /* Nonzero segment. */
/* AXPY */
#ifdef _OPENMP
#pragma omp simd
#endif
for (irow = fnz; irow < iklrow; ++irow)
dest[irow - ikfrow] -= uval[uptr++] * y[jj];
stat[thread_id1]->ops[SOLVE] += 2 * (iklrow - fnz);
}
} /* for jj ... */
}
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id1]->utime[SOL_GEMM] += t2;
#endif
}
}else{
/* Few blocks: process all of them on the calling thread. */
rtemp_loc = &rtemp[sizertemp* thread_id];
#if ( PROFlevel>=1 )
TIC(t1);
#endif
for (ub = 0; ub < nub; ++ub) {
ik = Ucb_indptr[lk][ub].lbnum; /* Local block number, row-wise. */
usub = Llu->Ufstnz_br_ptr[ik];
uval = Llu->Unzval_br_ptr[ik];
i = Ucb_indptr[lk][ub].indpos; /* Start of the block in usub[]. */
i += UB_DESCRIPTOR;
il = LSUM_BLK( ik );
gik = ik * grid->nprow + myrow;/* Global block number, row-wise. */
iknsupc = SuperSize( gik );
ikfrow = FstBlockC( gik );
iklrow = FstBlockC( gik+1 );
RHS_ITERATE(j) {
dest = &lsum[il + j*iknsupc+sizelsum*thread_id];
y = &xk[j*knsupc];
uptr = Ucb_valptr[lk][ub]; /* Start of the block in uval[]. */
for (jj = 0; jj < knsupc; ++jj) {
fnz = usub[i + jj];
if ( fnz < iklrow ) { /* Nonzero segment. */
/* AXPY */
#ifdef _OPENMP
#pragma omp simd
#endif
for (irow = fnz; irow < iklrow; ++irow)
dest[irow - ikfrow] -= uval[uptr++] * y[jj];
stat[thread_id]->ops[SOLVE] += 2 * (iklrow - fnz);
}
} /* for jj ... */
}
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_GEMM] += t2;
#endif
}
/* ------------------------------------------------------------
Completion phase: decrement each touched block's modification
count; when a block's count reaches zero it is fully updated and
can be reduced, solved (diagonal process) or forwarded.
------------------------------------------------------------ */
rtemp_loc = &rtemp[sizertemp* thread_id];
for (ub = 0; ub < nub; ++ub){
ik = Ucb_indptr[lk][ub].lbnum; /* Local block number, row-wise. */
il = LSUM_BLK( ik );
gik = ik * grid->nprow + myrow;/* Global block number, row-wise. */
iknsupc = SuperSize( gik );
// #ifdef _OPENMP
// #pragma omp atomic capture
// #endif
/* NOTE(review): the atomic-capture pragma above is commented out,
so this decrement is not thread-safe; presumably the master
variant runs this phase on a single thread — confirm. */
bmod_tmp=--bmod[ik*aln_i];
if ( bmod_tmp == 0 ) { /* Local accumulation done. */
gikcol = PCOL( gik, grid );
p = PNUM( myrow, gikcol, grid );
if ( iam != p ) {
/* Off-diagonal process: reduce the per-thread lsum copies
into slice 0, then forward along the reduction tree. */
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
lsum[il + jj ] += lsum[il + jj + ii*sizelsum];
RdTree_forwardMessageSimple(URtree_ptr[ik],&lsum[il - LSUM_H ],RdTree_GetMsgSize(URtree_ptr[ik],'d')*nrhs+LSUM_H,'d');
#if ( DEBUGlevel>=2 )
printf("(%2d) Sent LSUM[%2.0f], size %2d, to P %2d\n",
iam, lsum[il-LSUM_H], iknsupc*nrhs+LSUM_H, p);
#endif
} else { /* Diagonal process: X[i] += lsum[i]. */
#if ( PROFlevel>=1 )
TIC(t1);
#endif
/* Reduce the per-thread lsum copies into slice 0. */
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
lsum[il + jj ] += lsum[il + jj + ii*sizelsum];
ii = X_BLK( ik );
dest = &x[ii];
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
for (i = 0; i < iknsupc; ++i)
dest[i + j*iknsupc] += lsum[i + il + j*iknsupc];
// if ( !brecv[ik] ) { /* Becomes a leaf node. */
// bmod[ik] = -1; /* Do not solve X[k] in the future. */
lk1 = LBj( gik, grid ); /* Local block number. */
lsub = Llu->Lrowind_bc_ptr[lk1];
lusup = Llu->Lnzval_bc_ptr[lk1];
nsupr = lsub[1];
/* Solve the diagonal block: either multiply by its precomputed
inverse (GEMM) or do a triangular solve (TRSM). */
if(Llu->inv == 1){
Uinv = Llu->Uinv_bc_ptr[lk1];
#ifdef _CRAY
SGEMM( ftcs2, ftcs2, &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#elif defined (USE_VENDOR_BLAS)
dgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc, 1, 1 );
#else
dgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#endif
#ifdef _OPENMP
#pragma omp simd
#endif
for (i=0 ; i<iknsupc*nrhs ; i++){
x[ii+i] = rtemp_loc[i];
}
}else{
#ifdef _CRAY
STRSM(ftcs1, ftcs3, ftcs2, ftcs2, &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
dtrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
dtrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc);
#endif
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_TRSM] += t2;
#endif
stat[thread_id]->ops[SOLVE] += iknsupc * (iknsupc + 1) * nrhs;
#if ( DEBUGlevel>=2 )
printf("(%2d) Solve X[%2d]\n", iam, gik);
#endif
/*
* Send Xk to process column Pc[k].
*/
// for (i=0 ; i<iknsupc*nrhs ; i++){
// printf("xre: %f\n",x[ii+i]);
// fflush(stdout);
// }
if(UBtree_ptr[lk1]!=NULL){
BcTree_forwardMessageSimple(UBtree_ptr[lk1],&x[ii - XK_H],BcTree_GetMsgSize(UBtree_ptr[lk1],'d')*nrhs+XK_H,'d');
}
/*
* Perform local block modifications.
*/
if ( Urbs[lk1] ){
// #ifdef _OPENMP
// #pragma omp task firstprivate (Ucb_indptr,Ucb_valptr,Llu,sizelsum,ii,gik,x,rtemp,bmod,Urbs,lsum,stat,nrhs,grid,xsup) untied
// #endif
{
/* Recurse: the freshly solved X block feeds further U updates. */
dlsum_bmod_inv_master(lsum, x, &x[ii], rtemp, nrhs, gik, bmod, Urbs,
Ucb_indptr, Ucb_valptr, xsup, grid, Llu,
stat, sizelsum,sizertemp,thread_id,num_thread);
}
}
// } /* if brecv[ik] == 0 */
}
} /* if bmod[ik] == 0 */
}
} /* dlsum_bmod_inv_master */
|
blockchain.c | /*********************************************************************
Homework 5
CS 110: Computer Architecture, Spring 2021
ShanghaiTech University
* Last Modified: 03/28/2021
*********************************************************************/
#include "blockchain.h"
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include "hash_functions/sha256.h"
#include <stddef.h>
#include <stdio.h>
#define bwLittleEndian32(addr, x) ((*((WORD *)(addr))) = __builtin_bswap32(x))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
/* Compute the SHA-256 midstate over the first 256 bytes of a block
and copy the 8 intermediate state words into `state` (the digest is
not finalized; no padding or length block is processed). */
void getSha256State(blk_t *node, WORD state[8]) {
SHA256_CTX hash_ctx;
sha256_init(&hash_ctx);
sha256_update(&hash_ctx, (unsigned char *)node, 256);
memcpy(state, hash_ctx.state, sizeof(WORD) * 8);
}
/* Initialize a blockchain node header: index, timestamp, sentinel
nonce (-1), previous-block hash, and up to 256 bytes of payload
(zero-padded). A null node, data, or prev_hash is a silent no-op. */
void blockchain_node_init(blk_t *node, uint32_t index, uint32_t timestamp,
unsigned char prev_hash[32], unsigned char *data,
size_t data_size) {
size_t copy_len;
if (node == NULL || data == NULL || prev_hash == NULL)
return;
/* Payload is clamped to the fixed 256-byte data field. */
copy_len = (data_size < 256) ? data_size : 256;
node->header.index = index;
node->header.timestamp = timestamp;
node->header.nonce = -1;
memcpy(node->header.prev_hash, prev_hash, HASH_BLOCK_SIZE);
memset(node->header.data, 0, 256);
memcpy(node->header.data, data, copy_len);
}
/* Hash a node's header (sizeof(blkh_t) bytes) into hash_buf using the
supplied hash callback. Null node is a silent no-op. */
void blockchain_node_hash(blk_t *node, unsigned char hash_buf[HASH_BLOCK_SIZE],
hash_func func) {
if (node == NULL)
return;
func((unsigned char *)node, sizeof(blkh_t), (unsigned char *)hash_buf);
}
/* Verify a node against its predecessor: the node's stored hash must
match the recomputed hash of its header, and its prev_hash field
must match the recomputed hash of the predecessor's header.
Returns True only when both checks pass. */
BOOL blockchain_node_verify(blk_t *node, blk_t *prev_node, hash_func func) {
unsigned char digest[HASH_BLOCK_SIZE];
if (node == NULL || prev_node == NULL)
return False;
/* Check the node's own hash. */
blockchain_node_hash(node, digest, func);
if (memcmp(node->hash, digest, HASH_BLOCK_SIZE) != 0)
return False;
/* Check the back-link to the previous block. */
blockchain_node_hash(prev_node, digest, func);
if (memcmp(node->header.prev_hash, digest, HASH_BLOCK_SIZE) != 0)
return False;
return True;
}
#define THREAD_NUM 20
#define NONCE_STEP (THREAD_NUM)
#define USE_AVX 1
#ifndef USE_AVX
#define USE_AVX 0
#endif
/* Per-thread mining scratch state for the generic path; 64-byte
aligned to keep each thread's data on its own cache line. */
struct __attribute__((aligned(64))) ThreadData {
blkh_t header; /* private copy of the header being mined (own nonce) */
unsigned char hash_buf[HASH_BLOCK_SIZE]; /* most recent digest */
int flag; /* set to 1 by the thread that found a valid nonce */
};
/* Per-thread state for the 8-way (AVX) path: only the nonce varies,
the rest of the header is captured in a shared SHA-256 midstate. */
struct __attribute__((aligned(64))) ThreadDataAvx {
uint64_t nonce; /* base nonce of the current batch of 8 */
unsigned char hash_buf[HASH_BLOCK_SIZE]; /* digest of the current lane */
int flag; /* set to 1 when a valid nonce is found */
};
// extern void sha256_transform(SHA256_CTX *ctx, const BYTE data[]);
/* 8-way SHA-256 compression defined elsewhere: hashes 8 consecutive
nonces starting at `nonce` against midstate `state`, writing one
column of outputState per lane — presumably AVX-based; confirm
against its definition. */
extern void transform8way(WORD state[8], const unsigned char *data, uint64_t nonce, WORD outputState[8][8]);
/* Parallel proof-of-work mining: search for a nonce whose SHA-256
digest has at least `diff` leading zero bits, writing the winning
nonce and digest back into `node` and `hash_buf`. THREAD_NUM OpenMP
threads probe interleaved nonce sequences (thread t tries t,
t+NONCE_STEP, ...). Two paths: a generic one that re-hashes with
the incremental SHA-256 API, and an 8-way path (when USE_AVX and the
header is the expected 304 bytes) that reuses a precomputed midstate
and hashes 8 nonces per transform8way call.
NOTE(review): `any_find_flag` is read and written by all threads
without atomics or flushes; it works as a best-effort stop signal
but is a data race under the C/OpenMP memory model — confirm this
is intentional. */
void blockchain_node_mine(blk_t *node, unsigned char hash_buf[HASH_BLOCK_SIZE],
size_t diff, hash_func func) {
if (!USE_AVX || sizeof(blkh_t) != 304) {
int any_find_flag = 0, i;
unsigned char one_diff[HASH_BLOCK_SIZE];
size_t diff_q, diff_m;
diff_q = diff / 8;
diff_m = diff % 8;
/* one_diff = largest digest satisfying the difficulty: diff_q
zero bytes, one partial byte, then 0xFF padding. */
memset(one_diff, 0xFF, sizeof(unsigned char) * HASH_BLOCK_SIZE);
memset(one_diff, 0, sizeof(unsigned char) * diff_q);
one_diff[diff_q] = ((uint8_t)0xFF) >> diff_m;
struct ThreadData thData[THREAD_NUM];
for (i = 0; i < THREAD_NUM; ++i) thData[i].flag = 0;
/* Header bytes before the nonce never change: hash them once into
a shared midstate `ctx` and resume from it on every attempt. */
const int BEFORE_SIZE = offsetof(blkh_t, nonce);
const int AFTER_SIZE = sizeof(blkh_t) - BEFORE_SIZE - 8;
SHA256_CTX ctx;
sha256_init(&ctx);
sha256_update(&ctx, (unsigned char *)node, BEFORE_SIZE);
#pragma omp parallel num_threads(THREAD_NUM)
{
const int id = omp_get_thread_num();
memcpy(&thData[id].header, node, sizeof(blkh_t));
thData[id].header.nonce = id;
SHA256_CTX curCtx;
for (; unlikely(!any_find_flag);) {
/* Resume from the shared midstate, hash nonce + tail. */
memcpy(&curCtx, &ctx, sizeof(SHA256_CTX));
sha256_update(&curCtx, (BYTE *)&thData[id].header.nonce, 8);
sha256_update(&curCtx, ((BYTE *)&thData[id].header) + BEFORE_SIZE + 8, AFTER_SIZE);
sha256_final(&curCtx, thData[id].hash_buf);
// blockchain_node_hash((blk_t *)&thData[id].header, thData[id].hash_buf, func);
/* Accept when digest <= one_diff: leading diff_q bytes must be
zero, the rest compares lexicographically. The nested
likely/unlikely are branch-prediction hints only. */
if (unlikely(likely((!memcmp(thData[id].hash_buf, one_diff, sizeof(unsigned char) * diff_q))) &&
unlikely(memcmp(&thData[id].hash_buf[diff_q], &one_diff[diff_q],
sizeof(unsigned char) * (HASH_BLOCK_SIZE - diff_q)) <= 0))) {
any_find_flag = 1;
thData[id].flag = 1;
break;
}
thData[id].header.nonce += NONCE_STEP;
}
}
/* Copy the winning thread's nonce and digest back to the node. */
for (i = 0; i < THREAD_NUM; ++i) {
if (thData[i].flag) {
node->header.nonce = thData[i].header.nonce;
memcpy(hash_buf, thData[i].hash_buf, HASH_BLOCK_SIZE);
memcpy(node->hash, thData[i].hash_buf, HASH_BLOCK_SIZE);
break;
}
}
(void)func; /* callback unused: SHA-256 is hard-coded on this path */
} else {
int any_find_flag = 0, i;
unsigned char one_diff[HASH_BLOCK_SIZE];
size_t diff_q, diff_m;
diff_q = diff / 8;
diff_m = diff % 8;
memset(one_diff, 0xFF, sizeof(unsigned char) * HASH_BLOCK_SIZE);
memset(one_diff, 0, sizeof(unsigned char) * diff_q);
one_diff[diff_q] = ((uint8_t)0xFF) >> diff_m;
struct ThreadDataAvx thData[THREAD_NUM];
for (i = 0; i < THREAD_NUM; ++i) thData[i].flag = 0;
/* Midstate after the first 256 header bytes (4 SHA-256 blocks). */
WORD state[8];
getSha256State(node, state);
/* Build the final padded message block by hand: the remaining 40
header bytes, an 8-byte nonce slot at offset 40, the 0x80 pad
byte at offset 48, zeros, and the total bit length
(2432 = (256 + 48) * 8) big-endian in the last two bytes. */
unsigned char ctxData[64];
memcpy(ctxData, ((BYTE *)node) + 256, 40);
ctxData[48] = 0x80;
memset(ctxData + 48 + 1, 0, 55 + 6 - 48);
// bitlen = 4 * 512
// datalen = 48
// bitlen += datalen * 8
// bitlen = 2432
ctxData[63] = (BYTE)2432;
ctxData[62] = (BYTE)(2432 >> 8);
#pragma omp parallel num_threads(THREAD_NUM)
{
const int id = omp_get_thread_num();
thData[id].nonce = id * 8; /* each iteration covers 8 nonces */
// SHA256_CTX curCtx;
// unsigned char curData[64];
// memcpy(curData, ctxData, 64);
WORD outputState[8][8];
for (; !any_find_flag;) {
// *(uint64_t *)(curData + 40) = thData[id].nonce;
// memcpy(curCtx.state, state, sizeof(state));
// sha256_transform(&curCtx, curData);
// bwLittleEndian32(thData[id].hash_buf, curCtx.state[0]);
// bwLittleEndian32(thData[id].hash_buf + 4, curCtx.state[1]);
// bwLittleEndian32(thData[id].hash_buf + 8, curCtx.state[2]);
// bwLittleEndian32(thData[id].hash_buf + 12, curCtx.state[3]);
// bwLittleEndian32(thData[id].hash_buf + 16, curCtx.state[4]);
// bwLittleEndian32(thData[id].hash_buf + 20, curCtx.state[5]);
// bwLittleEndian32(thData[id].hash_buf + 24, curCtx.state[6]);
// bwLittleEndian32(thData[id].hash_buf + 28, curCtx.state[7]);
// if ((!memcmp(thData[id].hash_buf, one_diff, sizeof(unsigned char) * diff_q)) &&
// memcmp(&thData[id].hash_buf[diff_q], &one_diff[diff_q],
// sizeof(unsigned char) * (HASH_BLOCK_SIZE - diff_q)) <= 0) {
// any_find_flag = 1;
// thData[id].flag = 1;
// break;
// }
// thData[id].nonce += NONCE_STEP;
/* Hash 8 consecutive nonces in one call, then check each lane;
bwLittleEndian32 restores the big-endian digest byte order. */
transform8way(state, ctxData, thData[id].nonce, outputState);
for (int now = 0; now < 8; ++now) {
bwLittleEndian32(thData[id].hash_buf, outputState[0][now]);
bwLittleEndian32(thData[id].hash_buf + 4, outputState[1][now]);
bwLittleEndian32(thData[id].hash_buf + 8, outputState[2][now]);
bwLittleEndian32(thData[id].hash_buf + 12, outputState[3][now]);
bwLittleEndian32(thData[id].hash_buf + 16, outputState[4][now]);
bwLittleEndian32(thData[id].hash_buf + 20, outputState[5][now]);
bwLittleEndian32(thData[id].hash_buf + 24, outputState[6][now]);
bwLittleEndian32(thData[id].hash_buf + 28, outputState[7][now]);
if ((!memcmp(thData[id].hash_buf, one_diff, sizeof(unsigned char) * diff_q)) &&
memcmp(&thData[id].hash_buf[diff_q], &one_diff[diff_q],
sizeof(unsigned char) * (HASH_BLOCK_SIZE - diff_q)) <= 0) {
/* NOTE(review): nonce is advanced by 7 - now here — verify
this recovers the winning lane's nonce given how
transform8way assigns nonces to lanes. */
thData[id].nonce += 7 - now;
any_find_flag = 1;
thData[id].flag = 1;
break;
}
}
if (thData[id].flag) break;
thData[id].nonce += NONCE_STEP * 8;
}
}
/* Copy the winning thread's nonce and digest back to the node. */
for (i = 0; i < THREAD_NUM; ++i) {
if (thData[i].flag) {
node->header.nonce = thData[i].nonce;
memcpy(hash_buf, thData[i].hash_buf, HASH_BLOCK_SIZE);
memcpy(node->hash, hash_buf, HASH_BLOCK_SIZE);
break;
}
}
(void)func;
}
/* Original sequential reference implementation, kept for comparison:
// while (True) {
// blockchain_node_hash(node, hash_buf, func);
// if ((!memcmp(hash_buf, one_diff, sizeof(unsigned char) * diff_q)) &&
// memcmp(&hash_buf[diff_q], &one_diff[diff_q],
// sizeof(unsigned char) * (HASH_BLOCK_SIZE - diff_q)) <= 0) {
// memcpy(node->hash, hash_buf, sizeof(unsigned char) * HASH_BLOCK_SIZE);
// break;
// }
// node->header.nonce++;
// }
// printf("nonce: %lu\n", node->header.nonce);
*/
}
|
dds.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD DDDD SSSSS %
% D D D D SS %
% D D D D SSS %
% D D D D SS %
% DDDD DDDD SSSSS %
% %
% %
% Read/Write Microsoft Direct Draw Surface Image Format %
% %
% Software Design %
% Bianca van Schaik %
% March 2008 %
% Dirk Lemstra %
% September 2013 %
% %
% %
% Copyright @ 2008 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"
/*
Definitions
*/
#define DDSD_CAPS 0x00000001
#define DDSD_HEIGHT 0x00000002
#define DDSD_WIDTH 0x00000004
#define DDSD_PITCH 0x00000008
#define DDSD_PIXELFORMAT 0x00001000
#define DDSD_MIPMAPCOUNT 0x00020000
#define DDSD_LINEARSIZE 0x00080000
#define DDSD_DEPTH 0x00800000
#define DDPF_ALPHAPIXELS 0x00000001
#define DDPF_FOURCC 0x00000004
#define DDPF_RGB 0x00000040
#define DDPF_LUMINANCE 0x00020000
#define FOURCC_DXT1 0x31545844
#define FOURCC_DXT3 0x33545844
#define FOURCC_DXT5 0x35545844
#define FOURCC_DX10 0x30315844
#define DDSCAPS_COMPLEX 0x00000008
#define DDSCAPS_TEXTURE 0x00001000
#define DDSCAPS_MIPMAP 0x00400000
#define DDSCAPS2_CUBEMAP 0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
#define DDSCAPS2_VOLUME 0x00200000
#define DDSEXT_DIMENSION_TEX2D 0x00000003
#define DDSEXTFLAGS_CUBEMAP 0x00000004
/* DXGI pixel-format identifiers used to interpret the DX10 extension
header of a DDS file. No explicit values are assigned, so the
enumerator order presumably must match Microsoft's DXGI_FORMAT
numbering exactly — do not reorder or insert entries (verify against
the DXGI documentation before changing). */
typedef enum DXGI_FORMAT
{
DXGI_FORMAT_UNKNOWN,
DXGI_FORMAT_R32G32B32A32_TYPELESS,
DXGI_FORMAT_R32G32B32A32_FLOAT,
DXGI_FORMAT_R32G32B32A32_UINT,
DXGI_FORMAT_R32G32B32A32_SINT,
DXGI_FORMAT_R32G32B32_TYPELESS,
DXGI_FORMAT_R32G32B32_FLOAT,
DXGI_FORMAT_R32G32B32_UINT,
DXGI_FORMAT_R32G32B32_SINT,
DXGI_FORMAT_R16G16B16A16_TYPELESS,
DXGI_FORMAT_R16G16B16A16_FLOAT,
DXGI_FORMAT_R16G16B16A16_UNORM,
DXGI_FORMAT_R16G16B16A16_UINT,
DXGI_FORMAT_R16G16B16A16_SNORM,
DXGI_FORMAT_R16G16B16A16_SINT,
DXGI_FORMAT_R32G32_TYPELESS,
DXGI_FORMAT_R32G32_FLOAT,
DXGI_FORMAT_R32G32_UINT,
DXGI_FORMAT_R32G32_SINT,
DXGI_FORMAT_R32G8X24_TYPELESS,
DXGI_FORMAT_D32_FLOAT_S8X24_UINT,
DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS,
DXGI_FORMAT_X32_TYPELESS_G8X24_UINT,
DXGI_FORMAT_R10G10B10A2_TYPELESS,
DXGI_FORMAT_R10G10B10A2_UNORM,
DXGI_FORMAT_R10G10B10A2_UINT,
DXGI_FORMAT_R11G11B10_FLOAT,
DXGI_FORMAT_R8G8B8A8_TYPELESS,
DXGI_FORMAT_R8G8B8A8_UNORM,
DXGI_FORMAT_R8G8B8A8_UNORM_SRGB,
DXGI_FORMAT_R8G8B8A8_UINT,
DXGI_FORMAT_R8G8B8A8_SNORM,
DXGI_FORMAT_R8G8B8A8_SINT,
DXGI_FORMAT_R16G16_TYPELESS,
DXGI_FORMAT_R16G16_FLOAT,
DXGI_FORMAT_R16G16_UNORM,
DXGI_FORMAT_R16G16_UINT,
DXGI_FORMAT_R16G16_SNORM,
DXGI_FORMAT_R16G16_SINT,
DXGI_FORMAT_R32_TYPELESS,
DXGI_FORMAT_D32_FLOAT,
DXGI_FORMAT_R32_FLOAT,
DXGI_FORMAT_R32_UINT,
DXGI_FORMAT_R32_SINT,
DXGI_FORMAT_R24G8_TYPELESS,
DXGI_FORMAT_D24_UNORM_S8_UINT,
DXGI_FORMAT_R24_UNORM_X8_TYPELESS,
DXGI_FORMAT_X24_TYPELESS_G8_UINT,
DXGI_FORMAT_R8G8_TYPELESS,
DXGI_FORMAT_R8G8_UNORM,
DXGI_FORMAT_R8G8_UINT,
DXGI_FORMAT_R8G8_SNORM,
DXGI_FORMAT_R8G8_SINT,
DXGI_FORMAT_R16_TYPELESS,
DXGI_FORMAT_R16_FLOAT,
DXGI_FORMAT_D16_UNORM,
DXGI_FORMAT_R16_UNORM,
DXGI_FORMAT_R16_UINT,
DXGI_FORMAT_R16_SNORM,
DXGI_FORMAT_R16_SINT,
DXGI_FORMAT_R8_TYPELESS,
DXGI_FORMAT_R8_UNORM,
DXGI_FORMAT_R8_UINT,
DXGI_FORMAT_R8_SNORM,
DXGI_FORMAT_R8_SINT,
DXGI_FORMAT_A8_UNORM,
DXGI_FORMAT_R1_UNORM,
DXGI_FORMAT_R9G9B9E5_SHAREDEXP,
DXGI_FORMAT_R8G8_B8G8_UNORM,
DXGI_FORMAT_G8R8_G8B8_UNORM,
DXGI_FORMAT_BC1_TYPELESS,
DXGI_FORMAT_BC1_UNORM,
DXGI_FORMAT_BC1_UNORM_SRGB,
DXGI_FORMAT_BC2_TYPELESS,
DXGI_FORMAT_BC2_UNORM,
DXGI_FORMAT_BC2_UNORM_SRGB,
DXGI_FORMAT_BC3_TYPELESS,
DXGI_FORMAT_BC3_UNORM,
DXGI_FORMAT_BC3_UNORM_SRGB,
DXGI_FORMAT_BC4_TYPELESS,
DXGI_FORMAT_BC4_UNORM,
DXGI_FORMAT_BC4_SNORM,
DXGI_FORMAT_BC5_TYPELESS,
DXGI_FORMAT_BC5_UNORM,
DXGI_FORMAT_BC5_SNORM,
DXGI_FORMAT_B5G6R5_UNORM,
DXGI_FORMAT_B5G5R5A1_UNORM,
DXGI_FORMAT_B8G8R8A8_UNORM,
DXGI_FORMAT_B8G8R8X8_UNORM,
DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM,
DXGI_FORMAT_B8G8R8A8_TYPELESS,
DXGI_FORMAT_B8G8R8A8_UNORM_SRGB,
DXGI_FORMAT_B8G8R8X8_TYPELESS,
DXGI_FORMAT_B8G8R8X8_UNORM_SRGB,
DXGI_FORMAT_BC6H_TYPELESS,
DXGI_FORMAT_BC6H_UF16,
DXGI_FORMAT_BC6H_SF16,
DXGI_FORMAT_BC7_TYPELESS,
DXGI_FORMAT_BC7_UNORM,
DXGI_FORMAT_BC7_UNORM_SRGB,
DXGI_FORMAT_AYUV,
DXGI_FORMAT_Y410,
DXGI_FORMAT_Y416,
DXGI_FORMAT_NV12,
DXGI_FORMAT_P010,
DXGI_FORMAT_P016,
DXGI_FORMAT_420_OPAQUE,
DXGI_FORMAT_YUY2,
DXGI_FORMAT_Y210,
DXGI_FORMAT_Y216,
DXGI_FORMAT_NV11,
DXGI_FORMAT_AI44,
DXGI_FORMAT_IA44,
DXGI_FORMAT_P8,
DXGI_FORMAT_A8P8,
DXGI_FORMAT_B4G4R4A4_UNORM,
DXGI_FORMAT_P208,
DXGI_FORMAT_V208,
DXGI_FORMAT_V408,
DXGI_FORMAT_SAMPLER_FEEDBACK_MIN_MIP_OPAQUE,
DXGI_FORMAT_SAMPLER_FEEDBACK_MIP_REGION_USED_OPAQUE,
DXGI_FORMAT_FORCE_UINT
} DXGI_FORMAT;
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
/*
Structure declarations.
*/
/* Pixel-format section of a DDS header: DDPF_* flags, FourCC code,
and the RGBA bit layout for uncompressed surfaces. */
typedef struct _DDSPixelFormat
{
size_t
flags,
fourcc,
rgb_bitcount,
r_bitmask,
g_bitmask,
b_bitmask,
alpha_bitmask;
} DDSPixelFormat;
/* Parsed DDS surface header, including the DX10 extension fields
(ext*) read when the FourCC is 'DX10'. */
typedef struct _DDSInfo
{
size_t
flags,
height,
width,
pitchOrLinearSize,
depth,
mipmapcount,
ddscaps1,
ddscaps2,
extFormat,
extDimension,
extFlags,
extArraySize,
extFlags2;
DDSPixelFormat
pixelformat;
} DDSPixelFormat is not redefined here; see above. */
/* Single-colour lookup table for a 5-bit colour channel: indexed by
the 8-bit source value (256 entries), each entry holds two candidate
(start, end, error) endpoint pairs. NOTE(review): semantics
inferred from the DDSSingleColorLookup/DDSSourceBlock type names —
this is generated compressor data; do not edit entries by hand. */
static const DDSSingleColorLookup DDSLookup_5_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 1 } } },
{ { { 0, 0, 2 }, { 0, 1, 0 } } },
{ { { 0, 0, 3 }, { 0, 1, 1 } } },
{ { { 0, 0, 4 }, { 0, 2, 1 } } },
{ { { 1, 0, 3 }, { 0, 2, 0 } } },
{ { { 1, 0, 2 }, { 0, 2, 1 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 1, 2, 1 } } },
{ { { 1, 0, 2 }, { 1, 2, 0 } } },
{ { { 1, 0, 3 }, { 0, 4, 0 } } },
{ { { 1, 0, 4 }, { 0, 5, 1 } } },
{ { { 2, 0, 3 }, { 0, 5, 0 } } },
{ { { 2, 0, 2 }, { 0, 5, 1 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 2, 3, 1 } } },
{ { { 2, 0, 2 }, { 2, 3, 0 } } },
{ { { 2, 0, 3 }, { 0, 7, 0 } } },
{ { { 2, 0, 4 }, { 1, 6, 1 } } },
{ { { 3, 0, 3 }, { 1, 6, 0 } } },
{ { { 3, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 2 }, { 0, 10, 1 } } },
{ { { 3, 0, 3 }, { 0, 10, 0 } } },
{ { { 3, 0, 4 }, { 2, 7, 1 } } },
{ { { 4, 0, 4 }, { 2, 7, 0 } } },
{ { { 4, 0, 3 }, { 0, 11, 0 } } },
{ { { 4, 0, 2 }, { 1, 10, 1 } } },
{ { { 4, 0, 1 }, { 1, 10, 0 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 1 } } },
{ { { 4, 0, 2 }, { 0, 13, 0 } } },
{ { { 4, 0, 3 }, { 0, 13, 1 } } },
{ { { 4, 0, 4 }, { 0, 14, 1 } } },
{ { { 5, 0, 3 }, { 0, 14, 0 } } },
{ { { 5, 0, 2 }, { 2, 11, 1 } } },
{ { { 5, 0, 1 }, { 2, 11, 0 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 1, 14, 1 } } },
{ { { 5, 0, 2 }, { 1, 14, 0 } } },
{ { { 5, 0, 3 }, { 0, 16, 0 } } },
{ { { 5, 0, 4 }, { 0, 17, 1 } } },
{ { { 6, 0, 3 }, { 0, 17, 0 } } },
{ { { 6, 0, 2 }, { 0, 17, 1 } } },
{ { { 6, 0, 1 }, { 0, 18, 1 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 2, 15, 1 } } },
{ { { 6, 0, 2 }, { 2, 15, 0 } } },
{ { { 6, 0, 3 }, { 0, 19, 0 } } },
{ { { 6, 0, 4 }, { 1, 18, 1 } } },
{ { { 7, 0, 3 }, { 1, 18, 0 } } },
{ { { 7, 0, 2 }, { 0, 20, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 2 }, { 0, 22, 1 } } },
{ { { 7, 0, 3 }, { 0, 22, 0 } } },
{ { { 7, 0, 4 }, { 2, 19, 1 } } },
{ { { 8, 0, 4 }, { 2, 19, 0 } } },
{ { { 8, 0, 3 }, { 0, 23, 0 } } },
{ { { 8, 0, 2 }, { 1, 22, 1 } } },
{ { { 8, 0, 1 }, { 1, 22, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 1 } } },
{ { { 8, 0, 2 }, { 0, 25, 0 } } },
{ { { 8, 0, 3 }, { 0, 25, 1 } } },
{ { { 8, 0, 4 }, { 0, 26, 1 } } },
{ { { 9, 0, 3 }, { 0, 26, 0 } } },
{ { { 9, 0, 2 }, { 2, 23, 1 } } },
{ { { 9, 0, 1 }, { 2, 23, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 1, 26, 1 } } },
{ { { 9, 0, 2 }, { 1, 26, 0 } } },
{ { { 9, 0, 3 }, { 0, 28, 0 } } },
{ { { 9, 0, 4 }, { 0, 29, 1 } } },
{ { { 10, 0, 3 }, { 0, 29, 0 } } },
{ { { 10, 0, 2 }, { 0, 29, 1 } } },
{ { { 10, 0, 1 }, { 0, 30, 1 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 2, 27, 1 } } },
{ { { 10, 0, 2 }, { 2, 27, 0 } } },
{ { { 10, 0, 3 }, { 0, 31, 0 } } },
{ { { 10, 0, 4 }, { 1, 30, 1 } } },
{ { { 11, 0, 3 }, { 1, 30, 0 } } },
{ { { 11, 0, 2 }, { 4, 24, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 0 }, { 1, 31, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 2 }, { 2, 30, 1 } } },
{ { { 11, 0, 3 }, { 2, 30, 0 } } },
{ { { 11, 0, 4 }, { 2, 31, 1 } } },
{ { { 12, 0, 4 }, { 2, 31, 0 } } },
{ { { 12, 0, 3 }, { 4, 27, 0 } } },
{ { { 12, 0, 2 }, { 3, 30, 1 } } },
{ { { 12, 0, 1 }, { 3, 30, 0 } } },
{ { { 12, 0, 0 }, { 4, 28, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 1 } } },
{ { { 12, 0, 2 }, { 3, 31, 0 } } },
{ { { 12, 0, 3 }, { 3, 31, 1 } } },
{ { { 12, 0, 4 }, { 4, 30, 1 } } },
{ { { 13, 0, 3 }, { 4, 30, 0 } } },
{ { { 13, 0, 2 }, { 6, 27, 1 } } },
{ { { 13, 0, 1 }, { 6, 27, 0 } } },
{ { { 13, 0, 0 }, { 4, 31, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 1 } } },
{ { { 13, 0, 2 }, { 5, 30, 0 } } },
{ { { 13, 0, 3 }, { 8, 24, 0 } } },
{ { { 13, 0, 4 }, { 5, 31, 1 } } },
{ { { 14, 0, 3 }, { 5, 31, 0 } } },
{ { { 14, 0, 2 }, { 5, 31, 1 } } },
{ { { 14, 0, 1 }, { 6, 30, 1 } } },
{ { { 14, 0, 0 }, { 6, 30, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 1 } } },
{ { { 14, 0, 2 }, { 6, 31, 0 } } },
{ { { 14, 0, 3 }, { 8, 27, 0 } } },
{ { { 14, 0, 4 }, { 7, 30, 1 } } },
{ { { 15, 0, 3 }, { 7, 30, 0 } } },
{ { { 15, 0, 2 }, { 8, 28, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 0 }, { 7, 31, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 2 }, { 8, 30, 1 } } },
{ { { 15, 0, 3 }, { 8, 30, 0 } } },
{ { { 15, 0, 4 }, { 10, 27, 1 } } },
{ { { 16, 0, 4 }, { 10, 27, 0 } } },
{ { { 16, 0, 3 }, { 8, 31, 0 } } },
{ { { 16, 0, 2 }, { 9, 30, 1 } } },
{ { { 16, 0, 1 }, { 9, 30, 0 } } },
{ { { 16, 0, 0 }, { 12, 24, 0 } } },
{ { { 16, 0, 1 }, { 9, 31, 1 } } },
{ { { 16, 0, 2 }, { 9, 31, 0 } } },
{ { { 16, 0, 3 }, { 9, 31, 1 } } },
{ { { 16, 0, 4 }, { 10, 30, 1 } } },
{ { { 17, 0, 3 }, { 10, 30, 0 } } },
{ { { 17, 0, 2 }, { 10, 31, 1 } } },
{ { { 17, 0, 1 }, { 10, 31, 0 } } },
{ { { 17, 0, 0 }, { 12, 27, 0 } } },
{ { { 17, 0, 1 }, { 11, 30, 1 } } },
{ { { 17, 0, 2 }, { 11, 30, 0 } } },
{ { { 17, 0, 3 }, { 12, 28, 0 } } },
{ { { 17, 0, 4 }, { 11, 31, 1 } } },
{ { { 18, 0, 3 }, { 11, 31, 0 } } },
{ { { 18, 0, 2 }, { 11, 31, 1 } } },
{ { { 18, 0, 1 }, { 12, 30, 1 } } },
{ { { 18, 0, 0 }, { 12, 30, 0 } } },
{ { { 18, 0, 1 }, { 14, 27, 1 } } },
{ { { 18, 0, 2 }, { 14, 27, 0 } } },
{ { { 18, 0, 3 }, { 12, 31, 0 } } },
{ { { 18, 0, 4 }, { 13, 30, 1 } } },
{ { { 19, 0, 3 }, { 13, 30, 0 } } },
{ { { 19, 0, 2 }, { 16, 24, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 0 }, { 13, 31, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 2 }, { 14, 30, 1 } } },
{ { { 19, 0, 3 }, { 14, 30, 0 } } },
{ { { 19, 0, 4 }, { 14, 31, 1 } } },
{ { { 20, 0, 4 }, { 14, 31, 0 } } },
{ { { 20, 0, 3 }, { 16, 27, 0 } } },
{ { { 20, 0, 2 }, { 15, 30, 1 } } },
{ { { 20, 0, 1 }, { 15, 30, 0 } } },
{ { { 20, 0, 0 }, { 16, 28, 0 } } },
{ { { 20, 0, 1 }, { 15, 31, 1 } } },
{ { { 20, 0, 2 }, { 15, 31, 0 } } },
{ { { 20, 0, 3 }, { 15, 31, 1 } } },
{ { { 20, 0, 4 }, { 16, 30, 1 } } },
{ { { 21, 0, 3 }, { 16, 30, 0 } } },
{ { { 21, 0, 2 }, { 18, 27, 1 } } },
{ { { 21, 0, 1 }, { 18, 27, 0 } } },
{ { { 21, 0, 0 }, { 16, 31, 0 } } },
{ { { 21, 0, 1 }, { 17, 30, 1 } } },
{ { { 21, 0, 2 }, { 17, 30, 0 } } },
{ { { 21, 0, 3 }, { 20, 24, 0 } } },
{ { { 21, 0, 4 }, { 17, 31, 1 } } },
{ { { 22, 0, 3 }, { 17, 31, 0 } } },
{ { { 22, 0, 2 }, { 17, 31, 1 } } },
{ { { 22, 0, 1 }, { 18, 30, 1 } } },
{ { { 22, 0, 0 }, { 18, 30, 0 } } },
{ { { 22, 0, 1 }, { 18, 31, 1 } } },
{ { { 22, 0, 2 }, { 18, 31, 0 } } },
{ { { 22, 0, 3 }, { 20, 27, 0 } } },
{ { { 22, 0, 4 }, { 19, 30, 1 } } },
{ { { 23, 0, 3 }, { 19, 30, 0 } } },
{ { { 23, 0, 2 }, { 20, 28, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 0 }, { 19, 31, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 2 }, { 20, 30, 1 } } },
{ { { 23, 0, 3 }, { 20, 30, 0 } } },
{ { { 23, 0, 4 }, { 22, 27, 1 } } },
{ { { 24, 0, 4 }, { 22, 27, 0 } } },
{ { { 24, 0, 3 }, { 20, 31, 0 } } },
{ { { 24, 0, 2 }, { 21, 30, 1 } } },
{ { { 24, 0, 1 }, { 21, 30, 0 } } },
{ { { 24, 0, 0 }, { 24, 24, 0 } } },
{ { { 24, 0, 1 }, { 21, 31, 1 } } },
{ { { 24, 0, 2 }, { 21, 31, 0 } } },
{ { { 24, 0, 3 }, { 21, 31, 1 } } },
{ { { 24, 0, 4 }, { 22, 30, 1 } } },
{ { { 25, 0, 3 }, { 22, 30, 0 } } },
{ { { 25, 0, 2 }, { 22, 31, 1 } } },
{ { { 25, 0, 1 }, { 22, 31, 0 } } },
{ { { 25, 0, 0 }, { 24, 27, 0 } } },
{ { { 25, 0, 1 }, { 23, 30, 1 } } },
{ { { 25, 0, 2 }, { 23, 30, 0 } } },
{ { { 25, 0, 3 }, { 24, 28, 0 } } },
{ { { 25, 0, 4 }, { 23, 31, 1 } } },
{ { { 26, 0, 3 }, { 23, 31, 0 } } },
{ { { 26, 0, 2 }, { 23, 31, 1 } } },
{ { { 26, 0, 1 }, { 24, 30, 1 } } },
{ { { 26, 0, 0 }, { 24, 30, 0 } } },
{ { { 26, 0, 1 }, { 26, 27, 1 } } },
{ { { 26, 0, 2 }, { 26, 27, 0 } } },
{ { { 26, 0, 3 }, { 24, 31, 0 } } },
{ { { 26, 0, 4 }, { 25, 30, 1 } } },
{ { { 27, 0, 3 }, { 25, 30, 0 } } },
{ { { 27, 0, 2 }, { 28, 24, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 0 }, { 25, 31, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 2 }, { 26, 30, 1 } } },
{ { { 27, 0, 3 }, { 26, 30, 0 } } },
{ { { 27, 0, 4 }, { 26, 31, 1 } } },
{ { { 28, 0, 4 }, { 26, 31, 0 } } },
{ { { 28, 0, 3 }, { 28, 27, 0 } } },
{ { { 28, 0, 2 }, { 27, 30, 1 } } },
{ { { 28, 0, 1 }, { 27, 30, 0 } } },
{ { { 28, 0, 0 }, { 28, 28, 0 } } },
{ { { 28, 0, 1 }, { 27, 31, 1 } } },
{ { { 28, 0, 2 }, { 27, 31, 0 } } },
{ { { 28, 0, 3 }, { 27, 31, 1 } } },
{ { { 28, 0, 4 }, { 28, 30, 1 } } },
{ { { 29, 0, 3 }, { 28, 30, 0 } } },
{ { { 29, 0, 2 }, { 30, 27, 1 } } },
{ { { 29, 0, 1 }, { 30, 27, 0 } } },
{ { { 29, 0, 0 }, { 28, 31, 0 } } },
{ { { 29, 0, 1 }, { 29, 30, 1 } } },
{ { { 29, 0, 2 }, { 29, 30, 0 } } },
{ { { 29, 0, 3 }, { 29, 30, 1 } } },
{ { { 29, 0, 4 }, { 29, 31, 1 } } },
{ { { 30, 0, 3 }, { 29, 31, 0 } } },
{ { { 30, 0, 2 }, { 29, 31, 1 } } },
{ { { 30, 0, 1 }, { 30, 30, 1 } } },
{ { { 30, 0, 0 }, { 30, 30, 0 } } },
{ { { 30, 0, 1 }, { 30, 31, 1 } } },
{ { { 30, 0, 2 }, { 30, 31, 0 } } },
{ { { 30, 0, 3 }, { 30, 31, 1 } } },
{ { { 30, 0, 4 }, { 31, 30, 1 } } },
{ { { 31, 0, 3 }, { 31, 30, 0 } } },
{ { { 31, 0, 2 }, { 31, 30, 1 } } },
{ { { 31, 0, 1 }, { 31, 31, 1 } } },
{ { { 31, 0, 0 }, { 31, 31, 0 } } }
};
/*
  Optimal single-color lookup table for the 6-bit (green) channel of DXT
  5:6:5 endpoints, indexed by the desired 8-bit component value.
  NOTE(review): each entry appears to hold two candidate (start,end,error)
  endpoint pairs — confirm against the DDSSingleColorLookup declaration.
*/
static const DDSSingleColorLookup DDSLookup_6_4[] =
{
  { { { 0, 0, 0 }, { 0, 0, 0 } } },
  { { { 0, 0, 1 }, { 0, 1, 0 } } },
  { { { 0, 0, 2 }, { 0, 2, 0 } } },
  { { { 1, 0, 1 }, { 0, 3, 1 } } },
  { { { 1, 0, 0 }, { 0, 3, 0 } } },
  { { { 1, 0, 1 }, { 0, 4, 0 } } },
  { { { 1, 0, 2 }, { 0, 5, 0 } } },
  { { { 2, 0, 1 }, { 0, 6, 1 } } },
  { { { 2, 0, 0 }, { 0, 6, 0 } } },
  { { { 2, 0, 1 }, { 0, 7, 0 } } },
  { { { 2, 0, 2 }, { 0, 8, 0 } } },
  { { { 3, 0, 1 }, { 0, 9, 1 } } },
  { { { 3, 0, 0 }, { 0, 9, 0 } } },
  { { { 3, 0, 1 }, { 0, 10, 0 } } },
  { { { 3, 0, 2 }, { 0, 11, 0 } } },
  { { { 4, 0, 1 }, { 0, 12, 1 } } },
  { { { 4, 0, 0 }, { 0, 12, 0 } } },
  { { { 4, 0, 1 }, { 0, 13, 0 } } },
  { { { 4, 0, 2 }, { 0, 14, 0 } } },
  { { { 5, 0, 1 }, { 0, 15, 1 } } },
  { { { 5, 0, 0 }, { 0, 15, 0 } } },
  { { { 5, 0, 1 }, { 0, 16, 0 } } },
  { { { 5, 0, 2 }, { 1, 15, 0 } } },
  { { { 6, 0, 1 }, { 0, 17, 0 } } },
  { { { 6, 0, 0 }, { 0, 18, 0 } } },
  { { { 6, 0, 1 }, { 0, 19, 0 } } },
  { { { 6, 0, 2 }, { 3, 14, 0 } } },
  { { { 7, 0, 1 }, { 0, 20, 0 } } },
  { { { 7, 0, 0 }, { 0, 21, 0 } } },
  { { { 7, 0, 1 }, { 0, 22, 0 } } },
  { { { 7, 0, 2 }, { 4, 15, 0 } } },
  { { { 8, 0, 1 }, { 0, 23, 0 } } },
  { { { 8, 0, 0 }, { 0, 24, 0 } } },
  { { { 8, 0, 1 }, { 0, 25, 0 } } },
  { { { 8, 0, 2 }, { 6, 14, 0 } } },
  { { { 9, 0, 1 }, { 0, 26, 0 } } },
  { { { 9, 0, 0 }, { 0, 27, 0 } } },
  { { { 9, 0, 1 }, { 0, 28, 0 } } },
  { { { 9, 0, 2 }, { 7, 15, 0 } } },
  { { { 10, 0, 1 }, { 0, 29, 0 } } },
  { { { 10, 0, 0 }, { 0, 30, 0 } } },
  { { { 10, 0, 1 }, { 0, 31, 0 } } },
  { { { 10, 0, 2 }, { 9, 14, 0 } } },
  { { { 11, 0, 1 }, { 0, 32, 0 } } },
  { { { 11, 0, 0 }, { 0, 33, 0 } } },
  { { { 11, 0, 1 }, { 2, 30, 0 } } },
  { { { 11, 0, 2 }, { 0, 34, 0 } } },
  { { { 12, 0, 1 }, { 0, 35, 0 } } },
  { { { 12, 0, 0 }, { 0, 36, 0 } } },
  { { { 12, 0, 1 }, { 3, 31, 0 } } },
  { { { 12, 0, 2 }, { 0, 37, 0 } } },
  { { { 13, 0, 1 }, { 0, 38, 0 } } },
  { { { 13, 0, 0 }, { 0, 39, 0 } } },
  { { { 13, 0, 1 }, { 5, 30, 0 } } },
  { { { 13, 0, 2 }, { 0, 40, 0 } } },
  { { { 14, 0, 1 }, { 0, 41, 0 } } },
  { { { 14, 0, 0 }, { 0, 42, 0 } } },
  { { { 14, 0, 1 }, { 6, 31, 0 } } },
  { { { 14, 0, 2 }, { 0, 43, 0 } } },
  { { { 15, 0, 1 }, { 0, 44, 0 } } },
  { { { 15, 0, 0 }, { 0, 45, 0 } } },
  { { { 15, 0, 1 }, { 8, 30, 0 } } },
  { { { 15, 0, 2 }, { 0, 46, 0 } } },
  { { { 16, 0, 2 }, { 0, 47, 0 } } },
  { { { 16, 0, 1 }, { 1, 46, 0 } } },
  { { { 16, 0, 0 }, { 0, 48, 0 } } },
  { { { 16, 0, 1 }, { 0, 49, 0 } } },
  { { { 16, 0, 2 }, { 0, 50, 0 } } },
  { { { 17, 0, 1 }, { 2, 47, 0 } } },
  { { { 17, 0, 0 }, { 0, 51, 0 } } },
  { { { 17, 0, 1 }, { 0, 52, 0 } } },
  { { { 17, 0, 2 }, { 0, 53, 0 } } },
  { { { 18, 0, 1 }, { 4, 46, 0 } } },
  { { { 18, 0, 0 }, { 0, 54, 0 } } },
  { { { 18, 0, 1 }, { 0, 55, 0 } } },
  { { { 18, 0, 2 }, { 0, 56, 0 } } },
  { { { 19, 0, 1 }, { 5, 47, 0 } } },
  { { { 19, 0, 0 }, { 0, 57, 0 } } },
  { { { 19, 0, 1 }, { 0, 58, 0 } } },
  { { { 19, 0, 2 }, { 0, 59, 0 } } },
  { { { 20, 0, 1 }, { 7, 46, 0 } } },
  { { { 20, 0, 0 }, { 0, 60, 0 } } },
  { { { 20, 0, 1 }, { 0, 61, 0 } } },
  { { { 20, 0, 2 }, { 0, 62, 0 } } },
  { { { 21, 0, 1 }, { 8, 47, 0 } } },
  { { { 21, 0, 0 }, { 0, 63, 0 } } },
  { { { 21, 0, 1 }, { 1, 62, 0 } } },
  { { { 21, 0, 2 }, { 1, 63, 0 } } },
  { { { 22, 0, 1 }, { 10, 46, 0 } } },
  { { { 22, 0, 0 }, { 2, 62, 0 } } },
  { { { 22, 0, 1 }, { 2, 63, 0 } } },
  { { { 22, 0, 2 }, { 3, 62, 0 } } },
  { { { 23, 0, 1 }, { 11, 47, 0 } } },
  { { { 23, 0, 0 }, { 3, 63, 0 } } },
  { { { 23, 0, 1 }, { 4, 62, 0 } } },
  { { { 23, 0, 2 }, { 4, 63, 0 } } },
  { { { 24, 0, 1 }, { 13, 46, 0 } } },
  { { { 24, 0, 0 }, { 5, 62, 0 } } },
  { { { 24, 0, 1 }, { 5, 63, 0 } } },
  { { { 24, 0, 2 }, { 6, 62, 0 } } },
  { { { 25, 0, 1 }, { 14, 47, 0 } } },
  { { { 25, 0, 0 }, { 6, 63, 0 } } },
  { { { 25, 0, 1 }, { 7, 62, 0 } } },
  { { { 25, 0, 2 }, { 7, 63, 0 } } },
  { { { 26, 0, 1 }, { 16, 45, 0 } } },
  { { { 26, 0, 0 }, { 8, 62, 0 } } },
  { { { 26, 0, 1 }, { 8, 63, 0 } } },
  { { { 26, 0, 2 }, { 9, 62, 0 } } },
  { { { 27, 0, 1 }, { 16, 48, 0 } } },
  { { { 27, 0, 0 }, { 9, 63, 0 } } },
  { { { 27, 0, 1 }, { 10, 62, 0 } } },
  { { { 27, 0, 2 }, { 10, 63, 0 } } },
  { { { 28, 0, 1 }, { 16, 51, 0 } } },
  { { { 28, 0, 0 }, { 11, 62, 0 } } },
  { { { 28, 0, 1 }, { 11, 63, 0 } } },
  { { { 28, 0, 2 }, { 12, 62, 0 } } },
  { { { 29, 0, 1 }, { 16, 54, 0 } } },
  { { { 29, 0, 0 }, { 12, 63, 0 } } },
  { { { 29, 0, 1 }, { 13, 62, 0 } } },
  { { { 29, 0, 2 }, { 13, 63, 0 } } },
  { { { 30, 0, 1 }, { 16, 57, 0 } } },
  { { { 30, 0, 0 }, { 14, 62, 0 } } },
  { { { 30, 0, 1 }, { 14, 63, 0 } } },
  { { { 30, 0, 2 }, { 15, 62, 0 } } },
  { { { 31, 0, 1 }, { 16, 60, 0 } } },
  { { { 31, 0, 0 }, { 15, 63, 0 } } },
  { { { 31, 0, 1 }, { 24, 46, 0 } } },
  { { { 31, 0, 2 }, { 16, 62, 0 } } },
  { { { 32, 0, 2 }, { 16, 63, 0 } } },
  { { { 32, 0, 1 }, { 17, 62, 0 } } },
  { { { 32, 0, 0 }, { 25, 47, 0 } } },
  { { { 32, 0, 1 }, { 17, 63, 0 } } },
  { { { 32, 0, 2 }, { 18, 62, 0 } } },
  { { { 33, 0, 1 }, { 18, 63, 0 } } },
  { { { 33, 0, 0 }, { 27, 46, 0 } } },
  { { { 33, 0, 1 }, { 19, 62, 0 } } },
  { { { 33, 0, 2 }, { 19, 63, 0 } } },
  { { { 34, 0, 1 }, { 20, 62, 0 } } },
  { { { 34, 0, 0 }, { 28, 47, 0 } } },
  { { { 34, 0, 1 }, { 20, 63, 0 } } },
  { { { 34, 0, 2 }, { 21, 62, 0 } } },
  { { { 35, 0, 1 }, { 21, 63, 0 } } },
  { { { 35, 0, 0 }, { 30, 46, 0 } } },
  { { { 35, 0, 1 }, { 22, 62, 0 } } },
  { { { 35, 0, 2 }, { 22, 63, 0 } } },
  { { { 36, 0, 1 }, { 23, 62, 0 } } },
  { { { 36, 0, 0 }, { 31, 47, 0 } } },
  { { { 36, 0, 1 }, { 23, 63, 0 } } },
  { { { 36, 0, 2 }, { 24, 62, 0 } } },
  { { { 37, 0, 1 }, { 24, 63, 0 } } },
  { { { 37, 0, 0 }, { 32, 47, 0 } } },
  { { { 37, 0, 1 }, { 25, 62, 0 } } },
  { { { 37, 0, 2 }, { 25, 63, 0 } } },
  { { { 38, 0, 1 }, { 26, 62, 0 } } },
  { { { 38, 0, 0 }, { 32, 50, 0 } } },
  { { { 38, 0, 1 }, { 26, 63, 0 } } },
  { { { 38, 0, 2 }, { 27, 62, 0 } } },
  { { { 39, 0, 1 }, { 27, 63, 0 } } },
  { { { 39, 0, 0 }, { 32, 53, 0 } } },
  { { { 39, 0, 1 }, { 28, 62, 0 } } },
  { { { 39, 0, 2 }, { 28, 63, 0 } } },
  { { { 40, 0, 1 }, { 29, 62, 0 } } },
  { { { 40, 0, 0 }, { 32, 56, 0 } } },
  { { { 40, 0, 1 }, { 29, 63, 0 } } },
  { { { 40, 0, 2 }, { 30, 62, 0 } } },
  { { { 41, 0, 1 }, { 30, 63, 0 } } },
  { { { 41, 0, 0 }, { 32, 59, 0 } } },
  { { { 41, 0, 1 }, { 31, 62, 0 } } },
  { { { 41, 0, 2 }, { 31, 63, 0 } } },
  { { { 42, 0, 1 }, { 32, 61, 0 } } },
  { { { 42, 0, 0 }, { 32, 62, 0 } } },
  { { { 42, 0, 1 }, { 32, 63, 0 } } },
  { { { 42, 0, 2 }, { 41, 46, 0 } } },
  { { { 43, 0, 1 }, { 33, 62, 0 } } },
  { { { 43, 0, 0 }, { 33, 63, 0 } } },
  { { { 43, 0, 1 }, { 34, 62, 0 } } },
  { { { 43, 0, 2 }, { 42, 47, 0 } } },
  { { { 44, 0, 1 }, { 34, 63, 0 } } },
  { { { 44, 0, 0 }, { 35, 62, 0 } } },
  { { { 44, 0, 1 }, { 35, 63, 0 } } },
  { { { 44, 0, 2 }, { 44, 46, 0 } } },
  { { { 45, 0, 1 }, { 36, 62, 0 } } },
  { { { 45, 0, 0 }, { 36, 63, 0 } } },
  { { { 45, 0, 1 }, { 37, 62, 0 } } },
  { { { 45, 0, 2 }, { 45, 47, 0 } } },
  { { { 46, 0, 1 }, { 37, 63, 0 } } },
  { { { 46, 0, 0 }, { 38, 62, 0 } } },
  { { { 46, 0, 1 }, { 38, 63, 0 } } },
  { { { 46, 0, 2 }, { 47, 46, 0 } } },
  { { { 47, 0, 1 }, { 39, 62, 0 } } },
  { { { 47, 0, 0 }, { 39, 63, 0 } } },
  { { { 47, 0, 1 }, { 40, 62, 0 } } },
  { { { 47, 0, 2 }, { 48, 46, 0 } } },
  { { { 48, 0, 2 }, { 40, 63, 0 } } },
  { { { 48, 0, 1 }, { 41, 62, 0 } } },
  { { { 48, 0, 0 }, { 41, 63, 0 } } },
  { { { 48, 0, 1 }, { 48, 49, 0 } } },
  { { { 48, 0, 2 }, { 42, 62, 0 } } },
  { { { 49, 0, 1 }, { 42, 63, 0 } } },
  { { { 49, 0, 0 }, { 43, 62, 0 } } },
  { { { 49, 0, 1 }, { 48, 52, 0 } } },
  { { { 49, 0, 2 }, { 43, 63, 0 } } },
  { { { 50, 0, 1 }, { 44, 62, 0 } } },
  { { { 50, 0, 0 }, { 44, 63, 0 } } },
  { { { 50, 0, 1 }, { 48, 55, 0 } } },
  { { { 50, 0, 2 }, { 45, 62, 0 } } },
  { { { 51, 0, 1 }, { 45, 63, 0 } } },
  { { { 51, 0, 0 }, { 46, 62, 0 } } },
  { { { 51, 0, 1 }, { 48, 58, 0 } } },
  { { { 51, 0, 2 }, { 46, 63, 0 } } },
  { { { 52, 0, 1 }, { 47, 62, 0 } } },
  { { { 52, 0, 0 }, { 47, 63, 0 } } },
  { { { 52, 0, 1 }, { 48, 61, 0 } } },
  { { { 52, 0, 2 }, { 48, 62, 0 } } },
  { { { 53, 0, 1 }, { 56, 47, 0 } } },
  { { { 53, 0, 0 }, { 48, 63, 0 } } },
  { { { 53, 0, 1 }, { 49, 62, 0 } } },
  { { { 53, 0, 2 }, { 49, 63, 0 } } },
  { { { 54, 0, 1 }, { 58, 46, 0 } } },
  { { { 54, 0, 0 }, { 50, 62, 0 } } },
  { { { 54, 0, 1 }, { 50, 63, 0 } } },
  { { { 54, 0, 2 }, { 51, 62, 0 } } },
  { { { 55, 0, 1 }, { 59, 47, 0 } } },
  { { { 55, 0, 0 }, { 51, 63, 0 } } },
  { { { 55, 0, 1 }, { 52, 62, 0 } } },
  { { { 55, 0, 2 }, { 52, 63, 0 } } },
  { { { 56, 0, 1 }, { 61, 46, 0 } } },
  { { { 56, 0, 0 }, { 53, 62, 0 } } },
  { { { 56, 0, 1 }, { 53, 63, 0 } } },
  { { { 56, 0, 2 }, { 54, 62, 0 } } },
  { { { 57, 0, 1 }, { 62, 47, 0 } } },
  { { { 57, 0, 0 }, { 54, 63, 0 } } },
  { { { 57, 0, 1 }, { 55, 62, 0 } } },
  { { { 57, 0, 2 }, { 55, 63, 0 } } },
  { { { 58, 0, 1 }, { 56, 62, 1 } } },
  { { { 58, 0, 0 }, { 56, 62, 0 } } },
  { { { 58, 0, 1 }, { 56, 63, 0 } } },
  { { { 58, 0, 2 }, { 57, 62, 0 } } },
  { { { 59, 0, 1 }, { 57, 63, 1 } } },
  { { { 59, 0, 0 }, { 57, 63, 0 } } },
  { { { 59, 0, 1 }, { 58, 62, 0 } } },
  { { { 59, 0, 2 }, { 58, 63, 0 } } },
  { { { 60, 0, 1 }, { 59, 62, 1 } } },
  { { { 60, 0, 0 }, { 59, 62, 0 } } },
  { { { 60, 0, 1 }, { 59, 63, 0 } } },
  { { { 60, 0, 2 }, { 60, 62, 0 } } },
  { { { 61, 0, 1 }, { 60, 63, 1 } } },
  { { { 61, 0, 0 }, { 60, 63, 0 } } },
  { { { 61, 0, 1 }, { 61, 62, 0 } } },
  { { { 61, 0, 2 }, { 61, 63, 0 } } },
  { { { 62, 0, 1 }, { 62, 62, 1 } } },
  { { { 62, 0, 0 }, { 62, 62, 0 } } },
  { { { 62, 0, 1 }, { 62, 63, 0 } } },
  { { { 62, 0, 2 }, { 63, 62, 0 } } },
  { { { 63, 0, 1 }, { 63, 63, 1 } } },
  { { { 63, 0, 0 }, { 63, 63, 0 } } }
};
/*
  Per-channel single-color lookup tables, indexed red/green/blue: red and
  blue use the 5-bit table, green the 6-bit table, matching the 5:6:5
  DXT endpoint layout (the 5_4 table intentionally appears twice).
*/
static const DDSSingleColorLookup*
  DDS_LOOKUP[] =
{
  DDSLookup_5_4,
  DDSLookup_6_4,
  DDSLookup_5_4
};
/*
  BC7 interpolation weights for 2-, 3- and 4-bit indices, on a 0..64
  scale (endpoint blend is (64-w)*e0 + w*e1, rounded).
*/
static const unsigned char BC7_weight2[] = { 0, 21, 43, 64 };
static const unsigned char BC7_weight3[] = { 0, 9, 18, 27, 37, 46, 55, 64 };
static const unsigned char BC7_weight4[] = { 0, 4, 9, 13, 17, 21, 26, 30, 34,
  38, 43, 47, 51, 55, 60, 64 };
/* stores info for each mode of BC7 */
/*
  NOTE(review): the seven fields appear to be {partition bits, subsets,
  color bits, alpha bits, p-bits, index bits, secondary index bits} —
  mode 0 (4 partition bits, 3 subsets, 4-bit color, 6 p-bits, 3-bit
  indices) matches the BC7 specification; confirm against the BC7ModeInfo
  declaration.
*/
static const BC7ModeInfo BC7_mode_info[8] =
{
  { 4, 3, 4, 0, 6, 3, 0 }, /* mode 0 */
  { 6, 2, 6, 0, 2, 3, 0 }, /* mode 1 */
  { 6, 3, 5, 0, 0, 2, 0 }, /* mode 2 */
  { 6, 2, 7, 0, 4, 2, 0 }, /* mode 3 */
  { 0, 1, 5, 6, 0, 2, 3 }, /* mode 4 */
  { 0, 1, 7, 8, 0, 2, 2 }, /* mode 5 */
  { 0, 1, 7, 7, 2, 4, 0 }, /* mode 6 */
  { 6, 2, 5, 5, 4, 2, 0 }, /* mode 7 */
};
/*
  BC7 subset-assignment tables: for each of the 64 two-subset and 64
  three-subset partition ids, one subset index per pixel of the 4x4
  block (row-major).
*/
static const unsigned char BC7_partition_table[2][64][16] =
{
  { /* BC7 Partition Set for 2 Subsets */
    { 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1 },
    { 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1 },
    { 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1 },
    { 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1 },
    { 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1 },
    { 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1 },
    { 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1 },
    { 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1 },
    { 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
    { 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1 },
    { 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 },
    { 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1 },
    { 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1 },
    { 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0 },
    { 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0 },
    { 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0 },
    { 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0 },
    { 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1 },
    { 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0 },
    { 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0 },
    { 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0 },
    { 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0 },
    { 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0 },
    { 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0 },
    { 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0 },
    { 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0 },
    { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 },
    { 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1 },
    { 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0 },
    { 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0 },
    { 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0 },
    { 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0 },
    { 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1 },
    { 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1 },
    { 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0 },
    { 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0 },
    { 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0 },
    { 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0 },
    { 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0 },
    { 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1 },
    { 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1 },
    { 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0 },
    { 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0 },
    { 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0 },
    { 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0 },
    { 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0 },
    { 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1 },
    { 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1 },
    { 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0 },
    { 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0 },
    { 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1 },
    { 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1 },
    { 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1 },
    { 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1 },
    { 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1 },
    { 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0 },
    { 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0 },
    { 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1 }
  },
  { /* BC7 Partition Set for 3 Subsets */
    { 0, 0, 1, 1, 0, 0, 1, 1, 0, 2, 2, 1, 2, 2, 2, 2 },
    { 0, 0, 0, 1, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 2, 1 },
    { 0, 0, 0, 0, 2, 0, 0, 1, 2, 2, 1, 1, 2, 2, 1, 1 },
    { 0, 2, 2, 2, 0, 0, 2, 2, 0, 0, 1, 1, 0, 1, 1, 1 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2 },
    { 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 2, 2 },
    { 0, 0, 2, 2, 0, 0, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1 },
    { 0, 0, 1, 1, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2 },
    { 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2 },
    { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2 },
    { 0, 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2 },
    { 0, 1, 1, 2, 0, 1, 1, 2, 0, 1, 1, 2, 0, 1, 1, 2 },
    { 0, 1, 2, 2, 0, 1, 2, 2, 0, 1, 2, 2, 0, 1, 2, 2 },
    { 0, 0, 1, 1, 0, 1, 1, 2, 1, 1, 2, 2, 1, 2, 2, 2 },
    { 0, 0, 1, 1, 2, 0, 0, 1, 2, 2, 0, 0, 2, 2, 2, 0 },
    { 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 2, 1, 1, 2, 2 },
    { 0, 1, 1, 1, 0, 0, 1, 1, 2, 0, 0, 1, 2, 2, 0, 0 },
    { 0, 0, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2 },
    { 0, 0, 2, 2, 0, 0, 2, 2, 0, 0, 2, 2, 1, 1, 1, 1 },
    { 0, 1, 1, 1, 0, 1, 1, 1, 0, 2, 2, 2, 0, 2, 2, 2 },
    { 0, 0, 0, 1, 0, 0, 0, 1, 2, 2, 2, 1, 2, 2, 2, 1 },
    { 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 2, 2, 0, 1, 2, 2 },
    { 0, 0, 0, 0, 1, 1, 0, 0, 2, 2, 1, 0, 2, 2, 1, 0 },
    { 0, 1, 2, 2, 0, 1, 2, 2, 0, 0, 1, 1, 0, 0, 0, 0 },
    { 0, 0, 1, 2, 0, 0, 1, 2, 1, 1, 2, 2, 2, 2, 2, 2 },
    { 0, 1, 1, 0, 1, 2, 2, 1, 1, 2, 2, 1, 0, 1, 1, 0 },
    { 0, 0, 0, 0, 0, 1, 1, 0, 1, 2, 2, 1, 1, 2, 2, 1 },
    { 0, 0, 2, 2, 1, 1, 0, 2, 1, 1, 0, 2, 0, 0, 2, 2 },
    { 0, 1, 1, 0, 0, 1, 1, 0, 2, 0, 0, 2, 2, 2, 2, 2 },
    { 0, 0, 1, 1, 0, 1, 2, 2, 0, 1, 2, 2, 0, 0, 1, 1 },
    { 0, 0, 0, 0, 2, 0, 0, 0, 2, 2, 1, 1, 2, 2, 2, 1 },
    { 0, 0, 0, 0, 0, 0, 0, 2, 1, 1, 2, 2, 1, 2, 2, 2 },
    { 0, 2, 2, 2, 0, 0, 2, 2, 0, 0, 1, 2, 0, 0, 1, 1 },
    { 0, 0, 1, 1, 0, 0, 1, 2, 0, 0, 2, 2, 0, 2, 2, 2 },
    { 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2, 0 },
    { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0 },
    { 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0 },
    { 0, 1, 2, 0, 2, 0, 1, 2, 1, 2, 0, 1, 0, 1, 2, 0 },
    { 0, 0, 1, 1, 2, 2, 0, 0, 1, 1, 2, 2, 0, 0, 1, 1 },
    { 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0, 1, 1 },
    { 0, 1, 0, 1, 0, 1, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 2, 1, 2, 1, 2, 1 },
    { 0, 0, 2, 2, 1, 1, 2, 2, 0, 0, 2, 2, 1, 1, 2, 2 },
    { 0, 0, 2, 2, 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 1, 1 },
    { 0, 2, 2, 0, 1, 2, 2, 1, 0, 2, 2, 0, 1, 2, 2, 1 },
    { 0, 1, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 0, 1, 0, 1 },
    { 0, 0, 0, 0, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1 },
    { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 2, 2, 2 },
    { 0, 2, 2, 2, 0, 1, 1, 1, 0, 2, 2, 2, 0, 1, 1, 1 },
    { 0, 0, 0, 2, 1, 1, 1, 2, 0, 0, 0, 2, 1, 1, 1, 2 },
    { 0, 0, 0, 0, 2, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2 },
    { 0, 2, 2, 2, 0, 1, 1, 1, 0, 1, 1, 1, 0, 2, 2, 2 },
    { 0, 0, 0, 2, 1, 1, 1, 2, 1, 1, 1, 2, 0, 0, 0, 2 },
    { 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 2, 2, 2, 2 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 1, 2, 2, 1, 1, 2 },
    { 0, 1, 1, 0, 0, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 2 },
    { 0, 0, 2, 2, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 2, 2 },
    { 0, 0, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 0, 0, 2, 2 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 1, 2 },
    { 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 1 },
    { 0, 2, 2, 2, 1, 2, 2, 2, 0, 2, 2, 2, 1, 2, 2, 2 },
    { 0, 1, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 },
    { 0, 1, 1, 1, 2, 0, 1, 1, 2, 2, 0, 1, 2, 2, 2, 0 }
  }
};
/*
  BC7 anchor-pixel indices per partition id: the pixel whose index MSB is
  implicitly zero for each subset (subset 0 anchors at pixel 0).
*/
static const unsigned char BC7_anchor_index_table[4][64] =
{
  /* Anchor index values for the first subset */
  {
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0
  },
  /* Anchor index values for the second subset of two-subset partitioning */
  {
    15,15,15,15,15,15,15,15,
    15,15,15,15,15,15,15,15,
    15, 2, 8, 2, 2, 8, 8,15,
    2, 8, 2, 2, 8, 8, 2, 2,
    15,15, 6, 8, 2, 8,15,15,
    2, 8, 2, 2, 2,15,15, 6,
    6, 2, 6, 8,15,15, 2, 2,
    15,15,15,15,15, 2, 2,15
  },
  /* Anchor index values for the second subset of three-subset partitioning */
  {
    3, 3,15,15, 8, 3,15,15,
    8, 8, 6, 6, 6, 5, 3, 3,
    3, 3, 8,15, 3, 3, 6,10,
    5, 8, 8, 6, 8, 5,15,15,
    8,15, 3, 5, 6,10, 8,15,
    15, 3,15, 5,15,15,15,15,
    3,15, 5, 5, 5, 8, 5,10,
    5,10, 8,13,15,12, 3, 3
  },
  /* Anchor index values for the third subset of three-subset partitioning */
  {
    15, 8, 8, 3,15,15, 3, 8,
    15,15,15,15,15,15,15, 8,
    15, 8,15, 3,15, 8,15, 8,
    3,15, 6,10,15,15,10, 8,
    15, 3,15,10,10, 8, 9,10,
    6,15, 8,15, 3, 6, 6, 8,
    15, 3,15,15,15,15,15,15,
    15,15,15,15, 3,15,15, 8
  }
};
/*
Macros
*/
/* Extract the raw 5/6/5-bit fields of a packed 16-bit 565 color. */
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)
/* Expand a 5- or 6-bit field to 8 bits by replicating the top bits. */
#define C565_red(x)   ( (C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x)  ( (C565_b(x) << 3 | C565_b(x) >> 2))
/* Halve a mipmap dimension, never going below 1. */
#define DIV2(x)  ((x) > 1 ? ((x) >> 1) : 1)
/*
  Widen [min,max] so it spans at least 'steps' values while staying in
  0..255.  Wrapped in do/while(0) so this multi-statement macro behaves
  as a single statement inside if/else (CERT PRE10-C); arguments are
  parenthesized against operator-precedence surprises.
*/
#define FixRange(min, max, steps) \
  do \
  { \
    if ((min) > (max)) \
      (min)=(max); \
    if ((ssize_t) (max) - (min) < (steps)) \
      (max)=MagickMin((min)+(steps),255); \
    if ((ssize_t) (max) - (min) < (steps)) \
      (min)=MagickMax(0,(ssize_t) (max)-(steps)); \
  } while(0)
/*
  Dot product of the xyz components of two vectors.  The entire expansion
  is parenthesized so the macro composes safely inside larger expressions
  (the previous form broke under e.g. Dot(a,b)*2).
*/
#define Dot(left, right) (((left).x*(right).x)+((left).y*(right).y)+ \
  ((left).z*(right).z))
/* Set all four (VectorInit) or three (VectorInit3) components of a DDS
   vector to the same scalar value. */
#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
  = value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value
/* True when the pixel-format channel bitmasks match the given RGBA set. */
#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
  g && mask.b_bitmask == b && mask.alpha_bitmask == a)
/*
Forward declarations
*/
static MagickBooleanType
WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
  Component-wise 4-vector sum: *sum = augend + addend.
*/
static inline void VectorAdd(const DDSVector4 augend, const DDSVector4 addend,
  DDSVector4 *sum)
{
  sum->w=augend.w+addend.w;
  sum->z=augend.z+addend.z;
  sum->y=augend.y+addend.y;
  sum->x=augend.x+addend.x;
}
/*
  Clamp every component of *v into the unit interval [0,1].
*/
static inline void VectorClamp(DDSVector4 *v)
{
  v->w=MagickMin(1.0f,MagickMax(0.0f,v->w));
  v->z=MagickMin(1.0f,MagickMax(0.0f,v->z));
  v->y=MagickMin(1.0f,MagickMax(0.0f,v->y));
  v->x=MagickMin(1.0f,MagickMax(0.0f,v->x));
}
/*
  Clamp every component of the 3-vector *v into [0,1].
*/
static inline void VectorClamp3(DDSVector3 *v)
{
  v->z=MagickMin(1.0f,MagickMax(0.0f,v->z));
  v->y=MagickMin(1.0f,MagickMax(0.0f,v->y));
  v->x=MagickMin(1.0f,MagickMax(0.0f,v->x));
}
/*
  Copy the xyz components of a 4-vector into a 3-vector (w is dropped).
*/
static inline void VectorCopy43(const DDSVector4 src,
  DDSVector3 *dst)
{
  dst->z=src.z;
  dst->y=src.y;
  dst->x=src.x;
}
/*
  Copy all four components of one 4-vector into another.
*/
static inline void VectorCopy44(const DDSVector4 src,
  DDSVector4 *dst)
{
  dst->w=src.w;
  dst->z=src.z;
  dst->y=src.y;
  dst->x=src.x;
}
/*
  Negative multiply-subtract: *out = c - a*b, component-wise.
*/
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
  const DDSVector4 b, const DDSVector4 c, DDSVector4 *out)
{
  out->w=c.w-(a.w*b.w);
  out->z=c.z-(a.z*b.z);
  out->y=c.y-(a.y*b.y);
  out->x=c.x-(a.x*b.x);
}
/*
  Component-wise (Hadamard) product of two 4-vectors: *product = a*b.
*/
static inline void VectorMultiply(const DDSVector4 a,
  const DDSVector4 b, DDSVector4 *product)
{
  product->w=a.w*b.w;
  product->z=a.z*b.z;
  product->y=a.y*b.y;
  product->x=a.x*b.x;
}
/*
  Component-wise (Hadamard) product of two 3-vectors: *product = a*b.
*/
static inline void VectorMultiply3(const DDSVector3 a,
  const DDSVector3 b, DDSVector3 *product)
{
  product->z=a.z*b.z;
  product->y=a.y*b.y;
  product->x=a.x*b.x;
}
/*
  Multiply-add: *out = a*b + c, component-wise over all four components.
*/
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
  const DDSVector4 c, DDSVector4 *out)
{
  out->w=(a.w*b.w)+c.w;
  out->z=(a.z*b.z)+c.z;
  out->y=(a.y*b.y)+c.y;
  out->x=(a.x*b.x)+c.x;
}
/*
  Multiply-add over xyz only: *out = a*b + c, component-wise.
*/
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
  const DDSVector3 c, DDSVector3 *out)
{
  out->z=(a.z*b.z)+c.z;
  out->y=(a.y*b.y)+c.y;
  out->x=(a.x*b.x)+c.x;
}
/*
  Component-wise reciprocal: *inverse = 1/v.  No guard against zero
  components; callers are responsible for supplying non-zero input.
*/
static inline void VectorReciprocal(const DDSVector4 v,
  DDSVector4 *inverse)
{
  inverse->w=1.0f/v.w;
  inverse->z=1.0f/v.z;
  inverse->y=1.0f/v.y;
  inverse->x=1.0f/v.x;
}
/*
  Component-wise 4-vector difference: *difference = minuend - subtrahend.
*/
static inline void VectorSubtract(const DDSVector4 minuend,
  const DDSVector4 subtrahend, DDSVector4 *difference)
{
  difference->w=minuend.w-subtrahend.w;
  difference->z=minuend.z-subtrahend.z;
  difference->y=minuend.y-subtrahend.y;
  difference->x=minuend.x-subtrahend.x;
}
/*
  Component-wise 3-vector difference: *difference = minuend - subtrahend.
*/
static inline void VectorSubtract3(const DDSVector3 minuend,
  const DDSVector3 subtrahend, DDSVector3 *difference)
{
  difference->z=minuend.z-subtrahend.z;
  difference->y=minuend.y-subtrahend.y;
  difference->x=minuend.x-subtrahend.x;
}
/*
  Truncate each component toward zero (floor for non-negative values,
  ceil for negative ones).
*/
static inline void VectorTruncate(DDSVector4 *v)
{
  v->x=v->x < 0.0f ? ceil(v->x) : floor(v->x);
  v->y=v->y < 0.0f ? ceil(v->y) : floor(v->y);
  v->z=v->z < 0.0f ? ceil(v->z) : floor(v->z);
  v->w=v->w < 0.0f ? ceil(v->w) : floor(v->w);
}
/*
  Truncate each xyz component toward zero (floor for non-negative values,
  ceil for negative ones).
*/
static inline void VectorTruncate3(DDSVector3 *v)
{
  v->x=v->x < 0.0f ? ceil(v->x) : floor(v->x);
  v->y=v->y < 0.0f ? ceil(v->y) : floor(v->y);
  v->z=v->z < 0.0f ? ceil(v->z) : floor(v->z);
}
/*
  Round 'value' to the nearest integer and clamp it into [0,limit].
  The previous version stored the rounded value in an (unsigned) size_t
  before checking for negativity: the check `result < 0.0f` was always
  false, and a negative input wrapped to a huge size_t and was clamped
  to 'limit' instead of 0.  Rounding is done in a signed type first.
*/
static inline size_t ClampToLimit(const float value, const size_t limit)
{
  ssize_t
    result = (ssize_t) (value + 0.5f);

  if (result < 0)
    return(0);
  if ((size_t) result > limit)
    return(limit);
  return((size_t) result);
}
/*
  Quantize a unit RGB vector to a packed 5:6:5 16-bit color value.
*/
static inline size_t ColorTo565(const DDSVector3 point)
{
  size_t
    blue,
    green,
    red;

  red=ClampToLimit(31.0f*point.x,31);
  green=ClampToLimit(63.0f*point.y,63);
  blue=ClampToLimit(31.0f*point.z,31);
  return((red << 11) | (green << 5) | blue);
}
/*
  Look up which BC7 subset the given pixel of a 4x4 block belongs to for
  the given partition id; subsets other than 2 or 3 map everything to 0.
*/
static inline unsigned char GetSubsetIndex(unsigned char numSubsets,
  unsigned char partition_id,size_t pixelIndex)
{
  switch (numSubsets)
  {
    case 2:
      return(BC7_partition_table[0][partition_id][pixelIndex]);
    case 3:
      return(BC7_partition_table[1][partition_id][pixelIndex]);
    default:
      return(0);
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s D D S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsDDS() returns MagickTrue if the image format type, identified by the
% magick string, is DDS.
%
% The format of the IsDDS method is:
%
% MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length)
{
  /* A DDS stream begins with the four magic bytes "DDS ". */
  if (length < 4)
    return(MagickFalse);
  return(LocaleNCompare((char *) magick,"DDS ",4) == 0 ? MagickTrue :
    MagickFalse);
}
/*
  ReadDDSInfo() parses the DDS_HEADER that follows the 4-byte "DDS "
  magic into *dds_info, plus the optional DDS_HEADER_DXT10 extension
  when the pixel format's fourcc is 'DX10'.  All fields are read as
  little-endian DWORDs.  Returns MagickFalse when the fixed structure
  sizes (124 for the header, 32 for the pixel format) or the required
  capability flags are wrong.
*/
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
  size_t
    hdr_size,
    required;

  /* Seek to start of header */
  (void) SeekBlob(image, 4, SEEK_SET);
  /* Check header field: dwSize must be 124 */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 124)
    return MagickFalse;
  /* Fill in DDS info struct */
  dds_info->flags = ReadBlobLSBLong(image);
  /* Check required flags */
  required=(size_t) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
  if ((dds_info->flags & required) != required)
    return MagickFalse;
  dds_info->height = ReadBlobLSBLong(image);
  dds_info->width = ReadBlobLSBLong(image);
  dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
  dds_info->depth = ReadBlobLSBLong(image);
  dds_info->mipmapcount = ReadBlobLSBLong(image);
  (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */
  /* Read pixel format structure: dwSize must be 32 */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 32)
    return MagickFalse;
  dds_info->pixelformat.flags = ReadBlobLSBLong(image);
  dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
  dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
  dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);
  dds_info->ddscaps1 = ReadBlobLSBLong(image);
  dds_info->ddscaps2 = ReadBlobLSBLong(image);
  (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */
  /* Read optional DX10 header if available */
  if ((dds_info->pixelformat.flags & DDPF_FOURCC) &&
      (dds_info->pixelformat.fourcc == FOURCC_DX10))
    {
      dds_info->extFormat = ReadBlobLSBLong(image);
      dds_info->extDimension = ReadBlobLSBLong(image);
      dds_info->extFlags = ReadBlobLSBLong(image);
      dds_info->extArraySize = ReadBlobLSBLong(image);
      dds_info->extFlags2 = ReadBlobLSBLong(image);
    }
  else
    {
      /* No extension header: zero the extension fields */
      dds_info->extFormat = 0;
      dds_info->extDimension = 0;
      dds_info->extFlags = 0;
      dds_info->extArraySize = 0;
      dds_info->extFlags2 = 0;
    }
  return(MagickTrue);
}
/*
  SetDXT1Pixels() writes one decoded 4x4 DXT1 block into the pixel cache
  at (x,y): 'colors' holds the four-entry palette, 'bits' the sixteen
  2-bit texel indices.  Texels outside the image are clipped.  Returns
  MagickFalse as soon as a texel references the transparent palette
  entry (colors.a[code] != 0) while the image has no alpha channel, so
  the caller can enable alpha and re-run the block.
*/
static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y,
  DDSColors colors,size_t bits,Quantum *q)
{
  ssize_t
    i;

  ssize_t
    j;

  unsigned char
    code;

  for (j = 0; j < 4; j++)
  {
    for (i = 0; i < 4; i++)
    {
      if ((x + i) < (ssize_t) image->columns &&
          (y + j) < (ssize_t) image->rows)
        {
          /* Two bits per texel select one of the four palette entries. */
          code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
          SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
          SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
          SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
          SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q);
          if ((colors.a[code] != 0) &&
              (image->alpha_trait == UndefinedPixelTrait))
            return(MagickFalse);
          q+=GetPixelChannels(image);
        }
    }
  }
  return(MagickTrue);
}
/*
  ReadMipmaps() appends every mipmap level as a new image in the list,
  decoding each with the supplied per-format 'decoder'.  Mipmaps are
  only present for textures and cube maps; mipmapcount includes the
  top-level image, so iteration starts at one.
*/
static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Only read mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        AcquireNextImage(image_info,image,exception);
        if (image->next == (Image *) NULL)
          return(MagickFalse);
        /* Mipmaps inherit the alpha trait of the parent image */
        image->next->alpha_trait=image->alpha_trait;
        image=SyncNextImageInList(image);
        status=SetImageExtent(image,w,h,exception);
        if (status == MagickFalse)
          break;
        status=decoder(image,dds_info,exception);
        if (status == MagickFalse)
          break;
        if ((w == 1) && (h == 1))
          break;
        /* Each level halves both dimensions, clamped at 1 */
        w=DIV2(w);
        h=DIV2(h);
      }
    }
  return(status);
}
/*
  Expand the two 16-bit 5:6:5 block endpoints c0/c1 into the four-entry
  DXT palette *c.  When alpha is honored and c0 <= c1 the block is in
  three-color mode: the third entry is the endpoint midpoint and the
  fourth is transparent black; otherwise two interpolants at 1/3 and
  2/3 are produced.  The a[] channel is opacity (255 = transparent).
*/
static void CalculateColors(unsigned short c0, unsigned short c1,
  DDSColors *c, MagickBooleanType ignoreAlpha)
{
  c->a[0]=c->a[1]=c->a[2]=c->a[3]=0;
  c->r[0]=(unsigned char) C565_red(c0);
  c->g[0]=(unsigned char) C565_green(c0);
  c->b[0]=(unsigned char) C565_blue(c0);
  c->r[1]=(unsigned char) C565_red(c1);
  c->g[1]=(unsigned char) C565_green(c1);
  c->b[1]=(unsigned char) C565_blue(c1);
  if ((ignoreAlpha == MagickFalse) && (c0 <= c1))
    {
      /* Three-color mode: midpoint plus a transparent fourth entry. */
      c->r[2]=(unsigned char) ((c->r[0]+c->r[1])/2);
      c->g[2]=(unsigned char) ((c->g[0]+c->g[1])/2);
      c->b[2]=(unsigned char) ((c->b[0]+c->b[1])/2);
      c->r[3]=c->g[3]=c->b[3]=0;
      c->a[3]=255;
    }
  else
    {
      /* Four-color mode: interpolants at 1/3 and 2/3 of the span. */
      c->r[2]=(unsigned char) ((2*c->r[0]+c->r[1])/3);
      c->g[2]=(unsigned char) ((2*c->g[0]+c->g[1])/3);
      c->b[2]=(unsigned char) ((2*c->b[0]+c->b[1])/3);
      c->r[3]=(unsigned char) ((c->r[0]+2*c->r[1])/3);
      c->g[3]=(unsigned char) ((c->g[0]+2*c->g[1])/3);
      c->b[3]=(unsigned char) ((c->b[0]+2*c->b[1])/3);
    }
}
/*
  ReadDXT1Pixels() decodes a DXT1-compressed plane: each 4x4 block is
  eight bytes — two little-endian 565 endpoints followed by 32 bits of
  2-bit texel indices.  When a transparent texel is met on an image
  without an alpha channel, the image alpha is reset to opaque and the
  block is written a second time (see SetDXT1Pixels).
*/
static MagickBooleanType ReadDXT1Pixels(Image *image,
  const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  Quantum
    *q;

  ssize_t
    x;

  size_t
    bits;

  ssize_t
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read 8 bytes of data from the image */
      c0=ReadBlobLSBShort(image);
      c1=ReadBlobLSBShort(image);
      bits=ReadBlobLSBLong(image);
      CalculateColors(c0,c1,&colors,MagickFalse);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      if (SetDXT1Pixels(image,x,y,colors,bits,q) == MagickFalse)
        {
          /* Transparent texel hit an alpha-less image: enable alpha,
             refetch the patch and redo the block. */
          SetImageAlpha(image,QuantumRange,exception);
          q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
            MagickMin(4,image->rows-y),exception);
          if (q != (Quantum *) NULL)
            SetDXT1Pixels(image,x,y,colors,bits,q);
        }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
Skip the mipmap images for compressed (DXTn) dds files
*/
/*
  SkipDXTMipmaps() seeks past the mipmap chain of a compressed (DXTn)
  image without decoding it.  Each level occupies ceil(w/4)*ceil(h/4)
  texel blocks of 'texel_size' bytes.  Mipmaps are only present for
  textures and cube maps.
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,const DDSInfo *dds_info,
  int texel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        /* Size of this level: blocks across * blocks down * bytes/block */
        offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}
/*
  Decode the top-level DXT1 plane, then either decode the mipmap chain
  into additional images or seek past it (8 bytes per 4x4 texel block).
*/
static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT1Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,8,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception));
}
/*
  ReadDXT3Pixels() decodes a DXT3-compressed plane: each 4x4 block is
  sixteen bytes — 64 bits of explicit 4-bit alpha values followed by a
  DXT1-style color block (two 565 endpoints, always four-color mode,
  plus 32 bits of 2-bit indices).
*/
static MagickBooleanType ReadDXT3Pixels(Image *image,
  const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  Quantum
    *q;

  ssize_t
    i,
    x;

  unsigned char
    alpha;

  size_t
    a0,
    a1,
    bits,
    code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read alpha values (8 bytes): a0 covers rows 0-1, a1 rows 2-3 */
      a0 = ReadBlobLSBLong(image);
      a1 = ReadBlobLSBLong(image);
      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      /* DXT3 color blocks are always four-color: ignore the c0<=c1 rule */
      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /*
                Extract alpha value: multiply 0..15 by 17 to get range 0..255
              */
              if (j < 2)
                alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
              else
                alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode the base DXT3 image, then either decode or skip the mipmap chain.
  A DXT3 texel block occupies 16 bytes.
*/
static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT3Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,16,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception));
}
/*
  Decode DXT5 (BC3) pixel data: 4x4 texel blocks of 16 bytes each -- two
  8-bit alpha endpoints plus 48 bits of 3-bit alpha interpolation indices,
  followed by a BC1-style color block.  Returns MagickFalse on EOF or when
  pixels cannot be queued.
*/
static MagickBooleanType ReadDXT5Pixels(Image *image,
  const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  MagickSizeType
    alpha_bits;

  Quantum
    *q;

  ssize_t
    i,
    x;

  unsigned char
    a0,
    a1;

  size_t
    alpha,
    bits,
    code,
    alpha_code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read alpha values (8 bytes): two endpoints ... */
      a0 = (unsigned char) ReadBlobByte(image);
      a1 = (unsigned char) ReadBlobByte(image);
      /* ... then 48 bits of 3-bit indices, assembled little-endian */
      alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
      alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);
      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          /* clip partial blocks at the right/bottom image edges */
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /*
                Extract alpha value: 8 interpolated steps when a0 > a1,
                otherwise 6 interpolated steps plus explicit 0 (code 6)
                and 255 (code 7).
              */
              alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
              if (alpha_code == 0)
                alpha = a0;
              else if (alpha_code == 1)
                alpha = a1;
              else if (a0 > a1)
                alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
              else if (alpha_code == 6)
                alpha = 0;
              else if (alpha_code == 7)
                alpha = 255;
              else
                alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode the base DXT5 image, then either decode or skip the mipmap chain.
  A DXT5 texel block occupies 16 bytes.
*/
static MagickBooleanType ReadDXT5(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT5Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,16,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception));
}
/*
  Return the next single bit of a 16-byte BC7 block and advance the bit
  cursor.  Bits beyond the end of the block read as zero.
*/
static unsigned char GetBit(const unsigned char *block,size_t *start_bit)
{
  size_t
    bit,
    byte;

  byte=(*start_bit)/8;
  bit=(*start_bit)%8;
  (*start_bit)++;
  if (byte > 15)
    return(0);
  return((unsigned char) ((block[byte] >> bit) & 0x01));
}
/*
  Return the next num_bits (<= 8) bits of a 16-byte BC7 block, LSB first,
  and advance the bit cursor.  Bits beyond the end of the block read as
  zero.

  Fix: the cross-byte path used to read block[index+1] even when index was
  15, i.e. one byte past the 16-byte block -- an out-of-bounds read.  The
  missing byte is now treated as zero instead.
*/
static unsigned char GetBits(const unsigned char *block,size_t *start_bit,
  unsigned char num_bits)
{
  size_t
    base,
    first_bits,
    index,
    next_bits;

  unsigned char
    ret;

  index=(*start_bit) >> 3;
  base=(*start_bit)-(index << 3);
  if (index > 15)
    return(0);
  if (base + num_bits > 8)
    {
      /* value straddles two bytes */
      first_bits=8-base;
      next_bits=num_bits-first_bits;
      ret=(unsigned char) (block[index] >> base);
      /* guard: when index == 15 there is no next byte; its bits are zero */
      if (index < 15)
        ret|=(unsigned char) (((block[index + 1]) &
          ((1u << next_bits) - 1)) << first_bits);
    }
  else
    {
      ret=((block[index] >> base) & ((1 << num_bits) - 1));
    }
  (*start_bit)+=num_bits;
  return(ret);
}
/*
  Return MagickTrue when pixelIndex is the anchor pixel of its subset for
  the given partition.  Table row selection: row 0 is the first subset of
  any partitioning, row 1 the second subset of a 2-subset partitioning,
  rows 2 and 3 the second and third subsets of a 3-subset partitioning.
*/
static MagickBooleanType IsPixelAnchorIndex(unsigned char subset_index,
  unsigned char num_subsets,size_t pixelIndex,unsigned char partition_id)
{
  size_t
    row;

  if (subset_index == 0)
    row=0;
  else if (subset_index == 1)
    row=(num_subsets == 2) ? 1 : (num_subsets == 3) ? 2 : 3;
  else
    row=3;
  return(BC7_anchor_index_table[row][partition_id] == pixelIndex ?
    MagickTrue : MagickFalse);
}
/*
  Read the BC7 endpoint colors for every subset of a block.  The raw
  channel values are read at the mode's precision, widened by the shared
  or per-endpoint p-bits where the mode defines them, then expanded to
  8 bits by shifting the MSB into bit 7 and replicating the high bits
  into the vacated low bits.
*/
static void ReadEndpoints(BC7Colors *endpoints,const unsigned char *block,
  size_t mode,size_t *start_bit)
{
  MagickBooleanType
    has_alpha,
    has_pbits;

  unsigned char
    alpha_bits,
    color_bits,
    pbit,
    pbit0,
    pbit1;

  size_t
    num_subsets,
    i;

  num_subsets=(size_t) BC7_mode_info[mode].num_subsets;
  color_bits=BC7_mode_info[mode].color_precision;
  /* red */
  for (i=0; i < num_subsets * 2; i++)
    endpoints->r[i]=GetBits(block,start_bit,color_bits);
  /* green */
  for (i=0; i < num_subsets * 2; i++)
    endpoints->g[i]=GetBits(block,start_bit,color_bits);
  /* blue */
  for (i=0; i < num_subsets * 2; i++)
    endpoints->b[i]=GetBits(block,start_bit,color_bits);
  /* alpha: default to opaque, overwritten below for modes 4-7 */
  for (i=0; i < num_subsets * 2; i++)
    endpoints->a[i]=255;
  alpha_bits=BC7_mode_info[mode].alpha_precision;
  has_alpha=mode >= 4 ? MagickTrue : MagickFalse;
  if (has_alpha != MagickFalse)
    {
      for (i=0; i < num_subsets * 2; i++)
        endpoints->a[i]=GetBits(block,start_bit,alpha_bits);
    }
  /* handle modes that have p bits */
  has_pbits=(mode == 0) || (mode == 1) || (mode == 3) || (mode == 6) || (mode == 7) ? MagickTrue : MagickFalse;
  if (has_pbits != MagickFalse)
    {
      /* make room for the p-bit in the least significant position */
      for (i=0; i < num_subsets * 2; i++)
      {
        endpoints->r[i] <<= 1;
        endpoints->g[i] <<= 1;
        endpoints->b[i] <<= 1;
        endpoints->a[i] <<= 1;
      }
      /* mode 1 shares a p-bit for both endpoints */
      if (mode == 1)
        {
          pbit0=GetBit(block,start_bit);
          pbit1=GetBit(block,start_bit);
          endpoints->r[0] |= pbit0;
          endpoints->g[0] |= pbit0;
          endpoints->b[0] |= pbit0;
          endpoints->r[1] |= pbit0;
          endpoints->g[1] |= pbit0;
          endpoints->b[1] |= pbit0;
          endpoints->r[2] |= pbit1;
          endpoints->g[2] |= pbit1;
          endpoints->b[2] |= pbit1;
          endpoints->r[3] |= pbit1;
          endpoints->g[3] |= pbit1;
          endpoints->b[3] |= pbit1;
        }
      else
        {
          /* one p-bit per endpoint, applied to all four channels */
          for (i=0; i < num_subsets * 2; i++)
          {
            pbit=GetBit(block,start_bit);
            endpoints->r[i] |= pbit;
            endpoints->g[i] |= pbit;
            endpoints->b[i] |= pbit;
            endpoints->a[i] |= pbit;
          }
        }
    }
  /* 1 bit increased due to the pbit */
  if (has_pbits != MagickFalse)
    {
      color_bits++;
      alpha_bits++;
    }
  /* color and alpha bit shifting so that MSB lies in bit 7 */
  for (i=0; i < num_subsets * 2; i++)
  {
    endpoints->r[i] <<= (8 - color_bits);
    endpoints->g[i] <<= (8 - color_bits);
    endpoints->b[i] <<= (8 - color_bits);
    endpoints->a[i] <<= (8 - alpha_bits);
    /* replicate the high bits into the low bits to span the full range */
    endpoints->r[i]=endpoints->r[i] | (endpoints->r[i] >> color_bits);
    endpoints->g[i]=endpoints->g[i] | (endpoints->g[i] >> color_bits);
    endpoints->b[i]=endpoints->b[i] | (endpoints->b[i] >> color_bits);
    endpoints->a[i]=endpoints->a[i] | (endpoints->a[i] >> alpha_bits);
  }
  if (has_alpha == MagickFalse)
    {
      /* restore opaque alpha clobbered by the p-bit widening above */
      for (i=0; i < num_subsets * 2; i++)
        endpoints->a[i]=255;
    }
}
/*
  Decode BC7-compressed pixel data.  Each 16-byte block selects one of 8
  modes; the mode determines the subset count, partition, endpoint
  precision, index precisions and optional channel rotation.  Returns
  MagickFalse on short reads, EOF, or a malformed block header.
*/
static MagickBooleanType ReadBC7Pixels(Image *image,
  const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  BC7Colors
    colors = { 0 };

  Quantum
    *q;

  size_t
    mode,
    start_bit;

  ssize_t
    count,
    i,
    x,
    y;

  unsigned char
    a,
    alpha_indices[16],
    b,
    block[16],
    c0,
    c1,
    color_indices[16],
    g,
    index_prec,
    index2_prec,
    num_bits,
    num_subsets,
    partition_id,
    r,
    rotation,
    selector_bit,
    subset_indices[16],
    weight;

  magick_unreferenced(dds_info);
  memset(alpha_indices,0,sizeof(alpha_indices));
  memset(block,0,sizeof(block));
  memset(color_indices,0,sizeof(color_indices));
  memset(subset_indices,0,sizeof(subset_indices));
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      size_t
        area;

      /* Get 4x4 patch of pixels to write on */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read 16 bytes of data from the image */
      count=ReadBlob(image,16,block);
      if (count != 16)
        return(MagickFalse);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Get the mode of the block: the mode number is the count of
         leading zero bits before the first set bit */
      start_bit=0;
      while (start_bit <= 8 && !GetBit(block, &start_bit)) {}
      mode=start_bit-1;
      if (mode > 7)
        return(MagickFalse);
      num_subsets=BC7_mode_info[mode].num_subsets;
      partition_id=0;
      /* only these modes have more than 1 subset */
      if ((mode == 0) || (mode == 1) || (mode == 2) || (mode == 3) || (mode == 7))
        {
          partition_id=GetBits(block,&start_bit,BC7_mode_info[mode].partition_bits);
          if (partition_id > 63)
            return(MagickFalse);
        }
      /* modes 4 and 5 can rotate a color channel into alpha (undone below) */
      rotation=0;
      if ((mode == 4) || (mode == 5))
        rotation=GetBits(block,&start_bit,2);
      /* mode 4's selector bit swaps which index set carries 3 bits */
      selector_bit=0;
      if (mode == 4)
        selector_bit=GetBit(block, &start_bit);
      ReadEndpoints(&colors,block,mode,&start_bit);
      index_prec=BC7_mode_info[mode].index_precision;
      index2_prec=BC7_mode_info[mode].index2_precision;
      if ((mode == 4) && (selector_bit == 1))
        {
          index_prec=3;
          /* the first (anchor) index is stored with one bit less */
          alpha_indices[0]=GetBit(block,&start_bit);
          for (i = 1; i < 16; i++)
            alpha_indices[i]=GetBits(block,&start_bit,2);
        }
      /* get color and subset indices */
      for (i=0; i < 16; i++)
      {
        subset_indices[i]=GetSubsetIndex(num_subsets,partition_id,i);
        num_bits=index_prec;
        /* anchor pixels store one bit less */
        if (IsPixelAnchorIndex(subset_indices[i],num_subsets,i,partition_id))
          num_bits--;
        color_indices[i]=GetBits(block,&start_bit,num_bits);
      }
      /* get alpha indices if the block has it */
      if ((mode == 5) || ((mode == 4) && (selector_bit == 0)))
        {
          alpha_indices[0]=GetBits(block,&start_bit,index2_prec - 1);
          for (i=1; i < 16; i++)
            alpha_indices[i]=GetBits(block,&start_bit,index2_prec);
        }
      /* Write the pixels (clipped to the block area inside the image) */
      area=MagickMin(MagickMin(4,image->columns-x)*MagickMin(4,image->rows-y),
        16);
      for (i=0; i < (ssize_t) area; i++)
      {
        unsigned char
          c2;

        /* c0/c1 are the subset's endpoint pair, c2 the pixel's index */
        c0=2 * subset_indices[i];
        c1=(2 * subset_indices[i]) + 1;
        c2=color_indices[i];
        weight=64;
        /* Color Interpolation */
        switch(index_prec)
        {
          case 2:
            if (c2 < sizeof(BC7_weight2))
              weight=BC7_weight2[c2];
            break;
          case 3:
            if (c2 < sizeof(BC7_weight3))
              weight=BC7_weight3[c2];
            break;
          default:
            if (c2 < sizeof(BC7_weight4))
              weight=BC7_weight4[c2];
        }
        /* 6-bit fixed-point blend between the two endpoints */
        r=((64 - weight) * colors.r[c0] + weight * colors.r[c1] + 32) >> 6;
        g=((64 - weight) * colors.g[c0] + weight * colors.g[c1] + 32) >> 6;
        b=((64 - weight) * colors.b[c0] + weight * colors.b[c1] + 32) >> 6;
        a=((64 - weight) * colors.a[c0] + weight * colors.a[c1] + 32) >> 6;
        /* Interpolate alpha for mode 4 and 5 blocks */
        if (mode == 4 || mode == 5)
          {
            unsigned char
              a0;

            a0=alpha_indices[i];
            if (a0 < sizeof(BC7_weight2))
              weight=BC7_weight2[a0];
            if ((mode == 4) && (selector_bit == 0) && (a0 < sizeof(BC7_weight3)))
              weight=BC7_weight3[a0];
            if ((c0 < sizeof(colors.a)) && (c1 < sizeof(colors.a)))
              a=((64 - weight) * colors.a[c0] + weight * colors.a[c1] + 32) >> 6;
          }
        /* undo the channel rotation encoded in modes 4/5 */
        switch (rotation)
        {
          case 1:
            Swap(a,r);
            break;
          case 2:
            Swap(a,g);
            break;
          case 3:
            Swap(a,b);
            break;
        }
        SetPixelRed(image,ScaleCharToQuantum((unsigned char)r),q);
        SetPixelGreen(image,ScaleCharToQuantum((unsigned char)g),q);
        SetPixelBlue(image,ScaleCharToQuantum((unsigned char)b),q);
        SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)a),q);
        q+=GetPixelChannels(image);
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode the base BC7 image, then either decode or skip the mipmap chain.
  A BC7 texel block occupies 16 bytes.
*/
static MagickBooleanType ReadBC7(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadBC7Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,16,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadBC7Pixels,exception));
}
/*
  Decode uncompressed RGB pixel data one scanline at a time: 8-bit
  grayscale, 16-bit RGB565, or 24/32-bit BGR(X) byte order.
*/
static MagickBooleanType ReadUncompressedRGBPixels(Image *image,
  const DDSInfo *dds_info,ExceptionInfo *exception)
{
  Quantum
    *q;

  ssize_t
    x,
    y;

  unsigned short
    color;

  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      /* 8-bit: a single gray sample per pixel */
      if (dds_info->pixelformat.rgb_bitcount == 8 ||
          dds_info->extFormat == DXGI_FORMAT_R8_UNORM)
        SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q);
      else if (dds_info->pixelformat.rgb_bitcount == 16 ||
               dds_info->extFormat == DXGI_FORMAT_B5G6R5_UNORM)
        {
          /* 16-bit RGB565: expand the 5/6/5-bit fields to 0..255 */
          color=ReadBlobShort(image);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            (((color >> 11)/31.0)*255)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
        }
      else
        {
          /* 24/32-bit: bytes are stored B, G, R; 32-bit X8 layouts carry
             a padding byte which is discarded */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          if (dds_info->pixelformat.rgb_bitcount == 32 ||
              dds_info->extFormat == DXGI_FORMAT_B8G8R8X8_UNORM)
            (void) ReadBlobByte(image);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Skip over the mipmap chain of an uncompressed (RGB or RGBA) DDS file
  without decoding it.  Mipmaps are only present for textures and cube
  maps.
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,const DDSInfo *dds_info,
  int pixel_size,ExceptionInfo *exception)
{
  MagickOffsetType
    offset;

  size_t
    h,
    w;

  ssize_t
    i;

  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  /* Only textures and cube maps carry mipmaps. */
  if ((dds_info->ddscaps1 & DDSCAPS_MIPMAP) == 0)
    return(MagickTrue);
  if (((dds_info->ddscaps1 & DDSCAPS_TEXTURE) == 0) &&
      ((dds_info->ddscaps2 & DDSCAPS2_CUBEMAP) == 0))
    return(MagickTrue);
  w=DIV2(dds_info->width);
  h=DIV2(dds_info->height);
  /* The mipmap count includes the base image, so level 0 is not skipped. */
  for (i=1; (i < (ssize_t) dds_info->mipmapcount) && (w != 0) && (h != 0); i++)
  {
    offset=(MagickOffsetType) w*h*pixel_size;
    if (SeekBlob(image,offset,SEEK_CUR) < 0)
      break;
    w=DIV2(w);
    h=DIV2(h);
    if ((w == 1) && (h == 1))
      break;
  }
  return(MagickTrue);
}
/*
  Decode an uncompressed RGB DDS file: validate the pixel layout, decode
  the base image, then read or skip the mipmap chain.
*/
static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info,
  Image *image,const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  /* 8-bit data is treated as grayscale */
  if (dds_info->pixelformat.rgb_bitcount == 8 ||
      dds_info->extFormat == DXGI_FORMAT_R8_UNORM)
    (void) SetImageType(image,GrayscaleType,exception);
  /* for 16-bit data only the RGB565 bit layout is supported */
  else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
      dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
    ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
      image->filename);
  if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels,
      exception));
  else
    /* NOTE(review): pixel_size 3 matches 24-bit data, but 8/16/32-bit
       files also take this path -- confirm mipmap skipping for those
       depths. */
    return(SkipRGBMipmaps(image,dds_info,3,exception));
}
/*
  Decode uncompressed RGBA pixel data.  For 16-bit data the bit masks
  select one of three layouts, recorded in alphaBits: 1 = A1R5G5B5,
  2 = 8-bit luminance plus 8-bit alpha, 4 = A4R4G4B4.  32-bit data is
  either RGBA or BGRA byte order depending on the mask.
*/
static MagickBooleanType ReadUncompressedRGBAPixels(Image *image,
  const DDSInfo *dds_info,ExceptionInfo *exception)
{
  Quantum
    *q;

  ssize_t
    alphaBits,
    x,
    y;

  unsigned short
    color;

  alphaBits=0;
  if (dds_info->pixelformat.rgb_bitcount == 16)
    {
      if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
        alphaBits=1;
      else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
        {
          alphaBits=2;
          (void) SetImageType(image,GrayscaleAlphaType,exception);
        }
      else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
        alphaBits=4;
      else
        /* any other 16-bit layout is unsupported */
        ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
          image->filename);
    }
  if (dds_info->extFormat == DXGI_FORMAT_B5G5R5A1_UNORM)
    alphaBits=1;
  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 16 ||
          dds_info->extFormat == DXGI_FORMAT_B5G5R5A1_UNORM)
        {
          color=ReadBlobShort(image);
          if (alphaBits == 1)
            {
              /* A1R5G5B5: the top bit is a binary alpha flag */
              SetPixelAlpha(image,(color & (1 << 15)) ? QuantumRange : 0,q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 1) >> 11)/31.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 6) >> 11)/31.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
            }
          else if (alphaBits == 2)
            {
              /* luminance+alpha: high byte alpha, low byte gray */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (color >> 8)),q);
              SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q);
            }
          else
            {
              /* A4R4G4B4: expand each 4-bit field to 0..255 */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (((color >> 12)/15.0)*255)),q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 4) >> 12)/15.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 8) >> 12)/15.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 12) >> 12)/15.0)*255)),q);
            }
        }
      else if (dds_info->extFormat == DXGI_FORMAT_R8G8B8A8_UNORM ||
          IsBitMask(dds_info->pixelformat,0x000000ff,0x0000ff00,0x00ff0000,0xff000000))
        {
          /* 32-bit RGBA byte order */
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
        }
      else
        {
          /* 32-bit BGRA byte order */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode the base uncompressed RGBA image, then either decode or skip the
  mipmap chain.  Each RGBA pixel occupies four bytes.
*/
static MagickBooleanType ReadUncompressedRGBA(const ImageInfo *image_info,
  Image *image,const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadUncompressedRGBAPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipRGBMipmaps(image,dds_info,4,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadDDSImage() reads a DirectDraw Surface image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadDDSImage method is:
%
% Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: The image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  const char
    *option;

  CompressionType
    compression;

  DDSInfo
    dds_info;

  DDSDecoder
    *decoder;

  Image
    *image;

  MagickBooleanType
    status,
    cubemap,
    volume,
    read_mipmaps;

  PixelTrait
    alpha_trait;

  size_t
    n,
    num_images;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  cubemap=MagickFalse,
  volume=MagickFalse,
  read_mipmaps=MagickFalse;
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Initialize image structure.
  */
  if (ReadDDSInfo(image, &dds_info) != MagickTrue)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
    cubemap = MagickTrue;
  if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
    volume = MagickTrue;
  /*
    Determine pixel format: this selects the decoder callback, the
    reported compression type, and the alpha trait.
  */
  if (dds_info.pixelformat.flags & DDPF_RGB)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          alpha_trait = BlendPixelTrait;
          decoder = ReadUncompressedRGBA;
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          /* Not sure how to handle this */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_FOURCC)
    {
      switch (dds_info.pixelformat.fourcc)
      {
        case FOURCC_DXT1:
        {
          alpha_trait = UndefinedPixelTrait;
          compression = DXT1Compression;
          decoder = ReadDXT1;
          break;
        }
        case FOURCC_DXT3:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT3Compression;
          decoder = ReadDXT3;
          break;
        }
        case FOURCC_DXT5:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT5Compression;
          decoder = ReadDXT5;
          break;
        }
        case FOURCC_DX10:
        {
          /* DX10 extension header: only 2D textures are supported */
          if (dds_info.extDimension != DDSEXT_DIMENSION_TEX2D)
            {
              ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
            }
          switch (dds_info.extFormat)
          {
            case DXGI_FORMAT_R8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_B5G6R5_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_B5G5R5A1_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_B8G8R8A8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_R8G8B8A8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_B8G8R8X8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_BC1_UNORM:
            {
              alpha_trait = UndefinedPixelTrait;
              compression = DXT1Compression;
              decoder = ReadDXT1;
              break;
            }
            case DXGI_FORMAT_BC2_UNORM:
            {
              alpha_trait = BlendPixelTrait;
              compression = DXT3Compression;
              decoder = ReadDXT3;
              break;
            }
            case DXGI_FORMAT_BC3_UNORM:
            {
              alpha_trait = BlendPixelTrait;
              compression = DXT5Compression;
              decoder = ReadDXT5;
              break;
            }
            case DXGI_FORMAT_BC7_UNORM:
            case DXGI_FORMAT_BC7_UNORM_SRGB:
            {
              alpha_trait = BlendPixelTrait;
              compression = BC7Compression;
              decoder = ReadBC7;
              break;
            }
            default:
            {
              /* Unknown format */
              ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
            }
          }
          if (dds_info.extFlags & DDSEXTFLAGS_CUBEMAP)
            cubemap = MagickTrue;
          num_images = dds_info.extArraySize;
          break;
        }
        default:
        {
          /* Unknown FOURCC */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      }
    }
  else
    {
      /* Neither compressed nor uncompressed... thus unsupported */
      ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
    }
  /*
    NOTE(review): this unconditionally resets num_images, discarding the
    extArraySize value assigned in the FOURCC_DX10 case above -- looks
    like a dead store that would break DX10 texture arrays; confirm the
    intended ordering.
  */
  num_images = 1;
  if (cubemap)
    {
      /*
        Determine number of faces defined in the cubemap
      */
      num_images = 0;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
    }
  if (volume)
    num_images = dds_info.depth;
  /* a zero count or one exceeding the file size is a corrupt header */
  if ((num_images == 0) || (num_images > GetBlobSize(image)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse)
    ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit");
  /* mipmaps are decoded only when the user sets dds:skip-mipmaps=false */
  option=GetImageOption(image_info,"dds:skip-mipmaps");
  if (IsStringFalse(option) != MagickFalse)
    read_mipmaps=MagickTrue;
  for (n = 0; n < num_images; n++)
  {
    if (n != 0)
      {
        /* Start a new image */
        if (EOFBlob(image) != MagickFalse)
          ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
        AcquireNextImage(image_info,image,exception);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(DestroyImageList(image));
        image=SyncNextImageInList(image);
      }
    image->alpha_trait=alpha_trait;
    image->compression=compression;
    image->columns=dds_info.width;
    image->rows=dds_info.height;
    image->storage_class=DirectClass;
    image->endian=LSBEndian;
    image->depth=8;
    if (image_info->ping != MagickFalse)
      {
        /* ping mode: header information only, pixels are never decoded */
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
    status=SetImageExtent(image,image->columns,image->rows,exception);
    if (status == MagickFalse)
      return(DestroyImageList(image));
    (void) SetImageBackgroundColor(image,exception);
    status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception);
    if (status == MagickFalse)
      {
        /* keep any frames decoded so far; fail only if none succeeded */
        (void) CloseBlob(image);
        if (n == 0)
          return(DestroyImageList(image));
        return(GetFirstImageInList(image));
      }
  }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterDDSImage() adds attributes for the DDS image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterDDSImage method is:
%
% RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
  static const char
    *aliases[] = { "DDS", "DXT1", "DXT5" };

  MagickInfo
    *entry;

  size_t
    i;

  /* Register the coder under its canonical name and two format aliases. */
  for (i=0; i < (sizeof(aliases)/sizeof(aliases[0])); i++)
  {
    entry=AcquireMagickInfo("DDS",aliases[i],"Microsoft DirectDraw Surface");
    entry->decoder=(DecodeImageHandler *) ReadDDSImage;
    entry->encoder=(EncodeImageHandler *) WriteDDSImage;
    entry->magick=(IsImageFormatHandler *) IsDDS;
    entry->flags|=CoderDecoderSeekableStreamFlag;
    (void) RegisterMagickInfo(entry);
  }
  return(MagickImageCoderSignature);
}
/*
  Gather 16 indices through a remapping table.  A map entry of -1 marks a
  pixel with no source index and yields the code 3.
*/
static void RemapIndices(const ssize_t *map, const unsigned char *source,
  unsigned char *target)
{
  ssize_t
    n;

  for (n = 0; n < 16; n++)
    target[n] = (map[n] == -1) ? 3 : source[map[n]];
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterDDSImage() removes format registrations made by the
% DDS module from the list of supported formats.
%
% The format of the UnregisterDDSImage method is:
%
% UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
  static const char
    *aliases[] = { "DDS", "DXT1", "DXT5" };

  size_t
    i;

  /* Remove every registration made by RegisterDDSImage(). */
  for (i=0; i < (sizeof(aliases)/sizeof(aliases[0])); i++)
    (void) UnregisterMagickInfo(aliases[i]);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format.
%
% The format of the WriteDDSImage method is:
%
% MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
/*
  Map 16 alpha samples onto the DXT5 alpha palette built from the two
  endpoints.  'steps' selects the ramp: 7 interpolated steps fill the
  whole palette, 5 steps leave the implicit 0 and 255 codes in slots 6
  and 7.  A sample of -1 is unused and gets index 0.  Returns the summed
  squared quantization error.
*/
static size_t CompressAlpha(const size_t min, const size_t max,
  const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
  size_t
    step,
    total;

  ssize_t
    i;

  unsigned char
    palette[8];

  /* Build the palette: endpoints, interpolants, and implicit 0/255. */
  palette[0] = (unsigned char) min;
  palette[1] = (unsigned char) max;
  palette[6] = 0;
  palette[7] = 255;
  for (step = 1; step < steps; step++)
    palette[step+1] = (unsigned char) (((steps-step)*min + step*max) / steps);
  /* Assign every sample the nearest palette entry. */
  total = 0;
  for (i = 0; i < 16; i++)
  {
    size_t
      best_dist,
      best_index,
      candidate,
      pixel;

    if (alphas[i] == -1)
      {
        indices[i] = 0;
        continue;
      }
    pixel = (size_t) alphas[i];
    best_dist = SIZE_MAX;
    best_index = 0;
    for (candidate = 0; candidate < 8; candidate++)
    {
      size_t
        diff;

      /* unsigned wraparound is harmless: (-d)^2 == d^2 (mod 2^N) */
      diff = pixel - (size_t) palette[candidate];
      diff *= diff;
      if (diff < best_dist)
        {
          best_dist = diff;
          best_index = candidate;
        }
    }
    indices[i] = (unsigned char) best_index;
    total += best_dist;
  }
  return total;
}
/*
  Order the cluster points by their projection onto 'axis'.  Returns
  MagickFalse when the resulting order repeats one produced by an earlier
  iteration (signalling the caller's refinement loop to stop); otherwise
  fills pointsWeights with the weighted points in sorted order, xSumwSum
  with their sum, and returns MagickTrue.
*/
static MagickBooleanType ConstructOrdering(const size_t count,
  const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
  DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
  float
    dps[16],
    f;

  ssize_t
    i;

  size_t
    j;

  unsigned char
    c,
    *o,
    *p;

  /* each iteration owns a 16-entry slice of the order table */
  o = order + (16*iteration);
  for (i=0; i < (ssize_t) count; i++)
  {
    dps[i] = Dot(points[i],axis);
    o[i] = (unsigned char)i;
  }
  /* insertion sort of the projections, permuting the order in lockstep */
  for (i=0; i < (ssize_t) count; i++)
  {
    for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
    {
      f = dps[j];
      dps[j] = dps[j - 1];
      dps[j - 1] = f;
      c = o[j];
      o[j] = o[j - 1];
      o[j - 1] = c;
    }
  }
  /* bail out if this ordering was already tried in a previous iteration */
  for (i=0; i < (ssize_t) iteration; i++)
  {
    MagickBooleanType
      same;

    p = order + (16*i);
    same = MagickTrue;
    for (j=0; j < count; j++)
    {
      if (o[j] != p[j])
      {
        same = MagickFalse;
        break;
      }
    }
    if (same != MagickFalse)
      return MagickFalse;
  }
  /* accumulate the weighted points and their sum in sorted order */
  xSumwSum->x = 0;
  xSumwSum->y = 0;
  xSumwSum->z = 0;
  xSumwSum->w = 0;
  for (i=0; i < (ssize_t) count; i++)
  {
    DDSVector4
      v;

    j = (size_t) o[i];
    v.x = points[j].w * points[j].x;
    v.y = points[j].w * points[j].y;
    v.z = points[j].w * points[j].z;
    v.w = points[j].w * 1.0f;
    VectorCopy44(v,&pointsWeights[i]);
    VectorAdd(*xSumwSum,v,xSumwSum);
  }
  return MagickTrue;
}
/*
  CompressClusterFit(): exhaustive "cluster fit" search for the best pair of
  block endpoints.  The points are ordered along an axis (initially the
  principal component), then every way of splitting the ordered points into
  the four DXT interpolation buckets (besti/bestj/bestk split positions) is
  scored with a least-squares error; the best endpoints found are snapped to
  the RGB565 grid and the per-pixel indices are emitted via RemapIndices().
  Up to 8 re-orderings along the refined start->end axis are tried.
*/
static void CompressClusterFit(const size_t count,
  const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3* end,
  unsigned char *indices)
{
  DDSVector3
    axis;
  DDSVector4
    grid,
    gridrcp,
    half,
    onethird_onethird2,
    pointsWeights[16],
    two,
    twonineths,
    twothirds_twothirds2,
    xSumwSum;
  float
    bestError = 1e+37f;
  size_t
    bestIteration = 0,
    besti = 0,
    bestj = 0,
    bestk = 0,
    iterationIndex;
  ssize_t
    i;
  unsigned char
    *o,
    order[128],
    unordered[16];
  /* Constants for the closed-form least-squares endpoint solve; the .w
     components carry the squared interpolation weights. */
  VectorInit(half,0.5f);
  VectorInit(two,2.0f);
  VectorInit(onethird_onethird2,1.0f/3.0f);
  onethird_onethird2.w = 1.0f/9.0f;
  VectorInit(twothirds_twothirds2,2.0f/3.0f);
  twothirds_twothirds2.w = 4.0f/9.0f;
  VectorInit(twonineths,2.0f/9.0f);
  /* RGB565 quantization grid and its reciprocal. */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  grid.w = 0.0f;
  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  gridrcp.w = 0.0f;
  xSumwSum.x = 0.0f;
  xSumwSum.y = 0.0f;
  xSumwSum.z = 0.0f;
  xSumwSum.w = 0.0f;
  ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);
  for (iterationIndex = 0;;)
  {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,1) \
    num_threads(GetMagickResourceLimit(ThreadResource))
#endif
    /* i/j/k are the split positions between the four buckets; part0..part3
       are the (weighted) sums of the points in each bucket. */
    for (i=0; i < (ssize_t) count; i++)
    {
      DDSVector4
        part0,
        part1,
        part2;
      size_t
        ii,
        j,
        k,
        kmin;
      VectorInit(part0,0.0f);
      for(ii=0; ii < (size_t) i; ii++)
        VectorAdd(pointsWeights[ii],part0,&part0);
      VectorInit(part1,0.0f);
      for (j=(size_t) i;;)
      {
        if (j == 0)
        {
          VectorCopy44(pointsWeights[0],&part2);
          kmin = 1;
        }
        else
        {
          VectorInit(part2,0.0f);
          kmin = j;
        }
        for (k=kmin;;)
        {
          DDSVector4
            a,
            alpha2_sum,
            alphax_sum,
            alphabeta_sum,
            b,
            beta2_sum,
            betax_sum,
            e1,
            e2,
            factor,
            part3;
          float
            error;
          /* part3 = everything not in the first three buckets. */
          VectorSubtract(xSumwSum,part2,&part3);
          VectorSubtract(part3,part1,&part3);
          VectorSubtract(part3,part0,&part3);
          /* Closed-form least-squares solve for endpoints a (start) and
             b (end) given this bucket assignment. */
          VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
          VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
          VectorInit(alpha2_sum,alphax_sum.w);
          VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
          VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
          VectorInit(beta2_sum,betax_sum.w);
          VectorAdd(part1,part2,&alphabeta_sum);
          VectorInit(alphabeta_sum,alphabeta_sum.w);
          VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);
          VectorMultiply(alpha2_sum,beta2_sum,&factor);
          VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
            &factor);
          VectorReciprocal(factor,&factor);
          VectorMultiply(alphax_sum,beta2_sum,&a);
          VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
          VectorMultiply(a,factor,&a);
          VectorMultiply(betax_sum,alpha2_sum,&b);
          VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
          VectorMultiply(b,factor,&b);
          /* Snap both endpoints to the RGB565 grid. */
          VectorClamp(&a);
          VectorMultiplyAdd(grid,a,half,&a);
          VectorTruncate(&a);
          VectorMultiply(a,gridrcp,&a);
          VectorClamp(&b);
          VectorMultiplyAdd(grid,b,half,&b);
          VectorTruncate(&b);
          VectorMultiply(b,gridrcp,&b);
          /* Metric-weighted squared error of this candidate. */
          VectorMultiply(b,b,&e1);
          VectorMultiply(e1,beta2_sum,&e1);
          VectorMultiply(a,a,&e2);
          VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);
          VectorMultiply(a,b,&e2);
          VectorMultiply(e2,alphabeta_sum,&e2);
          VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
          VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
          VectorMultiplyAdd(two,e2,e1,&e2);
          VectorMultiply(e2,metric,&e2);
          error = e2.x + e2.y + e2.z;
          /* Unsynchronized fast-path check; re-validated under the
             critical section before the shared best-* state is updated. */
          if (error < bestError)
          {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (DDS_CompressClusterFit)
#endif
            {
              if (error < bestError)
              {
                VectorCopy43(a,start);
                VectorCopy43(b,end);
                bestError = error;
                besti = i;
                bestj = j;
                bestk = k;
                bestIteration = iterationIndex;
              }
            }
          }
          if (k == count)
            break;
          VectorAdd(pointsWeights[k],part2,&part2);
          k++;
        }
        if (j == count)
          break;
        VectorAdd(pointsWeights[j],part1,&part1);
        j++;
      }
    }
    /* Stop when the last iteration produced no improvement, after 8 passes,
       or when the new ordering repeats an earlier one. */
    if (bestIteration != iterationIndex)
      break;
    iterationIndex++;
    if (iterationIndex == 8)
      break;
    VectorSubtract3(*end,*start,&axis);
    if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
        iterationIndex) == MagickFalse)
      break;
  }
  /* Translate the winning split positions into per-point bucket codes, then
     back to raster order via map. */
  o = order + (16*bestIteration);
  for (i=0; i < (ssize_t) besti; i++)
    unordered[o[i]] = 0;
  for (i=besti; i < (ssize_t) bestj; i++)
    unordered[o[i]] = 2;
  for (i=bestj; i < (ssize_t) bestk; i++)
    unordered[o[i]] = 3;
  for (i=bestk; i < (ssize_t) count; i++)
    unordered[o[i]] = 1;
  RemapIndices(map,unordered,indices);
}
/*
  CompressRangeFit(): fast endpoint selection.  The points with the smallest
  and largest projections onto the principal axis become the block endpoints
  (snapped to the RGB565 grid); each point is then assigned the closest of
  the four interpolated codes under the given colour metric.
*/
static void CompressRangeFit(const size_t count,
  const DDSVector4* points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
  unsigned char *indices)
{
  float
    d,
    bestDist,
    max,
    min,
    val;
  DDSVector3
    codes[4],
    grid,
    gridrcp,
    half,
    dist;
  ssize_t
    i;
  size_t
    bestj,
    j;
  unsigned char
    closest[16];
  VectorInit3(half,0.5f);
  /* RGB565 quantization grid and its reciprocal. */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  if (count > 0)
  {
    /* Endpoints = points with extreme projections on the principal axis. */
    VectorCopy43(points[0],start);
    VectorCopy43(points[0],end);
    min = max = Dot(points[0],principle);
    for (i=1; i < (ssize_t) count; i++)
    {
      val = Dot(points[i],principle);
      if (val < min)
      {
        VectorCopy43(points[i],start);
        min = val;
      }
      else if (val > max)
      {
        VectorCopy43(points[i],end);
        max = val;
      }
    }
  }
  /* Snap both endpoints to the RGB565 grid. */
  VectorClamp3(start);
  VectorMultiplyAdd3(grid,*start,half,start);
  VectorTruncate3(start);
  VectorMultiply3(*start,gridrcp,start);
  VectorClamp3(end);
  VectorMultiplyAdd3(grid,*end,half,end);
  VectorTruncate3(end);
  VectorMultiply3(*end,gridrcp,end);
  /* The four representable colours: endpoints plus 1/3 and 2/3 blends. */
  codes[0] = *start;
  codes[1] = *end;
  codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
  codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
  codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
  codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
  codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
  codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));
  /* Assign each point the metric-weighted nearest code. */
  for (i=0; i < (ssize_t) count; i++)
  {
    bestDist = 1e+37f;
    bestj = 0;
    for (j=0; j < 4; j++)
    {
      dist.x = (points[i].x - codes[j].x) * metric.x;
      dist.y = (points[i].y - codes[j].y) * metric.y;
      dist.z = (points[i].z - codes[j].z) * metric.z;
      d = Dot(dist,dist);
      if (d < bestDist)
      {
        bestDist = d;
        bestj = j;
      }
    }
    closest[i] = (unsigned char) bestj;
  }
  RemapIndices(map, closest, indices);
}
/*
  ComputeEndPoints(): picks block endpoints for a single-colour block from
  precomputed per-channel lookup tables (R/B over 5 bits, G over 6 bits).
  Two candidate source blocks (i=0,1) are scored by summed squared channel
  error; the better one supplies start/end and *index (0 or 2).  maxError
  starts at SIZE_MAX, so the first candidate is always accepted, and ties
  on the second candidate replace the first (strict '>' skips).
*/
static void ComputeEndPoints(const DDSSingleColorLookup *lookup[],
  const unsigned char *color, DDSVector3 *start, DDSVector3 *end,
  unsigned char *index)
{
  ssize_t
    i;
  size_t
    c,
    maxError = SIZE_MAX;
  for (i=0; i < 2; i++)
  {
    const DDSSourceBlock*
      sources[3];
      size_t
        error = 0;
    for (c=0; c < 3; c++)
    {
      sources[c] = &lookup[c][color[c]].sources[i];
      error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
    }
    if (error > maxError)
      continue;
    /* Endpoints come back in 5/6/5-bit units; normalize to [0,1]. */
    start->x = (float) sources[0]->start / 31.0f;
    start->y = (float) sources[1]->start / 63.0f;
    start->z = (float) sources[2]->start / 31.0f;
    end->x = (float) sources[0]->end / 31.0f;
    end->y = (float) sources[1]->end / 63.0f;
    end->z = (float) sources[2]->end / 31.0f;
    *index = (unsigned char) (2*i);
    maxError = error;
  }
}
/*
  ComputePrincipleComponent(): estimates the principal eigenvector of the
  symmetric 3x3 covariance matrix (packed as [xx, xy, xz, yy, yz, zz]) with
  a fixed number of power iterations, rescaling by the largest component
  each step to keep the iterate bounded.
*/
static void ComputePrincipleComponent(const float *covariance,
  DDSVector3 *principle)
{
  DDSVector4
    estimate,
    rows[3];
  ssize_t
    iter;
  /* Expand the packed upper triangle into explicit matrix rows. */
  rows[0].x = covariance[0];
  rows[0].y = covariance[1];
  rows[0].z = covariance[2];
  rows[0].w = 0.0f;
  rows[1].x = covariance[1];
  rows[1].y = covariance[3];
  rows[1].z = covariance[4];
  rows[1].w = 0.0f;
  rows[2].x = covariance[2];
  rows[2].y = covariance[4];
  rows[2].z = covariance[5];
  rows[2].w = 0.0f;
  VectorInit(estimate,1.0f);
  for (iter=0; iter < 8; iter++)
  {
    DDSVector4
      product;
    float
      scale;
    /* product = covariance * estimate, accumulated column by column. */
    product.x = ((rows[0].x * estimate.x) + (rows[1].x * estimate.y)) +
      (rows[2].x * estimate.z);
    product.y = ((rows[0].y * estimate.x) + (rows[1].y * estimate.y)) +
      (rows[2].y * estimate.z);
    product.z = ((rows[0].z * estimate.x) + (rows[1].z * estimate.y)) +
      (rows[2].z * estimate.z);
    product.w = ((rows[0].w * estimate.x) + (rows[1].w * estimate.y)) +
      (rows[2].w * estimate.z);
    /* Rescale so the largest of x/y/z becomes 1. */
    scale = (float) PerceptibleReciprocal(MagickMax(product.x,
      MagickMax(product.y,product.z)));
    estimate.x = product.x * scale;
    estimate.y = product.y * scale;
    estimate.z = product.z * scale;
    estimate.w = product.w * scale;
  }
  VectorCopy43(estimate,principle);
}
/*
  ComputeWeightedCovariance(): accumulates the six unique entries of the
  weighted 3x3 covariance matrix of the colour points about their weighted
  centroid.  covariance receives [xx, xy, xz, yy, yz, zz]; each point's .w
  component is its weight.
*/
static void ComputeWeightedCovariance(const size_t count,
  const DDSVector4 *points, float *covariance)
{
  DDSVector3
    mean;
  float
    wsum;
  size_t
    n;
  /* Weighted centroid of the points. */
  wsum = 0.0f;
  VectorInit3(mean,0.0f);
  for (n=0; n < count; n++)
  {
    wsum += points[n].w;
    mean.x += (points[n].x * points[n].w);
    mean.y += (points[n].y * points[n].w);
    mean.z += (points[n].z * points[n].w);
  }
  if( wsum > 1.192092896e-07F)  /* guard the divide against ~zero weight */
  {
    mean.x /= wsum;
    mean.y /= wsum;
    mean.z /= wsum;
  }
  for (n=0; n < 6; n++)
    covariance[n] = 0.0f;
  /* Accumulate the symmetric matrix one point at a time. */
  for (n = 0; n < count; n++)
  {
    DDSVector3
      delta,
      weighted;
    delta.x = points[n].x - mean.x;
    delta.y = points[n].y - mean.y;
    delta.z = points[n].z - mean.z;
    weighted.x = points[n].w * delta.x;
    weighted.y = points[n].w * delta.y;
    weighted.z = points[n].w * delta.z;
    covariance[0] += delta.x*weighted.x;
    covariance[1] += delta.x*weighted.y;
    covariance[2] += delta.x*weighted.z;
    covariance[3] += delta.y*weighted.y;
    covariance[4] += delta.y*weighted.z;
    covariance[5] += delta.z*weighted.z;
  }
}
/*
  WriteAlphas(): encodes the 16 alpha values of a DXT5 block.  Both the
  5-interpolant (with explicit 0/255 codes) and 7-interpolant encodings are
  tried; whichever yields the lower error is written: two endpoint bytes
  followed by 16 packed 3-bit indices (6 bytes).
  NOTE(review): min5/max5/min7/max7 are passed to FixRange() by value; if
  FixRange is a function (not a macro taking lvalues) it cannot adjust the
  ranges used below -- confirm FixRange's definition.
*/
static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5,
  size_t max5, size_t min7, size_t max7)
{
  ssize_t
    i;
  size_t
    err5,
    err7,
    j;
  unsigned char
    indices5[16],
    indices7[16];
  FixRange(min5,max5,5);
  err5 = CompressAlpha(min5,max5,5,alphas,indices5);
  FixRange(min7,max7,7);
  err7 = CompressAlpha(min7,max7,7,alphas,indices7);
  if (err7 < err5)
  {
    /* 7-interpolant mode won: remap its indices into the 5-mode index
       numbering used below, with the endpoints swapped. */
    for (i=0; i < 16; i++)
    {
      unsigned char
        index;
      index = indices7[i];
      if( index == 0 )
        indices5[i] = 1;
      else if (index == 1)
        indices5[i] = 0;
      else
        indices5[i] = 9 - index;
      }
    min5 = max7;
    max5 = min7;
  }
  (void) WriteBlobByte(image,(unsigned char) min5);
  (void) WriteBlobByte(image,(unsigned char) max5);
  /* Pack 16 x 3-bit indices into two 24-bit little-endian groups. */
  for(i=0; i < 2; i++)
  {
    size_t
      value = 0;
    for (j=0; j < 8; j++)
    {
      size_t index = (size_t) indices5[j + i*8];
      value |= ( index << 3*j );
    }
    for (j=0; j < 3; j++)
    {
      size_t byte = (value >> 8*j) & 0xff;
      (void) WriteBlobByte(image,(unsigned char) byte);
    }
  }
}
/*
  WriteIndices(): writes the colour half of a DXT block: the two RGB565
  endpoints (little-endian) followed by 16 packed 2-bit indices.  When the
  raw endpoint order has a < b the endpoints are swapped and index 0/1 are
  exchanged (XOR 1) to compensate; equal endpoints force all indices to 0.
  (Per the DXT convention the stored order selects the block mode --
  endpoint a is written as the larger value here.)
*/
static void WriteIndices(Image *image, const DDSVector3 start,
  const DDSVector3 end, unsigned char *indices)
{
  ssize_t
    i;
  size_t
    a,
    b;
  unsigned char
    remapped[16];
  const unsigned char
    *ind;
  a = ColorTo565(start);
  b = ColorTo565(end);
  for (i=0; i<16; i++)
  {
    if( a < b )
      remapped[i] = (indices[i] ^ 0x1) & 0x3;  /* account for the swap below */
    else if( a == b )
      remapped[i] = 0;
    else
      remapped[i] = indices[i];
  }
  if( a < b )
    Swap(a,b);
  (void) WriteBlobByte(image,(unsigned char) (a & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (a >> 8));
  (void) WriteBlobByte(image,(unsigned char) (b & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (b >> 8));
  /* Four bytes of four 2-bit indices each. */
  for (i=0; i<4; i++)
  {
     ind = remapped + 4*i;
     (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) |
       (ind[3] << 6));
  }
}
/*
  WriteCompressed(): compresses one block of unique colour points and writes
  it to the blob.  Computes the weighted covariance and its principal axis,
  then uses the fast range fit (when cluster fit is disabled or there are no
  points) or the exhaustive cluster fit.  Note covariance is declared with
  16 floats although only the 6 packed entries are used.
*/
static void WriteCompressed(Image *image, const size_t count,
  DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit)
{
  float
    covariance[16];
  DDSVector3
    end,
    principle,
    start;
  DDSVector4
    metric;
  unsigned char
    indices[16];
  /* Uniform metric: all channels weighted equally. */
  VectorInit(metric,1.0f);
  VectorInit3(start,0.0f);
  VectorInit3(end,0.0f);
  ComputeWeightedCovariance(count,points,covariance);
  ComputePrincipleComponent(covariance,&principle);
  if ((clusterFit == MagickFalse) || (count == 0))
    CompressRangeFit(count,points,map,principle,metric,&start,&end,indices);
  else
    CompressClusterFit(count,points,map,principle,metric,&start,&end,indices);
  WriteIndices(image,start,end,indices);
}
/*
  WriteSingleColorFit(): encodes a block whose 16 pixels share one colour.
  The colour is quantized to 8 bits per channel and looked up in the
  precomputed single-colour tables to get optimal endpoints and a single
  interpolation index, which is replicated across the block.
*/
static void WriteSingleColorFit(Image *image, const DDSVector4 *points,
  const ssize_t *map)
{
  DDSVector3
    start,
    end;
  ssize_t
    i;
  unsigned char
    color[3],
    index,
    indexes[16],
    indices[16];
  color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255);
  color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255);
  color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255);
  index=0;
  ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index);
  /* Every pixel gets the same index; map restores raster order. */
  for (i=0; i< 16; i++)
    indexes[i]=index;
  RemapIndices(map,indexes,indices);
  WriteIndices(image,start,end,indices);
}
/*
  WriteFourCC(): walks the image in 4x4 tiles and writes each as a DXT1 or
  DXT5 block.  Per tile it gathers the pixel alphas (tracking the ranges
  needed for both 5- and 7-interpolant alpha modes), deduplicates identical
  colours into a weighted point list plus a raster->point map, then emits
  the alpha block (DXT5 only) followed by the colour block (single-colour
  fast path when only one unique colour remains).
*/
static void WriteFourCC(Image *image, const size_t compression,
  const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
  ExceptionInfo *exception)
{
  ssize_t
    x;
  ssize_t
    i,
    y,
    bx,
    by;
  const Quantum
    *p;
  for (y=0; y < (ssize_t) image->rows; y+=4)
  {
    for (x=0; x < (ssize_t) image->columns; x+=4)
    {
      MagickBooleanType
        match;
      DDSVector4
        point,
        points[16] = { 0 };
      size_t
        count = 0,
        max5 = 0,
        max7 = 0,
        min5 = 255,
        min7 = 255,
        columns = 4,
        rows = 4;
      ssize_t
        alphas[16],
        map[16];
      unsigned char
        alpha;
      /* Clip partial tiles at the right/bottom edges. */
      if (x + columns >= image->columns)
        columns = image->columns - x;
      if (y + rows >= image->rows)
        rows = image->rows - y;
      p=GetVirtualPixels(image,x,y,columns,rows,exception);
      if (p == (const Quantum *) NULL)
        break;
      for (i=0; i<16; i++)
      {
        map[i] = -1;
        alphas[i] = -1;
      }
      for (by=0; by < (ssize_t) rows; by++)
      {
        for (bx=0; bx < (ssize_t) columns; bx++)
        {
          if (compression == FOURCC_DXT5)
            alpha = ScaleQuantumToChar(GetPixelAlpha(image,p));
          else
            alpha = 255;
          if (compression == FOURCC_DXT5)
          {
            /* 7-mode range spans everything; 5-mode range excludes the
               explicitly representable 0 and 255 codes. */
            if (alpha < min7)
              min7 = alpha;
            if (alpha > max7)
              max7 = alpha;
            if (alpha != 0 && alpha < min5)
              min5 = alpha;
            if (alpha != 255 && alpha > max5)
              max5 = alpha;
          }
          alphas[4*by + bx] = (size_t)alpha;
          point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f;
          point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f;
          point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f;
          /* Optional weighting biases the fit towards opaque pixels. */
          point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f;
          p+=GetPixelChannels(image);
          /* Merge with an existing identical colour (for DXT1 only when the
             pixel is sufficiently opaque), accumulating its weight. */
          match = MagickFalse;
          for (i=0; i < (ssize_t) count; i++)
          {
            if ((points[i].x == point.x) &&
                (points[i].y == point.y) &&
                (points[i].z == point.z) &&
                (alpha >= 128 || compression == FOURCC_DXT5))
            {
              points[i].w += point.w;
              map[4*by + bx] = i;
              match = MagickTrue;
              break;
            }
          }
          if (match != MagickFalse)
            continue;
          points[count].x = point.x;
          points[count].y = point.y;
          points[count].z = point.z;
          points[count].w = point.w;
          map[4*by + bx] = count;
          count++;
        }
      }
      for (i=0; i < (ssize_t) count; i++)
        points[i].w = sqrt(points[i].w);
      if (compression == FOURCC_DXT5)
        WriteAlphas(image,alphas,min5,max5,min7,max7);
      if (count == 1)
        WriteSingleColorFit(image,points,map);
      else
        WriteCompressed(image,count,points,map,clusterFit);
    }
  }
}
/*
  WriteUncompressed(): writes the pixels row by row as raw bytes in BGR (or
  BGRA when the image has an alpha channel) order, one byte per channel.
*/
static void WriteUncompressed(Image *image, ExceptionInfo *exception)
{
  const Quantum
    *q;
  ssize_t
    col,
    row;
  for (row=0; row < (ssize_t) image->rows; row++)
  {
    q=GetVirtualPixels(image,0,row,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      break;  /* pixel cache failure: stop writing */
    for (col=0; col < (ssize_t) image->columns; col++)
    {
      /* DDS stores channels as B, G, R (, A). */
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,q)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,q)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,q)));
      if (image->alpha_trait != UndefinedPixelTrait)
        (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,q)));
      q+=GetPixelChannels(image);
    }
  }
}
/*
  WriteImageData(): dispatches the pixel-writing stage -- FourCC (DXT)
  compression when the pixel format asks for it, raw bytes otherwise.
*/
static void WriteImageData(Image *image, const size_t pixelFormat,
  const size_t compression,const MagickBooleanType clusterFit,
  const MagickBooleanType weightByAlpha, ExceptionInfo *exception)
{
  if (pixelFormat != DDPF_FOURCC)
    {
      WriteUncompressed(image,exception);
      return;
    }
  WriteFourCC(image,compression,clusterFit,weightByAlpha,exception);
}
/*
  WriteMipmaps(): writes `mipmaps' successively halved mipmap levels.  When
  fromlist is MagickTrue the levels are taken from the image list (and must
  already have the expected dimensions); otherwise each level is produced by
  resizing (from the original image, or from the previous level when the
  "dds:fast-mipmaps" option is set).  Every level temporarily shares the
  main image's blob so all data lands in one output stream.  Returns
  MagickFalse on a resize failure or a size mismatch in the list.
*/
static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info,
  const size_t pixelFormat,const size_t compression,const size_t mipmaps,
  const MagickBooleanType fromlist,const MagickBooleanType clusterFit,
  const MagickBooleanType weightByAlpha,ExceptionInfo *exception)
{
  const char
    *option;
  Image
    *mipmap_image,
    *resize_image;
  MagickBooleanType
    fast_mipmaps,
    status;
  ssize_t
    i;
  size_t
    columns,
    rows;
  columns=DIV2(image->columns);
  rows=DIV2(image->rows);
  option=GetImageOption(image_info,"dds:fast-mipmaps");
  fast_mipmaps=IsStringTrue(option);
  mipmap_image=image;
  resize_image=image;
  status=MagickTrue;
  for (i=0; i < (ssize_t) mipmaps; i++)
  {
    if (fromlist == MagickFalse)
      {
        mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter,
          exception);
        if (mipmap_image == (Image *) NULL)
          {
            status=MagickFalse;
            break;
          }
      }
    else
      {
        mipmap_image=mipmap_image->next;
        if ((mipmap_image->columns != columns) || (mipmap_image->rows != rows))
          ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported",
            image->filename);
      }
    /* Share the parent image's blob so this level appends to the stream. */
    DestroyBlob(mipmap_image);
    mipmap_image->blob=ReferenceBlob(image->blob);
    /*
      Bug fix: clusterFit and weightByAlpha were previously passed in
      swapped order; WriteImageData() expects (...,clusterFit,
      weightByAlpha,...).  Both are MagickBooleanType, so the swap
      compiled silently.
    */
    WriteImageData(mipmap_image,pixelFormat,compression,clusterFit,
      weightByAlpha,exception);
    if (fromlist == MagickFalse)
      {
        if (fast_mipmaps == MagickFalse)
          mipmap_image=DestroyImage(mipmap_image);
        else
          {
            /* Fast path: derive the next level from this one. */
            if (resize_image != image)
              resize_image=DestroyImage(resize_image);
            resize_image=mipmap_image;
          }
      }
    columns=DIV2(columns);
    rows=DIV2(rows);
  }
  if (resize_image != image)
    resize_image=DestroyImage(resize_image);
  return(status);
}
/*
  WriteDDSInfo(): writes the 128-byte DDS header: magic, the 124-byte
  DDS_HEADER (flags, dimensions, linear size or pitch, mipmap count,
  software tag), the 32-byte pixel-format block, and the caps words.
  All multi-byte fields are little-endian.
*/
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
  const size_t compression, const size_t mipmaps)
{
  char
    software[MagickPathExtent];
  ssize_t
    i;
  unsigned int
    format,
    caps,
    flags;
  flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
    DDSD_PIXELFORMAT);
  caps=(unsigned int) DDSCAPS_TEXTURE;
  format=(unsigned int) pixelFormat;
  /* Compressed images advertise a linear size, uncompressed a row pitch. */
  if (format == DDPF_FOURCC)
      flags=flags | DDSD_LINEARSIZE;
  else
      flags=flags | DDSD_PITCH;
  if (mipmaps > 0)
    {
      flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
      caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
    }
  if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait)
    format=format | DDPF_ALPHAPIXELS;
  (void) WriteBlob(image,4,(unsigned char *) "DDS ");
  (void) WriteBlobLSBLong(image,124);  /* dwSize of DDS_HEADER */
  (void) WriteBlobLSBLong(image,flags);
  (void) WriteBlobLSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobLSBLong(image,(unsigned int) image->columns);
  if (pixelFormat == DDPF_FOURCC)
    {
      /* Compressed DDS requires linear compressed size of first image */
      if (compression == FOURCC_DXT1)
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8));
      else /* DXT5 */
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16));
    }
  else
    {
      /* Uncompressed DDS requires byte pitch of first image */
      if (image->alpha_trait != UndefinedPixelTrait)
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4));
      else
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3));
    }
  (void) WriteBlobLSBLong(image,0x00);  /* dwDepth (no volume texture) */
  (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1);
  /* 11 reserved dwords; first bytes carry a software tag. */
  (void) memset(software,0,sizeof(software));
  (void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent);
  (void) WriteBlob(image,44,(unsigned char *) software);
  (void) WriteBlobLSBLong(image,32);  /* dwSize of DDS_PIXELFORMAT */
  (void) WriteBlobLSBLong(image,format);
  if (pixelFormat == DDPF_FOURCC)
    {
      (void) WriteBlobLSBLong(image,(unsigned int) compression);
      for(i=0;i < 5;i++) /* bitcount / masks */
        (void) WriteBlobLSBLong(image,0x00);
    }
  else
    {
      (void) WriteBlobLSBLong(image,0x00);  /* no FourCC code */
      if (image->alpha_trait != UndefinedPixelTrait)
        {
          /* 32-bit BGRA channel masks. */
          (void) WriteBlobLSBLong(image,32);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0xff000000);
        }
      else
        {
          /* 24-bit BGR channel masks. */
          (void) WriteBlobLSBLong(image,24);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0x00);
        }
    }
  (void) WriteBlobLSBLong(image,caps);
  for(i=0;i < 4;i++) /* ddscaps2 + reserved region */
    (void) WriteBlobLSBLong(image,0x00);
}
/*
  WriteDDSImage(): writes an image (plus optional mipmaps) in Microsoft DDS
  format.  Defaults to FourCC DXT5 (DXT1 when the image has no alpha
  channel); behaviour is refined by the "dds:compression",
  "dds:cluster-fit", "dds:weight-by-alpha", "dds:mipmaps" and "dds:raw"
  image options.  Returns MagickTrue on success.
*/
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
  Image *image, ExceptionInfo *exception)
{
  const char
    *option;
  size_t
    compression,
    columns,
    maxMipmaps,
    mipmaps,
    pixelFormat,
    rows;
  MagickBooleanType
    clusterFit,
    fromlist,
    status,
    weightByAlpha;
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /* Pick the pixel format and compression from defaults and options. */
  pixelFormat=DDPF_FOURCC;
  compression=FOURCC_DXT5;
  if (image->alpha_trait == UndefinedPixelTrait)
    compression=FOURCC_DXT1;
  if (LocaleCompare(image_info->magick,"dxt1") == 0)
    compression=FOURCC_DXT1;
  if (image_info->compression == DXT1Compression)
    compression=FOURCC_DXT1;
  else if (image_info->compression == NoCompression)
    pixelFormat=DDPF_RGB;
  option=GetImageOption(image_info,"dds:compression");
  if (option != (char *) NULL)
    {
      if (LocaleCompare(option,"dxt1") == 0)
        compression=FOURCC_DXT1;
      if (LocaleCompare(option,"dxt5") == 0)
        compression=FOURCC_DXT5;
      if (LocaleCompare(option,"none") == 0)
        pixelFormat=DDPF_RGB;
    }
  clusterFit=MagickFalse;
  weightByAlpha=MagickFalse;
  if (pixelFormat == DDPF_FOURCC)
    {
      option=GetImageOption(image_info,"dds:cluster-fit");
      if (IsStringTrue(option) != MagickFalse)
        {
          clusterFit=MagickTrue;
          /* Alpha weighting only applies to cluster fit with DXT5. */
          if (compression != FOURCC_DXT1)
            {
              option=GetImageOption(image_info,"dds:weight-by-alpha");
              if (IsStringTrue(option) != MagickFalse)
                weightByAlpha=MagickTrue;
            }
        }
    }
  mipmaps=0;
  fromlist=MagickFalse;
  option=GetImageOption(image_info,"dds:mipmaps");
  if (option != (char *) NULL)
    {
      if (LocaleNCompare(option,"fromlist",8) == 0)
        {
          Image
            *next;
          /* Use the remaining images in the list as mipmap levels. */
          fromlist=MagickTrue;
          next=image->next;
          while(next != (Image *) NULL)
          {
            mipmaps++;
            next=next->next;
          }
        }
    }
  /* Auto-generate mipmaps only for power-of-two dimensions. */
  if ((mipmaps == 0) &&
      ((image->columns & (image->columns - 1)) == 0) &&
      ((image->rows & (image->rows - 1)) == 0))
    {
      maxMipmaps=SIZE_MAX;
      if (option != (char *) NULL)
        maxMipmaps=StringToUnsignedLong(option);
      if (maxMipmaps != 0)
        {
          columns=image->columns;
          rows=image->rows;
          while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps)
          {
            columns=DIV2(columns);
            rows=DIV2(rows);
            mipmaps++;
          }
        }
    }
  option=GetImageOption(image_info,"dds:raw");
  if (IsStringTrue(option) == MagickFalse)
    WriteDDSInfo(image,pixelFormat,compression,mipmaps);
  else
    mipmaps=0;  /* raw mode: no header, no mipmaps */
  WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
    exception);
  if ((mipmaps > 0) && (WriteMipmaps(image,image_info,pixelFormat,compression,
       mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse))
    {
      /* Bug fix: close the blob on the failure path too (it was leaked). */
      (void) CloseBlob(image);
      return(MagickFalse);
    }
  (void) CloseBlob(image);
  return(MagickTrue);
}
|
array_swap.c | #include<stdio.h>
#include<omp.h>
#include<stdlib.h>
int main(int argc, char* argv[])
{
  /* Swap arrays a and b element-wise in parallel.  Each iteration touches a
     distinct index, so the loop carries no dependences; temp is privatized
     per thread to avoid a race on the scratch variable. */
  int i, temp, n=5;
  int a[5] = {1, 2, 3, 4, 5};
  int b[5] = {6, 7, 8, 9, 10};
#pragma omp parallel for private(temp)
  for (i=0;i<n;i++) {
    temp = a[i];
    a[i] = b[i];
    b[i] = temp;
  }
  /* Report the maximum thread count OpenMP would use. */
  printf("thread %d\n", omp_get_max_threads());
} |
DRB016-outputdep-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The loop in this example cannot be parallelized.
This pattern has two pair of dependencies:
1. loop carried output dependence
x = .. :
2. loop carried true dependence due to:
.. = x;
x = ..;
Data race pairs: we allow two pairs to preserve the original code pattern.
1. x@73:12 vs. x@74:5
2. x@74:5 vs. x@74:5
*/
#include "omprace.h"
#include <omp.h>
#include <stdio.h>
int a[100];
int main()
{
  omprace_init();
  int len=100;
  int i,x=10;
  /* INTENTIONAL data race (DataRaceBench DRB016): the shared scalar x is
     read (a[i] = x) and written (x = i) by every iteration, producing a
     loop-carried true dependence and a loop-carried output dependence.
     Do NOT privatize x or add synchronization -- the race IS the test. */
#pragma omp parallel for
  for (i=0;i<len;i++)
  {
    a[i] = x;
    x=i;
  }
  printf("x=%d",x);
  omprace_fini();
  return 0;
}
|
cones.c | #include "cones.h"
#include "linalg.h"
#include "scs.h"
#include "scs_blas.h" /* contains BLAS(X) macros and type info */
#include "util.h"
#define CONE_TOL (1e-9)
#define CONE_THRESH (1e-8)
#define EXP_CONE_MAX_ITERS (100)
#define BOX_CONE_MAX_ITERS (25)
#define POW_CONE_MAX_ITERS (20)
/* In the box cone projection we penalize the `t` term additionally by this
* factor. This encourages the `t` term to stay close to the incoming `t` term,
* which should provide better convergence since typically the `t` term does
* not appear in the linear system other than `t = 1`. Setting to 1 is
* the vanilla projection.
*/
#define BOX_T_SCALE (1.)
/* Box cone limits (+ or -) taken to be INF */
#define MAX_BOX_VAL (1e15)
#ifdef USE_LAPACK
void BLAS(syev)(const char *jobz, const char *uplo, blas_int *n, scs_float *a,
blas_int *lda, scs_float *w, scs_float *work, blas_int *lwork,
blas_int *info);
blas_int BLAS(syrk)(const char *uplo, const char *trans, const blas_int *n,
const blas_int *k, const scs_float *alpha,
const scs_float *a, const blas_int *lda,
const scs_float *beta, scs_float *c, const blas_int *ldc);
void BLAS(scal)(const blas_int *n, const scs_float *sa, scs_float *sx,
const blas_int *incx);
#endif
/* set the vector of rho y terms, based on scale and cones.
 * Rows of the zero (dual-free) cone get a penalty 1000x smaller than the
 * rest (which get 1/scale); the box cone's `t` row is additionally scaled
 * by BOX_T_SCALE.  rho_y_vec must have m entries. */
void SCS(set_rho_y_vec)(const ScsCone *k, scs_float scale, scs_float *rho_y_vec,
                        scs_int m) {
  scs_int i, count = 0;
  /* f cone */
  for (i = 0; i < k->z; ++i) {
    /* set rho_y small for z, similar to rho_x term, since z corresponds to
     * dual free cone, this effectively decreases penalty on those entries
     * and lets them be determined almost entirely by the linear system solve
     */
    rho_y_vec[i] = 1.0 / (1000. * scale);
  }
  count += k->z;
  /* others */
  for (i = count; i < m; ++i) {
    rho_y_vec[i] = 1.0 / scale;
  }
  /* Note, if updating this to use different scales for other cones (e.g. box)
   * then you must be careful to also include the effect of the rho_y_vec
   * in the cone projection operator.
   */
  /* Increase rho_y_vec for the t term in the box cone */
  if (k->bsize) {
    rho_y_vec[k->z + k->l] *= BOX_T_SCALE;
  }
}
/* Number of entries in the packed (lower-triangular) representation of an
 * s-by-s symmetric PSD cone variable: s*(s+1)/2. */
static inline scs_int get_sd_cone_size(scs_int s) {
  scs_int packed = s * (s + 1);
  return packed / 2;
}
/*
 * boundaries will contain array of indices of rows of A corresponding to
 * cone boundaries, boundaries[0] is starting index for cones of size strictly
 * larger than 1, boundaries malloc-ed here so should be freed.
 * Layout: [z+l+bsize, q sizes..., s packed sizes..., 3 per exp cone,
 * 3 per power cone].  Returns the length of the array.
 */
scs_int SCS(set_cone_boundaries)(const ScsCone *k, scs_int **cone_boundaries) {
  scs_int i, s_cone_sz, count = 0;
  scs_int cone_boundaries_len =
      1 + k->qsize + k->ssize + k->ed + k->ep + k->psize;
  scs_int *b = (scs_int *)scs_calloc(cone_boundaries_len, sizeof(scs_int));
  /* NOTE(review): b is not NULL-checked before the writes below; an
   * scs_calloc failure would crash here -- confirm the allocation policy. */
  /* cones that can be scaled independently */
  b[count] = k->z + k->l + k->bsize;
  count += 1; /* started at 0 now move to first entry */
  for (i = 0; i < k->qsize; ++i) {
    b[count + i] = k->q[i];
  }
  count += k->qsize;
  for (i = 0; i < k->ssize; ++i) {
    s_cone_sz = get_sd_cone_size(k->s[i]);
    b[count + i] = s_cone_sz;
  }
  count += k->ssize; /* add ssize here not ssize * (ssize + 1) / 2 */
  /* exp cones */
  for (i = 0; i < k->ep + k->ed; ++i) {
    b[count + i] = 3;
  }
  count += k->ep + k->ed;
  /* power cones */
  for (i = 0; i < k->psize; ++i) {
    b[count + i] = 3;
  }
  count += k->psize;
  /* other cones */
  *cone_boundaries = b;
  return cone_boundaries_len;
}
/* Total number of rows of A covered by the cone specification: scalar cones
 * plus the sum of all second-order, packed-PSD, exponential (3 each) and
 * power (3 each) cone sizes. */
static scs_int get_full_cone_dims(const ScsCone *k) {
  scs_int i, dims;
  dims = k->z + k->l + k->bsize;
  for (i = 0; i < k->qsize; ++i) {
    dims += k->q[i];
  }
  for (i = 0; i < k->ssize; ++i) {
    dims += get_sd_cone_size(k->s[i]);
  }
  /* Exponential (primal + dual) and power cones occupy 3 rows apiece. */
  dims += 3 * k->ed;
  dims += 3 * k->ep;
  dims += 3 * k->psize;
  return dims;
}
/* Sanity-check the cone specification against the data dimensions.
 * Verifies the total cone size matches d->m, that every declared size is
 * non-negative, that box bounds are ordered, and that power-cone exponents
 * lie in [-1,1].  Returns 0 when consistent, -1 (after printing a
 * diagnostic) otherwise. */
scs_int SCS(validate_cones)(const ScsData *d, const ScsCone *k) {
  scs_int i;
  if (get_full_cone_dims(k) != d->m) {
    scs_printf("cone dimensions %li not equal to num rows in A = m = %li\n",
               (long)get_full_cone_dims(k), (long)d->m);
    return -1;
  }
  if (k->z && k->z < 0) {
    scs_printf("free cone dimension error\n");
    return -1;
  }
  if (k->l && k->l < 0) {
    scs_printf("lp cone dimension error\n");
    return -1;
  }
  if (k->bsize) {
    if (k->bsize < 0) {
      scs_printf("box cone dimension error\n");
      return -1;
    }
    for (i = 0; i < k->bsize - 1; ++i) {
      if (k->bl[i] > k->bu[i]) {
        scs_printf("infeasible: box lower bound larger than upper bound\n");
        return -1;
      }
    }
  }
  if (k->qsize && k->q) {
    if (k->qsize < 0) {
      scs_printf("soc cone dimension error\n");
      return -1;
    }
    for (i = 0; i < k->qsize; ++i) {
      if (k->q[i] < 0) {
        scs_printf("soc cone dimension error\n");
        return -1;
      }
    }
  }
  if (k->ssize && k->s) {
    if (k->ssize < 0) {
      scs_printf("sd cone dimension error\n");
      return -1;
    }
    for (i = 0; i < k->ssize; ++i) {
      if (k->s[i] < 0) {
        scs_printf("sd cone dimension error\n");
        return -1;
      }
    }
  }
  /* Bug fix: the "ep"/"ed" diagnostics were swapped relative to the fields
   * being tested; each message now names the field it checks. */
  if (k->ed && k->ed < 0) {
    scs_printf("ed cone dimension error\n");
    return -1;
  }
  if (k->ep && k->ep < 0) {
    scs_printf("ep cone dimension error\n");
    return -1;
  }
  if (k->psize && k->p) {
    if (k->psize < 0) {
      scs_printf("power cone dimension error\n");
      return -1;
    }
    for (i = 0; i < k->psize; ++i) {
      if (k->p[i] < -1 || k->p[i] > 1) {
        scs_printf("power cone error, values must be in [-1,1]\n");
        return -1;
      }
    }
  }
  return 0;
}
/* Free all memory owned by the cone work-space, then the struct itself.
 * Bug fix: the original dereferenced c (c->Xs) before its trailing
 * `if (c)` null check; an early return now makes NULL input safe. */
void SCS(finish_cone)(ScsConeWork *c) {
  if (!c) {
    return;
  }
#ifdef USE_LAPACK
  if (c->Xs) {
    scs_free(c->Xs);
  }
  if (c->Z) {
    scs_free(c->Z);
  }
  if (c->e) {
    scs_free(c->e);
  }
  if (c->work) {
    scs_free(c->work);
  }
#endif
  if (c->s) {
    scs_free(c->s);
  }
  if (c->bu) {
    scs_free(c->bu);
  }
  if (c->bl) {
    scs_free(c->bl);
  }
  scs_free(c);
}
/* Build a human-readable, multi-line summary of the cone sizes.  Returns a
 * heap-allocated string the caller must free.
 * NOTE(review): the malloc result is not checked and the 512-byte buffer is
 * assembled with unbounded sprintf -- assumed to fit for realistic cone
 * configurations; confirm upstream. */
char *SCS(get_cone_header)(const ScsCone *k) {
  char *tmp = (char *)scs_malloc(sizeof(char) * 512);
  scs_int i, soc_vars, sd_vars;
  sprintf(tmp, "cones: ");
  if (k->z) {
    sprintf(tmp + strlen(tmp), "\t  z: primal zero / dual free vars: %li\n",
            (long)k->z);
  }
  if (k->l) {
    sprintf(tmp + strlen(tmp), "\t  l: linear vars: %li\n", (long)k->l);
  }
  if (k->bsize) {
    sprintf(tmp + strlen(tmp), "\t  b: box cone vars: %li\n", (long)(k->bsize));
  }
  soc_vars = 0;
  if (k->qsize && k->q) {
    for (i = 0; i < k->qsize; i++) {
      soc_vars += k->q[i];
    }
    sprintf(tmp + strlen(tmp), "\t  q: soc vars: %li, qsize: %li\n",
            (long)soc_vars, (long)k->qsize);
  }
  sd_vars = 0;
  if (k->ssize && k->s) {
    for (i = 0; i < k->ssize; i++) {
      sd_vars += get_sd_cone_size(k->s[i]);
    }
    sprintf(tmp + strlen(tmp), "\t  s: psd vars: %li, ssize: %li\n",
            (long)sd_vars, (long)k->ssize);
  }
  if (k->ep || k->ed) {
    sprintf(tmp + strlen(tmp), "\t  e: exp vars: %li, dual exp vars: %li\n",
            (long)(3 * k->ep), (long)(3 * k->ed));
  }
  if (k->psize && k->p) {
    sprintf(tmp + strlen(tmp), "\t  p: primal + dual power vars: %li\n",
            (long)(3 * k->psize));
  }
  return tmp;
}
/* One-dimensional Newton iteration used by the exponential-cone projection:
 * solves the scalar optimality condition in t for a fixed dual variable rho
 * and shifted point (y_hat, z_hat); w seeds the starting guess.  t is
 * clamped into [max(-z_hat,0), inf); the loop stops when the step or the
 * scaled residual falls below CONE_TOL, warning if EXP_CONE_MAX_ITERS is
 * exhausted.  Returns t + z_hat. */
static scs_float exp_newton_one_d(scs_float rho, scs_float y_hat,
                                  scs_float z_hat, scs_float w) {
  scs_float t_prev, t = MAX(w - z_hat, MAX(-z_hat, 1e-9));
  scs_float f = 1., fp = 1.;
  scs_int i;
  for (i = 0; i < EXP_CONE_MAX_ITERS; ++i) {
    t_prev = t;
    /* residual and its derivative at the current t */
    f = t * (t + z_hat) / rho / rho - y_hat / rho + log(t / rho) + 1;
    fp = (2 * t + z_hat) / rho / rho + 1 / t;
    t = t - f / fp;
    if (t <= -z_hat) {
      t = -z_hat;
      break;
    } else if (t <= 0) {
      t = 0;
      break;
    } else if (ABS(t - t_prev) < CONE_TOL) {
      break;
    } else if (SQRTF(f * f / fp) < CONE_TOL) {
      break;
    }
  }
  if (i == EXP_CONE_MAX_ITERS) {
    scs_printf("warning: exp cone newton step hit maximum %i iters\n", (int)i);
    scs_printf("rho=%1.5e; y_hat=%1.5e; z_hat=%1.5e; w=%1.5e; f=%1.5e, "
               "fp=%1.5e, t=%1.5e, t_prev= %1.5e\n",
               rho, y_hat, z_hat, w, f, fp, t, t_prev);
  }
  return t + z_hat;
}
/* Recover the primal candidate x (length 3) from the input point v for a
 * fixed dual variable rho; w seeds the inner Newton solve.  Note x[1]
 * depends on x[2], so the assignment order matters. */
static void exp_solve_for_x_with_rho(const scs_float *v, scs_float *x,
                                     scs_float rho, scs_float w) {
  x[2] = exp_newton_one_d(rho, v[1], v[2], w);
  x[1] = (x[2] - v[2]) * x[2] / rho;
  x[0] = v[0] - rho;
}
/* Evaluate the gradient (wrt the dual variable rho) used by the bisection
 * in proj_exp_cone; as a side effect x receives the candidate projection
 * for this rho. The log term is dropped when x[1] is (numerically) zero. */
static scs_float exp_calc_grad(const scs_float *v, scs_float *x, scs_float rho,
                               scs_float w) {
  exp_solve_for_x_with_rho(v, x, rho, w);
  return (x[1] <= 1e-12) ? x[0] : x[0] + x[1] * log(x[1] / x[2]);
}
/* Bracket the root of the gradient in rho: starting from ub = 0.125,
 * double ub (carrying the previous ub into lb) until the gradient is
 * non-positive, yielding [lb, ub] for the bisection in proj_exp_cone. */
static void exp_get_rho_ub(const scs_float *v, scs_float *x, scs_float *ub,
                           scs_float *lb) {
  *lb = 0;
  *ub = 0.125;
  for (; exp_calc_grad(v, x, *ub, v[1]) > 0; (*ub) *= 2) {
    *lb = *ub;
  }
}
/* project onto the exponential cone, v has dimension *exactly* 3;
 * in-place: on return v holds the projection. Always returns 0.
 * Handles the three easy cases analytically, otherwise bisects on the
 * dual variable rho (bracketed by exp_get_rho_ub). */
static scs_int proj_exp_cone(scs_float *v) {
  scs_int i;
  scs_float ub, lb, rho, g, x[3];
  scs_float r = v[0], s = v[1], t = v[2];
  /* v in cl(Kexp) */
  if ((s * exp(r / s) - t <= CONE_THRESH && s > 0) ||
      (r <= 0 && s == 0 && t >= 0)) {
    /* already (numerically) in the cone: projection is the identity */
    return 0;
  }
  /* -v in Kexp^* */
  if ((r > 0 && r * exp(s / r) + exp(1) * t <= CONE_THRESH) ||
      (r == 0 && s <= 0 && t <= 0)) {
    /* in the polar cone: projection is the origin */
    memset(v, 0, 3 * sizeof(scs_float));
    return 0;
  }
  /* special case with analytical solution */
  if (r < 0 && s < 0) {
    v[1] = 0.0;
    v[2] = MAX(v[2], 0);
    return 0;
  }
  /* iterative procedure to find projection, bisects on dual variable: */
  exp_get_rho_ub(v, x, &ub, &lb); /* get starting upper and lower bounds */
  for (i = 0; i < EXP_CONE_MAX_ITERS; ++i) {
    rho = (ub + lb) / 2; /* halfway between upper and lower bounds */
    g = exp_calc_grad(v, x, rho, x[1]); /* calculates gradient wrt dual var */
    if (g > 0) {
      lb = rho;
    } else {
      ub = rho;
    }
    if (ub - lb < CONE_TOL) {
      break;
    }
  }
#if VERBOSITY > 10
  scs_printf("exponential cone proj iters %i\n", (int)i);
#endif
  if (i == EXP_CONE_MAX_ITERS) {
    scs_printf("warning: exp cone outer step hit maximum %i iters\n", (int)i);
    scs_printf("r=%1.5e; s=%1.5e; t=%1.5e\n", r, s, t);
  }
  /* x holds the projection computed by the last gradient evaluation */
  v[0] = x[0];
  v[1] = x[1];
  v[2] = x[2];
  return 0;
}
/* Allocate the eigendecomposition workspaces (sized for the largest
 * semidefinite cone block) used by proj_semi_definite_cone.
 * Returns 0 on success, -1 on failure. Without LAPACK, only trivial
 * (size <= 1) SD cones are accepted. */
static scs_int set_up_sd_cone_work_space(ScsConeWork *c, const ScsCone *k) {
  scs_int i;
#ifdef USE_LAPACK
  blas_int n_max = 0;
  blas_int neg_one = -1; /* lwork = -1 means LAPACK workspace-size query */
  blas_int info = 0;
  scs_float wkopt = 0.0;
#if VERBOSITY > 0
#define _STR_EXPAND(tok) #tok
#define _STR(tok) _STR_EXPAND(tok)
  scs_printf("BLAS(func) = '%s'\n", _STR(BLAS(func)));
#endif
  /* eigenvector decomp workspace */
  for (i = 0; i < k->ssize; ++i) {
    if (k->s[i] > n_max) {
      n_max = (blas_int)k->s[i];
    }
  }
  c->Xs = (scs_float *)scs_calloc(n_max * n_max, sizeof(scs_float));
  c->Z = (scs_float *)scs_calloc(n_max * n_max, sizeof(scs_float));
  c->e = (scs_float *)scs_calloc(n_max, sizeof(scs_float));
  /* workspace query */
  /* NOTE(review): c->Xs is passed here before the allocation checks below;
   * presumably fine for a pure size query, but worth confirming. */
  BLAS(syev)
  ("Vectors", "Lower", &n_max, c->Xs, &n_max, SCS_NULL, &wkopt, &neg_one,
   &info);
  if (info != 0) {
    scs_printf("FATAL: syev failure, info = %li\n", (long)info);
    return -1;
  }
  c->lwork = (blas_int)(wkopt + 1); /* +1 for int casting safety */
  c->work = (scs_float *)scs_calloc(c->lwork, sizeof(scs_float));
  if (!c->Xs || !c->Z || !c->e || !c->work) {
    return -1;
  }
  return 0;
#else
  /* no LAPACK linked: any non-trivial PSD block is a hard error */
  for (i = 0; i < k->ssize; i++) {
    if (k->s[i] > 1) {
      scs_printf(
          "FATAL: Cannot solve SDPs without linked blas+lapack libraries\n");
      scs_printf(
          "Install blas+lapack and re-compile SCS with blas+lapack library "
          "locations\n");
      return -1;
    }
  }
  return 0;
#endif
}
/* size of X is get_sd_cone_size(n), i.e. the packed lower-triangular length.
 * Projects the packed symmetric matrix X onto the PSD cone in-place via an
 * eigendecomposition; returns 0 on success, negative on a LAPACK argument
 * error, -1 when SCS was built without LAPACK. */
static scs_int proj_semi_definite_cone(scs_float *X, const scs_int n,
                                       ScsConeWork *c) {
  /* project onto the positive semi-definite cone */
#ifdef USE_LAPACK
  scs_int i, first_idx;
  blas_int nb = (blas_int)n;
  blas_int ncols_z;
  blas_int nb_plus_one = (blas_int)(n + 1); /* stride that walks the diagonal */
  blas_int one_int = 1;
  scs_float zero = 0., one = 1.;
  scs_float sqrt2 = SQRTF(2.0);
  scs_float sqrt2_inv = 1.0 / sqrt2;
  scs_float *Xs = c->Xs;
  scs_float *Z = c->Z;
  scs_float *e = c->e;
  scs_float *work = c->work;
  blas_int lwork = c->lwork;
  blas_int info = 0;
  scs_float sq_eig_pos;
#endif
  if (n == 0) {
    return 0;
  }
  if (n == 1) {
    /* 1x1 block: scalar projection onto the nonnegative reals */
    X[0] = MAX(X[0], 0.);
    return 0;
  }
#ifdef USE_LAPACK
  /* copy lower triangular matrix into full matrix */
  for (i = 0; i < n; ++i) {
    memcpy(&(Xs[i * (n + 1)]), &(X[i * n - ((i - 1) * i) / 2]),
           (n - i) * sizeof(scs_float));
  }
  /*
     rescale so projection works, and matrix norm preserved
     see http://www.seas.ucla.edu/~vandenbe/publications/mlbook.pdf pg 3
   */
  /* scale diags by sqrt(2) */
  BLAS(scal)(&nb, &sqrt2, Xs, &nb_plus_one); /* not n_squared */
  /* Solve eigenproblem, reuse workspaces */
  BLAS(syev)("Vectors", "Lower", &nb, Xs, &nb, e, work, &lwork, &info);
  /* on success Xs now holds the eigenvectors, e the eigenvalues */
  if (info != 0) {
    scs_printf("WARN: LAPACK syev error, info = %i\n", (int)info);
    if (info < 0) {
      /* info < 0 means an illegal argument: unrecoverable */
      return info;
    }
  }
  first_idx = -1;
  /* e is eigvals in ascending order, find first entry > 0 */
  for (i = 0; i < n; ++i) {
    if (e[i] > 0) {
      first_idx = i;
      break;
    }
  }
  if (first_idx == -1) {
    /* there are no positive eigenvalues, set X to 0 and return */
    memset(X, 0, sizeof(scs_float) * get_sd_cone_size(n));
    return 0;
  }
  /* Z is matrix of eigenvectors with positive eigenvalues */
  memcpy(Z, &Xs[first_idx * n], sizeof(scs_float) * n * (n - first_idx));
  /* scale Z by sqrt(eig) */
  for (i = first_idx; i < n; ++i) {
    sq_eig_pos = SQRTF(e[i]);
    BLAS(scal)(&nb, &sq_eig_pos, &Z[(i - first_idx) * n], &one_int);
  }
  /* Xs = Z Z' = V E V' */
  ncols_z = (blas_int)(n - first_idx);
  BLAS(syrk)("Lower", "NoTrans", &nb, &ncols_z, &one, Z, &nb, &zero, Xs, &nb);
  /* undo rescaling: scale diags by 1/sqrt(2) */
  BLAS(scal)(&nb, &sqrt2_inv, Xs, &nb_plus_one); /* not n_squared */
  /* extract just lower triangular matrix */
  for (i = 0; i < n; ++i) {
    memcpy(&(X[i * n - ((i - 1) * i) / 2]), &(Xs[i * (n + 1)]),
           (n - i) * sizeof(scs_float));
  }
  return 0;
#else
  scs_printf("FAILURE: solving SDP but no blas/lapack libraries were found!\n");
  scs_printf("SCS will return nonsense!\n");
  SCS(scale_array)(X, NAN, n);
  return -1;
#endif
}
/* Positive root x(r) of the power-cone stationarity quadratic, floored
 * away from zero so later divisions by x stay well defined. */
static scs_float pow_calc_x(scs_float r, scs_float xh, scs_float rh,
                            scs_float a) {
  scs_float disc = SQRTF(xh * xh + 4 * a * (rh - r) * r);
  scs_float x = 0.5 * (xh + disc);
  return MAX(x, 1e-12);
}
/* Derivative dx/dr of pow_calc_x at the current iterate. */
static scs_float pow_calcdxdr(scs_float x, scs_float xh, scs_float rh,
                              scs_float r, scs_float a) {
  scs_float num = a * (rh - 2 * r);
  scs_float den = 2 * x - xh;
  return num / den;
}
/* Residual of the power-cone boundary equation: x^a * y^(1-a) - r. */
static scs_float pow_calc_f(scs_float x, scs_float y, scs_float r,
                            scs_float a) {
  scs_float prod = POWF(x, a) * POWF(y, (1 - a));
  return prod - r;
}
/* Derivative (wrt r) of pow_calc_f via the chain rule:
 * d/dr [x(r)^a * y(r)^(1-a)] - 1. */
static scs_float pow_calc_fp(scs_float x, scs_float y, scs_float dxdr,
                             scs_float dydr, scs_float a) {
  scs_float prod = POWF(x, a) * POWF(y, (1 - a));
  scs_float log_deriv = a * dxdr / x + (1 - a) * dydr / y;
  return prod * log_deriv - 1;
}
/*
* Routine to scale the limits of the box cone by the scaling diagonal mat D > 0
*
* want (t, s) \in K <==> (t', s') \in K'
*
* (t', s') = (d0 * t, D s) (overloading D to mean D[1:])
* (up to scalar scaling factor which we can ignore due to conic prooperty)
*
* K = { (t, s) | t * l <= s <= t * u, t >= 0 } =>
* { (t, s) | d0 * t * D l / d0 <= D s <= d0 * t D u / d0, t >= 0 } =>
* { (t', s') | t' * l' <= s' <= t' u', t >= 0 } = K'
* where l' = D l / d0, u' = D u / d0.
*/
/* Rescale (and sanitize) the box-cone limits stored in the workspace:
 * saturate huge magnitudes to +/-infinity, otherwise apply the diagonal
 * scaling l' = D l / d0, u' = D u / d0 (see the derivation above).
 * When D is NULL only the saturation step applies. */
static void normalize_box_cone(ScsConeWork *c, scs_float *D, scs_int bsize) {
  scs_int j;
  for (j = 0; j < bsize - 1; j++) {
    /* upper limit */
    if (c->bu[j] >= MAX_BOX_VAL) {
      c->bu[j] = INFINITY;
    } else if (D) {
      c->bu[j] = D[j + 1] * c->bu[j] / D[0];
    }
    /* lower limit */
    if (c->bl[j] <= -MAX_BOX_VAL) {
      c->bl[j] = -INFINITY;
    } else if (D) {
      c->bl[j] = D[j + 1] * c->bl[j] / D[0];
    }
  }
}
/* project onto { (t, s) | t * l <= s <= t * u, t >= 0 }, Newton's method on t
   tx = [t; s], total length = bsize
   uses Moreau since \Pi_K*(tx) = \Pi_K(-tx) + tx
   returns the optimal t, which the caller reuses as a warm start
*/
static scs_float proj_box_cone(scs_float *tx, const scs_float *bl,
                               const scs_float *bu, scs_int bsize,
                               scs_float t_warm_start) {
  scs_float *x, gt, ht, t_prev, t = t_warm_start;
  scs_int iter, j;
  if (bsize == 1) { /* special case */
    tx[0] = MAX(tx[0], 0.0);
    return tx[0];
  }
  x = &(tx[1]);
  /* should only require about 5 or so iterations, 1 or 2 if warm-started */
  for (iter = 0; iter < BOX_CONE_MAX_ITERS; iter++) {
    t_prev = t;
    /* incorporate the additional BOX_T_SCALE factor into the projection */
    gt = BOX_T_SCALE * (t - tx[0]); /* gradient */
    ht = BOX_T_SCALE; /* hessian */
    /* accumulate contributions from coordinates clipped at either limit */
    for (j = 0; j < bsize - 1; j++) {
      if (x[j] > t * bu[j]) {
        gt += (t * bu[j] - x[j]) * bu[j]; /* gradient */
        ht += bu[j] * bu[j]; /* hessian */
      } else if (x[j] < t * bl[j]) {
        gt += (t * bl[j] - x[j]) * bl[j]; /* gradient */
        ht += bl[j] * bl[j]; /* hessian */
      }
    }
    t = MAX(t - gt / MAX(ht, 1e-8), 0.); /* newton step */
#if VERBOSITY > 3
    scs_printf("iter %i, t_new %1.3e, t_prev %1.3e, gt %1.3e, ht %1.3e\n", iter,
               t, t_prev, gt, ht);
    scs_printf("ABS(gt / (ht + 1e-6)) %.4e, ABS(t - t_prev) %.4e\n",
               ABS(gt / (ht + 1e-6)), ABS(t - t_prev));
#endif
    /* TODO: sometimes this check can fail (ie, declare convergence before it
     * should) if ht is very large, which can happen with some pathological
     * problems.
     */
    if (ABS(gt / MAX(ht, 1e-6)) < 1e-12 * MAX(t, 1.) ||
        ABS(t - t_prev) < 1e-11 * MAX(t, 1.)) {
      break;
    }
  }
  if (iter == BOX_CONE_MAX_ITERS) {
    scs_printf("warning: box cone proj hit maximum %i iters\n", (int)iter);
  }
  /* with the optimal t fixed, clip each coordinate into [t*l, t*u] */
  for (j = 0; j < bsize - 1; j++) {
    if (x[j] > t * bu[j]) {
      x[j] = t * bu[j];
    } else if (x[j] < t * bl[j]) {
      x[j] = t * bl[j];
    }
    /* x[j] unchanged otherwise */
  }
  tx[0] = t;
#if VERBOSITY > 3
  scs_printf("box cone iters %i\n", (int)iter + 1);
#endif
  return t;
}
/* project x (length q) onto the second-order cone
 * { (t, z) : ||z||_2 <= t }, in-place.
 * Note: declarations are kept at the top of the block to match the C89
 * style used throughout this file (the original mixed declarations and
 * statements, which breaks strict C90 builds); alpha is now only computed
 * on the branch that uses it. Behavior is unchanged. */
static void proj_soc(scs_float *x, scs_int q) {
  scs_float v1, s, alpha;
  if (q == 0) {
    return;
  }
  if (q == 1) {
    /* 1-d SOC degenerates to the nonnegative reals */
    x[0] = MAX(x[0], 0.);
    return;
  }
  v1 = x[0];
  s = SCS(norm_2)(&(x[1]), q - 1);
  if (s <= v1) {
    /* already inside the cone: identity */
    return;
  }
  if (s <= -v1) {
    /* inside the polar cone: projection is the origin */
    memset(&(x[0]), 0, q * sizeof(scs_float));
  } else {
    /* project onto the cone boundary */
    alpha = (s + v1) / 2.0;
    x[0] = alpha;
    SCS(scale_array)(&(x[1]), alpha / s, q - 1);
  }
}
/* Project v (length 3) onto the 3-d power cone
 * K_a = { (x, y, z) : x^a * y^(1-a) >= |z|, x >= 0, y >= 0 },
 * in-place, via a clamped Newton iteration on the scalar r = |z|. */
static void proj_power_cone(scs_float *v, scs_float a) {
  scs_float xh = v[0], yh = v[1], rh = ABS(v[2]);
  scs_float x = 0.0, y = 0.0, r;
  scs_int i;
  /* v in K_a */
  if (xh >= 0 && yh >= 0 &&
      CONE_THRESH + POWF(xh, a) * POWF(yh, (1 - a)) >= rh) {
    /* already (numerically) inside: identity */
    return;
  }
  /* -v in K_a^* */
  if (xh <= 0 && yh <= 0 &&
      CONE_THRESH + POWF(-xh, a) * POWF(-yh, 1 - a) >=
          rh * POWF(a, a) * POWF(1 - a, 1 - a)) {
    /* in the polar cone: projection is the origin */
    v[0] = v[1] = v[2] = 0;
    return;
  }
  /* initial guess: midpoint of the feasible range [0, rh] */
  r = rh / 2;
  for (i = 0; i < POW_CONE_MAX_ITERS; ++i) {
    scs_float f, fp, dxdr, dydr;
    x = pow_calc_x(r, xh, rh, a);
    y = pow_calc_x(r, yh, rh, 1 - a);
    f = pow_calc_f(x, y, r, a);
    if (ABS(f) < CONE_TOL) {
      break;
    }
    dxdr = pow_calcdxdr(x, xh, rh, r, a);
    dydr = pow_calcdxdr(y, yh, rh, r, (1 - a));
    fp = pow_calc_fp(x, y, dxdr, dydr, a);
    /* Newton step on r, kept within [0, rh] */
    r = MAX(r - f / fp, 0);
    r = MIN(r, rh);
  }
  v[0] = x;
  v[1] = y;
  /* restore the sign of the z coordinate */
  v[2] = (v[2] < 0) ? -(r) : (r);
}
/* project onto the primal K cone in the paper; the cone blocks are laid out
 * in x in the fixed order z, l, box, soc, psd, exp, exp*, pow, with `count`
 * tracking the running offset. Returns 0 on success, negative if a PSD
 * projection fails. */
static scs_int proj_cone(scs_float *x, const ScsCone *k, ScsConeWork *c,
                         scs_int normalize) {
  scs_int i, status;
  scs_int count = 0;
  if (k->z) {
    /* project onto primal zero / dual free cone */
    memset(x, 0, k->z * sizeof(scs_float));
    count += k->z;
  }
  if (k->l) {
    /* project onto positive orthant */
    for (i = count; i < count + k->l; ++i) {
      x[i] = MAX(x[i], 0.0);
    }
    count += k->l;
  }
  if (k->bsize) {
    /* project onto box cone; use the equilibrated limits when normalized */
    if (normalize) {
      c->box_t_warm_start = proj_box_cone(&(x[count]), c->bl, c->bu, k->bsize,
                                          c->box_t_warm_start);
    } else {
      c->box_t_warm_start = proj_box_cone(&(x[count]), k->bl, k->bu, k->bsize,
                                          c->box_t_warm_start);
    }
    count += k->bsize; /* since b = (t,s), len(s) = bsize - 1 */
  }
  if (k->qsize && k->q) {
    /* project onto second-order cones, one per entry of k->q */
    for (i = 0; i < k->qsize; ++i) {
      proj_soc(&(x[count]), k->q[i]);
      count += k->q[i];
    }
  }
  if (k->ssize && k->s) {
    /* project onto PSD cones (the only projection that can fail) */
    for (i = 0; i < k->ssize; ++i) {
      status = proj_semi_definite_cone(&(x[count]), k->s[i], c);
      if (status < 0) {
        return status;
      }
      count += get_sd_cone_size(k->s[i]);
    }
  }
  if (k->ep) {
    /*
     * exponential cone is not self dual, if s \in K
     * then y \in K^* and so if K is the primal cone
     * here we project onto K^*, via Moreau
     * \Pi_C^*(y) = y + \Pi_C(-y)
     */
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (i = 0; i < k->ep; ++i) {
      proj_exp_cone(&(x[count + 3 * i]));
    }
    count += 3 * k->ep;
  }
  if (k->ed) { /* dual exponential cone */
    /*
     * exponential cone is not self dual, if s \in K
     * then y \in K^* and so if K is the primal cone
     * here we project onto K^*, via Moreau
     * \Pi_C^*(y) = y + \Pi_C(-y)
     */
    scs_int idx;
    scs_float r, s, t;
    SCS(scale_array)(&(x[count]), -1, 3 * k->ed); /* x = -x; */
#ifdef _OPENMP
#pragma omp parallel for private(r, s, t, idx)
#endif
    for (i = 0; i < k->ed; ++i) {
      idx = count + 3 * i;
      /* save -y, project, then add it back: Pi_C(-y) + y */
      r = x[idx];
      s = x[idx + 1];
      t = x[idx + 2];
      proj_exp_cone(&(x[idx]));
      x[idx] -= r;
      x[idx + 1] -= s;
      x[idx + 2] -= t;
    }
    count += 3 * k->ed;
  }
  if (k->psize && k->p) {
    scs_float v[3];
    scs_int idx;
    /* don't use openmp for power cone
    ifdef _OPENMP
    pragma omp parallel for private(v, idx)
    endif
    */
    for (i = 0; i < k->psize; ++i) {
      idx = count + 3 * i;
      /* the sign of k->p[i] selects primal vs dual power cone */
      if (k->p[i] >= 0) {
        /* primal power cone */
        proj_power_cone(&(x[idx]), k->p[i]);
      } else {
        /* dual power cone, using Moreau */
        v[0] = -x[idx];
        v[1] = -x[idx + 1];
        v[2] = -x[idx + 2];
        proj_power_cone(v, -k->p[i]);
        x[idx] += v[0];
        x[idx + 1] += v[1];
        x[idx + 2] += v[2];
      }
    }
    count += 3 * k->psize;
  }
  /* project onto OTHER cones */
  return 0;
}
/* Allocate and initialize the cone projection workspace.
 * @param k         cone description (not owned)
 * @param scal      equilibration scaling, may be SCS_NULL
 * @param cone_len  total length of the cone variable
 * @return workspace pointer, or SCS_NULL on allocation failure.
 * Fix: the original never checked any scs_calloc result, so an OOM led to
 * a NULL dereference; partial allocations are now released via
 * SCS(finish_cone), the same cleanup path already used for SD setup
 * failures. */
ScsConeWork *SCS(init_cone)(const ScsCone *k, const ScsScaling *scal,
                            scs_int cone_len) {
  ScsConeWork *c = (ScsConeWork *)scs_calloc(1, sizeof(ScsConeWork));
  if (!c) {
    return SCS_NULL;
  }
  c->cone_len = cone_len;
  /* scratch copy of the input used by proj_dual_cone (Moreau) */
  c->s = (scs_float *)scs_calloc(cone_len, sizeof(scs_float));
  if (!c->s && cone_len > 0) {
    SCS(finish_cone)(c);
    return SCS_NULL;
  }
  if (k->bsize && k->bu && k->bl) {
    c->box_t_warm_start = 1.;
    if (scal) {
      /* keep private, scaled copies of the box limits */
      c->bu = (scs_float *)scs_calloc(k->bsize - 1, sizeof(scs_float));
      c->bl = (scs_float *)scs_calloc(k->bsize - 1, sizeof(scs_float));
      if (!c->bu || !c->bl) {
        SCS(finish_cone)(c);
        return SCS_NULL;
      }
      memcpy(c->bu, k->bu, (k->bsize - 1) * sizeof(scs_float));
      memcpy(c->bl, k->bl, (k->bsize - 1) * sizeof(scs_float));
      /* also does some sanitizing */
      normalize_box_cone(c, scal ? &(scal->D[k->z + k->l]) : SCS_NULL,
                         k->bsize);
    }
  }
  if (k->ssize && k->s) {
    /* eigendecomposition workspace for the PSD cone projections */
    if (set_up_sd_cone_work_space(c, k) < 0) {
      SCS(finish_cone)(c);
      return SCS_NULL;
    }
  }
  return c;
}
/* outward facing cone projection routine
   performs projection in-place
   if normalize > 0 then will use normalized (equilibrated) cones if applicable.
*/
scs_int SCS(proj_dual_cone)(scs_float *x, const ScsCone *k, ScsConeWork *c,
                            scs_int normalize) {
  scs_int status;
  const scs_int len = c->cone_len;
  /* stash the input: s = x */
  memcpy(c->s, x, len * sizeof(scs_float));
  /* flip sign so we project the negation onto the primal cone: x -> -x */
  SCS(scale_array)(x, -1., len);
  /* x -> Pi_K(-x) */
  status = proj_cone(x, k, c, normalize);
  /* Moreau decomposition: Pi_K*(x) = s + Pi_K(-x) */
  SCS(add_scaled_array)(x, c->s, len, 1.);
  return status;
}
|
WaveFunctionComponent.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Ken Esler, kpesler@gmail.com, University of Illinois at Urbana-Champaign
// Miguel Morales, moralessilva2@llnl.gov, Lawrence Livermore National Laboratory
// Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
// Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
// Raymond Clay III, j.k.rofling@gmail.com, Lawrence Livermore National Laboratory
// Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
//////////////////////////////////////////////////////////////////////////////////////
#ifndef QMCPLUSPLUS_WAVEFUNCTIONCOMPONENT_H
#define QMCPLUSPLUS_WAVEFUNCTIONCOMPONENT_H
#include "Message/Communicate.h"
#include "Configuration.h"
#include "Particle/ParticleSet.h"
#include "Particle/VirtualParticleSet.h"
#include "Particle/DistanceTableData.h"
#include "OhmmsData/RecordProperty.h"
#include "QMCWaveFunctions/OrbitalSetTraits.h"
#include "Particle/MCWalkerConfiguration.h"
#include "type_traits/template_types.hpp"
#ifdef QMC_CUDA
#include "type_traits/CUDATypes.h"
#endif
/**@file WaveFunctionComponent.h
*@brief Declaration of WaveFunctionComponent
*/
namespace qmcplusplus
{
#ifdef QMC_CUDA
/** Plain record bundling the indices for one batched evaluation job
 *  (CUDA code path): a walker index, an electron index, and the number
 *  of quadrature points for that job. */
struct NLjob
{
  int walker;        // walker index
  int elec;          // electron index
  int numQuadPoints; // number of quadrature points for this job
  NLjob(int w, int e, int n) : walker(w), elec(e), numQuadPoints(n) {}
};
#endif
///forward declaration of WaveFunctionComponent
class WaveFunctionComponent;
///forward declaration of DiffWaveFunctionComponent
class DiffWaveFunctionComponent;
typedef WaveFunctionComponent* WaveFunctionComponentPtr;
typedef DiffWaveFunctionComponent* DiffWaveFunctionComponentPtr;
/**@defgroup WaveFunctionComponent group
* @brief Classes which constitute a many-body trial wave function
*
* A many-body trial wave function is
* \f[
\Psi(\{ {\bf R}\}) = \prod_i \psi_{i}(\{ {\bf R}\}),
* \f]
* where \f$\Psi\f$s are represented by
* the derived classes from WaveFunctionComponent.
*/
/** @ingroup WaveFunctionComponent
* @brief An abstract class for a component of a many-body trial wave function
*
* mw_ prefix is a function name signature indicating it is for handling a batch of WaveFunctionComponent objects
* which are required to be base class pointers of the same derived class type.
* all the mw_ routines must be implemented in a way either stateless or maintains states of every walker.
*/
struct WaveFunctionComponent : public QMCTraits
{
/** enum for a update mode */
enum
{
ORB_PBYP_RATIO, /*!< particle-by-particle ratio only */
ORB_PBYP_ALL, /*!< particle-by-particle, update Value-Gradient-Laplacian */
ORB_PBYP_PARTIAL, /*!< particle-by-particle, update Value and Grdient */
ORB_WALKER, /*!< walker update */
ORB_ALLWALKER /*!< all walkers update */
};
typedef ParticleAttrib<ValueType> ValueVectorType;
typedef ParticleAttrib<GradType> GradVectorType;
typedef ParticleSet::Walker_t Walker_t;
typedef Walker_t::WFBuffer_t WFBufferType;
typedef Walker_t::Buffer_t BufferType;
typedef OrbitalSetTraits<RealType>::ValueMatrix_t RealMatrix_t;
typedef OrbitalSetTraits<ValueType>::ValueMatrix_t ValueMatrix_t;
typedef OrbitalSetTraits<ValueType>::GradMatrix_t GradMatrix_t;
typedef OrbitalSetTraits<ValueType>::HessType HessType;
typedef OrbitalSetTraits<ValueType>::HessVector_t HessVector_t;
// the value type for log(psi)
using LogValueType = std::complex<QTFull::RealType>;
// the value type for psi(r')/psi(r)
using PsiValueType = QTFull::ValueType;
/** flag to set the optimization mode */
bool IsOptimizing;
/** boolean to set optimization
*
* If true, this object is actively modified during optimization
*/
bool Optimizable;
/** true, if this component is fermionic */
bool is_fermionic;
/** current update mode */
int UpdateMode;
/** current \f$\log\phi \f$
*/
LogValueType LogValue;
/** Pointer to the differential WaveFunctionComponent of this object
*
* If dPsi=0, this WaveFunctionComponent is constant with respect to the optimizable variables
*/
DiffWaveFunctionComponentPtr dPsi;
/** A vector for \f$ \frac{\partial \nabla \log\phi}{\partial \alpha} \f$
*/
GradVectorType dLogPsi;
/** A vector for \f$ \frac{\partial \nabla^2 \log\phi}{\partial \alpha} \f$
*/
ValueVectorType d2LogPsi;
/** Name of the class derived from WaveFunctionComponent
*/
std::string ClassName;
///list of variables this WaveFunctionComponent handles
opt_variables_type myVars;
///Bytes in WFBuffer
size_t Bytes_in_WFBuffer;
/// default constructor
WaveFunctionComponent();
//WaveFunctionComponent(const WaveFunctionComponent& old);
///default destructor
virtual ~WaveFunctionComponent() {}
inline void setOptimizable(bool optimizeit) { Optimizable = optimizeit; }
///assign a differential WaveFunctionComponent
virtual void setDiffOrbital(DiffWaveFunctionComponentPtr d);
///assembles the full value
PsiValueType getValue() const { return LogToValue<PsiValueType>::convert(LogValue); }
/** check in optimizable parameters
* @param active a super set of optimizable variables
*
* Add the paramemters this WaveFunctionComponent manage to active.
*/
virtual void checkInVariables(opt_variables_type& active) = 0;
/** check out optimizable variables
*
* Update myVars index map
*/
virtual void checkOutVariables(const opt_variables_type& active) = 0;
/** reset the parameters during optimizations
*/
virtual void resetParameters(const opt_variables_type& active) = 0;
/** print the state, e.g., optimizables */
virtual void reportStatus(std::ostream& os) = 0;
/** reset properties, e.g., distance tables, for a new target ParticleSet
* @param P ParticleSet
*/
virtual void resetTargetParticleSet(ParticleSet& P) = 0;
/** evaluate the value of the WaveFunctionComponent from scratch
* @param P active ParticleSet
* @param G Gradients, \f$\nabla\ln\Psi\f$
* @param L Laplacians, \f$\nabla^2\ln\Psi\f$
* @return the log value
*
* Mainly for walker-by-walker move. The initial stage of particle-by-particle
* move also uses this.
*/
virtual LogValueType evaluateLog(ParticleSet& P,
ParticleSet::ParticleGradient_t& G,
ParticleSet::ParticleLaplacian_t& L) = 0;
/** evaluate from scratch the same type WaveFunctionComponent of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param G_list the list of Gradients pointers in a walker batch, \f$\nabla\ln\Psi\f$
* @param L_list the list of Laplacians pointers in a walker batch, \f$\nabla^2\ln\Psi\f$
* @@param values the log WF values of walkers in a batch
*/
virtual void mw_evaluateLog(const std::vector<WaveFunctionComponent*>& WFC_list,
const std::vector<ParticleSet*>& P_list,
const std::vector<ParticleSet::ParticleGradient_t*>& G_list,
const std::vector<ParticleSet::ParticleLaplacian_t*>& L_list)
{
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
WFC_list[iw]->evaluateLog(*P_list[iw], *G_list[iw], *L_list[iw]);
}
/** recompute the value of the WaveFunctionComponents which require critical accuracy.
* needed for Slater Determinants but not needed for most types of WaveFunctionComponents
*/
virtual void recompute(ParticleSet& P) {}
// virtual void evaluateHessian(ParticleSet& P, IndexType iat, HessType& grad_grad_psi)
// {
// APP_ABORT("WaveFunctionComponent::evaluateHessian is not implemented");
// }
virtual void evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi_all)
{
APP_ABORT("WaveFunctionComponent::evaluateHessian is not implemented in " + ClassName + " class.");
}
/** return the current gradient for the iat-th particle
* @param P quantum particle set
* @param iat particle index
* @return the gradient of the iat-th particle
*/
virtual GradType evalGrad(ParticleSet& P, int iat)
{
APP_ABORT("WaveFunctionComponent::evalGradient is not implemented in " + ClassName + " class.");
return GradType();
}
/** compute the current gradients for the iat-th particle of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param grad_now the list of gradients in a walker batch, \f$\nabla\ln\Psi\f$
*/
virtual void mw_evalGrad(const std::vector<WaveFunctionComponent*>& WFC_list,
const std::vector<ParticleSet*>& P_list,
int iat,
std::vector<GradType>& grad_now)
{
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
grad_now[iw] = WFC_list[iw]->evalGrad(*P_list[iw], iat);
}
/** compute the current gradients for the iat-th particle of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param grad_now the list of gradients in a walker batch, \f$\nabla\ln\Psi\f$
*/
virtual void mw_evalGrad(const std::vector<std::reference_wrapper<WaveFunctionComponent>>& WFC_list,
const std::vector<std::reference_wrapper<ParticleSet>>& P_list,
int iat,
std::vector<GradType>& grad_now)
{
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
grad_now[iw] = WFC_list[iw].get().evalGrad(P_list[iw].get(), iat);
}
/** return the logarithmic gradient for the iat-th particle
* of the source particleset
* @param Pquantum particle set
* @param iat particle index
* @return the gradient of the iat-th particle
*/
virtual GradType evalGradSource(ParticleSet& P, ParticleSet& source, int iat)
{
// unit_test_hamiltonian calls this function incorrectly; do not abort for now
// APP_ABORT("WaveFunctionComponent::evalGradSource is not implemented");
return GradType();
}
/** Adds the gradient w.r.t. the iat-th particle of the
* source particleset (ions) of the logarithmic gradient
* and laplacian w.r.t. the target paritlceset (electrons).
* @param P quantum particle set (electrons)
* @param source classical particle set (ions)
* @param iat particle index of source (ion)
* @param the ion gradient of the elctron gradient
* @param the ion gradient of the elctron laplacian.
* @return the log gradient of psi w.r.t. the source particle iat
*/
virtual GradType evalGradSource(ParticleSet& P,
ParticleSet& source,
int iat,
TinyVector<ParticleSet::ParticleGradient_t, OHMMS_DIM>& grad_grad,
TinyVector<ParticleSet::ParticleLaplacian_t, OHMMS_DIM>& lapl_grad)
{
return GradType();
}
/** evaluate the ratio of the new to old WaveFunctionComponent value and the new gradient
* @param P the active ParticleSet
* @param iat the index of a particle
* @param grad_iat Gradient for the active particle
*/
virtual PsiValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
{
APP_ABORT("WaveFunctionComponent::ratioGrad is not implemented in " + ClassName + " class.");
return ValueType();
}
/** compute the ratio of the new to old WaveFunctionComponent value and the new gradient of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$
* @param grad_now the list of new gradients in a walker batch, \f$\nabla\ln\Psi\f$
*/
virtual void mw_ratioGrad(const std::vector<WaveFunctionComponent*>& WFC_list,
const std::vector<ParticleSet*>& P_list,
int iat,
std::vector<PsiValueType>& ratios,
std::vector<GradType>& grad_new)
{
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
ratios[iw] = WFC_list[iw]->ratioGrad(*P_list[iw], iat, grad_new[iw]);
}
/** compute the ratio of the new to old WaveFunctionComponent value and the new gradient of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$
* @param grad_now the list of new gradients in a walker batch, \f$\nabla\ln\Psi\f$
*/
virtual void mw_ratioGrad(const RefVector<WaveFunctionComponent>& WFC_list,
const RefVector<ParticleSet>& P_list,
int iat,
std::vector<PsiValueType>& ratios,
std::vector<GradType>& grad_new)
{
//#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
ratios[iw] = WFC_list[iw].get().ratioGrad(P_list[iw], iat, grad_new[iw]);
}
/** a move for iat-th particle is accepted. Update the current content.
* @param P target ParticleSet
* @param iat index of the particle whose new position was proposed
*/
virtual void acceptMove(ParticleSet& P, int iat) = 0;
/** moves of the iat-th particle on some walkers in a batch is accepted. Update the current content.
* Note that all the lists only include accepted walkers.
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
*/
virtual void mw_acceptMove(const std::vector<WaveFunctionComponent*>& WFC_list,
const std::vector<ParticleSet*>& P_list,
int iat)
{
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
WFC_list[iw]->acceptMove(*P_list[iw], iat);
}
/** complete all the delayed updates, must be called after each substep or step during pbyp move
*/
virtual void completeUpdates() {}
/** complete all the delayed updates for all the walkers in a batch
* must be called after each substep or step during pbyp move
*/
virtual void mw_completeUpdates(const std::vector<WaveFunctionComponent*>& WFC_list)
{
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
WFC_list[iw]->completeUpdates();
}
/** If a move for iat-th particle is rejected, restore to the content.
* @param iat index of the particle whose new position was proposed
*
* Ye: hopefully we can gradually move away from restore
*/
virtual void restore(int iat) = 0;
/** If a move for iat-th particle on some walkers in a batch is rejected, restore their contents
* Note that all the lists only include rejected walkers.
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param iat index of the particle whose new position was proposed
*
* Ye: hopefully we can gradually move away from restore
*/
virtual void mw_restore(const std::vector<WaveFunctionComponent*>& WFC_list, int iat)
{
//#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
WFC_list[iw]->restore(iat);
}
/** evaluate the ratio of the new to old WaveFunctionComponent value
* @param P the active ParticleSet
* @param iat the index of a particle
* @return \f$ \psi( \{ {\bf R}^{'} \} )/ \psi( \{ {\bf R}\})\f$
*
* Specialized for particle-by-particle move
*/
virtual PsiValueType ratio(ParticleSet& P, int iat) = 0;
/** compute the ratio of the new to old WaveFunctionComponent value of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$
*/
virtual void mw_calcRatio(const std::vector<WaveFunctionComponent*>& WFC_list,
const std::vector<ParticleSet*>& P_list,
int iat,
std::vector<PsiValueType>& ratios)
{
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
ratios[iw] = WFC_list[iw]->ratio(*P_list[iw], iat);
}
/** compute the ratio of the new to old WaveFunctionComponent value of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$
*/
virtual void mw_calcRatio(const RefVector<WaveFunctionComponent>& WFC_list,
const RefVector<ParticleSet>& P_list,
int iat,
std::vector<PsiValueType>& ratios)
{
//#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
ratios[iw] = WFC_list[iw].get().ratio(P_list[iw], iat);
}
/** For particle-by-particle move. Requests space in the buffer
* based on the data type sizes of the objects in this class.
* @param P particle set
* @param buf Anonymous storage
*/
virtual void registerData(ParticleSet& P, WFBufferType& buf) = 0;
/** For particle-by-particle move. Requests space in the buffer
 * based on the data type sizes of the objects in this class.
 * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
 * @param P_list the list of ParticleSet pointers in a walker batch
 * @param buf_list Anonymous storage, one buffer per walker
 */
virtual void mw_registerData(const std::vector<WaveFunctionComponent*>& WFC_list,
                             const std::vector<ParticleSet*>& P_list,
                             const std::vector<WFBufferType*>& buf_list)
{
  // Register each walker's component data with its own buffer, one walker at
  // a time (the original's lambda was pure forwarding, inlined here).
  const std::size_t num_walkers = WFC_list.size();
  for (std::size_t iw = 0; iw < num_walkers; ++iw)
    WFC_list[iw]->registerData(*P_list[iw], *buf_list[iw]);
}
/** For particle-by-particle move. Put the objects of this class
* in the walker buffer or forward the memory cursor.
* @param P particle set
* @param buf Anonymous storage
* @param fromscratch request recomputing the precision critical
* pieces of wavefunction from scratch
* @return log value of the wavefunction.
*/
virtual LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false) = 0;
/** For particle-by-particle move. Put the objects of this class
 * in the walker buffer or forward the memory cursor.
 * Unlike the single-walker updateBuffer, the per-walker log values returned
 * by updateBuffer are discarded here.
 * @param WFC_list the list of WaveFunctionComponent references of the same component in a walker batch
 * @param P_list the list of ParticleSet references in a walker batch
 * @param buf_list Anonymous storage, one buffer per walker
 * @param fromscratch request recomputing the precision critical
 *        pieces of wavefunction from scratch
 */
virtual void mw_updateBuffer(const RefVector<WaveFunctionComponent>& WFC_list,
                             const RefVector<ParticleSet>& P_list,
                             const RefVector<WFBufferType>& buf_list,
                             bool fromscratch = false)
{
#pragma omp parallel for
  for (int iw = 0; iw < WFC_list.size(); iw++)
    WFC_list[iw].get().updateBuffer(P_list[iw], buf_list[iw], fromscratch);
}
/** For particle-by-particle move. Copy data or attach memory
* from a walker buffer to the objects of this class.
* The log value, P.G and P.L contribution from the objects
* of this class are also added.
* @param P particle set
* @param buf Anonymous storage
*/
virtual void copyFromBuffer(ParticleSet& P, WFBufferType& buf) = 0;
/** For particle-by-particle move. Copy data or attach memory
 * from a walker buffer to the objects of this class.
 * @param wfc_list the list of WaveFunctionComponent references of the same component in a walker batch
 * @param p_list the list of ParticleSet references in a walker batch
 * @param buf_list Anonymous storage, one buffer per walker
 */
virtual void mw_copyFromBuffer(const RefVector<WaveFunctionComponent>& wfc_list,
                               const RefVector<ParticleSet>& p_list,
                               const RefVector<WFBufferType>& buf_list)
{
  // Walkers are independent; restore each component from its own buffer in parallel.
#pragma omp parallel for
  for (int walker_idx = 0; walker_idx < static_cast<int>(wfc_list.size()); walker_idx++)
    wfc_list[walker_idx].get().copyFromBuffer(p_list[walker_idx], buf_list[walker_idx]);
}
/** make clone
* @param tqp target Quantum ParticleSet
* @param deepcopy if true, make a decopy
*
* If not true, return a proxy class
*/
virtual WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const;
/** Intended as a handle to break
*
*
*/
//virtual WaveFunctionComponentPtr makeThrScope(std::vector<std::pair<int,int>>& ptcl_group_indexes) const = 0;
/** Return the Chiesa kinetic energy correction
*/
virtual RealType KECorrection();
/** Compute derivatives of the wavefunction with respect to the optimizable
* parameters.
* @param P particle set
* @param optvars optimizable parameters
* @param dlogpsi array of derivatives of the log of the wavefunction
* @param dhpsioverpsi array of derivatives of the Laplacian of the wavefunction divided by the wavefunction.
* Note that this does not use the Laplacian of the log of the wavefunction, as in evaluateLog.
* Also the factor of -1/2 from the kinetic energy must be included here. The 1/m
* factor is applied in TrialWaveFunction.
*/
virtual void evaluateDerivatives(ParticleSet& P,
const opt_variables_type& optvars,
std::vector<ValueType>& dlogpsi,
std::vector<ValueType>& dhpsioverpsi);
/** Compute derivatives of rhe wavefunction with respect to the optimizable
* parameters
* @param P particle set
* @param optvars optimizable parameters
* @param dlogpsi array of derivatives of the log of the wavefunction
* Note: this function differs from the evaluateDerivatives function in the way that it only computes
* the derivative of the log of the wavefunction.
*/
virtual void evaluateDerivativesWF(ParticleSet& P,
const opt_variables_type& optvars,
std::vector<ValueType>& dlogpsi);
/** Scale this component's log-derivative entries by Re(psi).
 * Multiplies the dlogpsi slots owned by this component's optimizable
 * variables by the real part of the WF value reconstructed from LogValue.
 * @param dlogpsi array of derivatives of the log of the wavefunction,
 *        indexed by global optimizable-parameter location
 */
virtual void multiplyDerivsByOrbR(std::vector<ValueType>& dlogpsi)
{
  // Re(psi) recovered from this component's stored log value.
  RealType myrat = std::real(LogToValue<PsiValueType>::convert(LogValue));
  for (int j = 0; j < myVars.size(); j++)
  {
    // myVars.where(j) maps the j-th local variable to its global slot
    // (NOTE(review): assumed valid/non-negative here — no guard present).
    int loc = myVars.where(j);
    dlogpsi[loc] *= myrat;
  }
}
/** Calculates the derivatives of \f$ \grad(\textrm{log}(\psif)) \f$ with respect to
    the optimizable parameters, and the dot product of this is then
    performed with the passed-in G_in gradient vector. This object is then
    returned as dgradlogpsi.
*/
virtual void evaluateGradDerivatives(const ParticleSet::ParticleGradient_t& G_in, std::vector<ValueType>& dgradlogpsi)
{
  // Default implementation aborts: derived components that support gradient
  // derivatives must override this method.
  APP_ABORT("Need specialization of WaveFunctionComponent::evaluateGradDerivatives in " + ClassName + " class.\n");
}
virtual void finalizeOptimization() {}
/** evaluate the ratios of one virtual move with respect to all the particles
* @param P reference particleset
* @param ratios \f$ ratios[i]=\{{\bf R}\}\rightarrow {r_0,\cdots,r_i^p=pos,\cdots,r_{N-1}}\f$
*/
virtual void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios);
/** evaluate ratios to evaluate the non-local PP
* @param VP VirtualParticleSet
* @param ratios ratios with new positions VP.R[k] the VP.refPtcl
*/
virtual void evaluateRatios(VirtualParticleSet& VP, std::vector<ValueType>& ratios);
/** evaluate ratios to evaluate the non-local PP
* @param VP VirtualParticleSet
* @param ratios ratios with new positions VP.R[k] the VP.refPtcl
* @param dratios \f$\partial_{\alpha}(\ln \Psi ({\bf R}^{\prime}) - \ln \Psi ({\bf R})) \f$
*/
virtual void evaluateDerivRatios(VirtualParticleSet& VP,
const opt_variables_type& optvars,
std::vector<ValueType>& ratios,
Matrix<ValueType>& dratios);
/////////////////////////////////////////////////////
// Functions for vectorized evaluation and updates //
/////////////////////////////////////////////////////
#ifdef QMC_CUDA
using CTS = CUDAGlobalTypes;
virtual void freeGPUmem() {}
virtual void recompute(MCWalkerConfiguration& W, bool firstTime) {}
virtual void reserve(PointerPool<gpu::device_vector<CTS::ValueType>>& pool, int kblocksize) {}
/** Evaluate the log of the WF for all walkers
 * @param W walker configuration holding all walkers (doc previously named a
 *        nonexistent 'walkers' parameter)
 * @param logPsi output vector of log(psi)
 *
 * Default implementation aborts; CUDA-capable components must override.
 */
virtual void addLog(MCWalkerConfiguration& W, std::vector<RealType>& logPsi)
{
  APP_ABORT("Need specialization of WaveFunctionComponent::addLog for " + ClassName +
            ".\n Required CUDA functionality not implemented. Contact developers.\n");
}
/** Evaluate the wave-function ratio w.r.t. moving particle iat
* for all walkers
* @param walkers vector of all walkers
* @param iat particle which is moving
* @param psi_ratios output vector with psi_new/psi_old
*/
virtual void ratio(MCWalkerConfiguration& W, int iat, std::vector<ValueType>& psi_ratios)
{
APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
// Returns the WF ratio and gradient w.r.t. iat for each walker
// in the respective vectors
virtual void ratio(MCWalkerConfiguration& W, int iat, std::vector<ValueType>& psi_ratios, std::vector<GradType>& grad)
{
APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
virtual void ratio(MCWalkerConfiguration& W,
int iat,
std::vector<ValueType>& psi_ratios,
std::vector<GradType>& grad,
std::vector<ValueType>& lapl)
{
APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
virtual void calcRatio(MCWalkerConfiguration& W,
int iat,
std::vector<ValueType>& psi_ratios,
std::vector<GradType>& grad,
std::vector<ValueType>& lapl)
{
APP_ABORT("Need specialization of WaveFunctionComponent::calcRatio for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
virtual void addRatio(MCWalkerConfiguration& W,
int iat,
int k,
std::vector<ValueType>& psi_ratios,
std::vector<GradType>& grad,
std::vector<ValueType>& lapl)
{
APP_ABORT("Need specialization of WaveFunctionComponent::addRatio for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
virtual void ratio(std::vector<Walker_t*>& walkers,
std::vector<int>& iatList,
std::vector<PosType>& rNew,
std::vector<ValueType>& psi_ratios,
std::vector<GradType>& grad,
std::vector<ValueType>& lapl)
{
APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
virtual void addGradient(MCWalkerConfiguration& W, int iat, std::vector<GradType>& grad)
{
APP_ABORT("Need specialization of WaveFunctionComponent::addGradient for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
virtual void calcGradient(MCWalkerConfiguration& W, int iat, int k, std::vector<GradType>& grad)
{
APP_ABORT("Need specialization of WaveFunctionComponent::calcGradient for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
virtual void gradLapl(MCWalkerConfiguration& W, GradMatrix_t& grads, ValueMatrix_t& lapl)
{
APP_ABORT("Need specialization of WaveFunctionComponent::gradLapl for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
virtual void det_lookahead(MCWalkerConfiguration& W,
std::vector<ValueType>& psi_ratios,
std::vector<GradType>& grad,
std::vector<ValueType>& lapl,
int iat,
int k,
int kd,
int nw)
{
APP_ABORT("Need specialization of WaveFunctionComponent::det_lookahead for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
virtual void update(MCWalkerConfiguration* W, std::vector<Walker_t*>& walkers, int iat, std::vector<bool>* acc, int k)
{
APP_ABORT("Need specialization of WaveFunctionComponent::update for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
virtual void update(const std::vector<Walker_t*>& walkers, const std::vector<int>& iatList)
{
APP_ABORT("Need specialization of WaveFunctionComponent::update for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
virtual void NLratios(MCWalkerConfiguration& W,
std::vector<NLjob>& jobList,
std::vector<PosType>& quadPoints,
std::vector<ValueType>& psi_ratios)
{
APP_ABORT("Need specialization of WaveFunctionComponent::NLRatios for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
virtual void NLratios(MCWalkerConfiguration& W,
gpu::device_vector<CUDA_PRECISION*>& Rlist,
gpu::device_vector<int*>& ElecList,
gpu::device_vector<int>& NumCoreElecs,
gpu::device_vector<CUDA_PRECISION*>& QuadPosList,
gpu::device_vector<CUDA_PRECISION*>& RatioList,
int numQuadPoints)
{
APP_ABORT("Need specialization of WaveFunctionComponent::NLRatios for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
virtual void evaluateDerivatives(MCWalkerConfiguration& W,
const opt_variables_type& optvars,
RealMatrix_t& dgrad_logpsi,
RealMatrix_t& dhpsi_over_psi)
{
APP_ABORT("Need specialization of WaveFunctionComponent::evaluateDerivatives for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
#endif
};
} // namespace qmcplusplus
#endif
|
opencl_geli_fmt_plug.c | /*
* This software is Copyright (c) 2017 Dhiru Kholia, Copyright (c) 2012-2013
* Lukas Odzioba, Copyright (c) 2014 JimF, Copyright (c) 2014 magnum, and it is
* hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Based on opencl_pbkdf2_hmac_sha512_fmt_plug.c file.
*/
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_geli;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_geli);
#else
#include <stdint.h>
#include <string.h>
#include <openssl/bn.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "misc.h"
#include "arch.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "common-opencl.h"
#include "sha2.h"
#include "geli_common.h"
#include "johnswap.h"
#include "hmac_sha.h"
#undef FORMAT_NAME
#include "pbkdf2_hmac_common.h"
#undef FORMAT_NAME
#define FORMAT_NAME "FreeBSD GELI"
#define FORMAT_LABEL "geli-opencl"
#define ALGORITHM_NAME "PBKDF2-SHA512 OpenCL AES"
#define BINARY_SIZE 0
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_SIZE sizeof(custom_salt)
#define SALT_ALIGN sizeof(uint32_t)
#define PLAINTEXT_LENGTH 110
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define KERNEL_NAME "pbkdf2_sha512_kernel"
#define SPLIT_KERNEL_NAME "pbkdf2_sha512_loop"
#define CONFIG_NAME "pbkdf2_sha512"
#define HASH_LOOPS 250
#define ITERATIONS 10000
typedef struct {
// for plaintext, we must make sure it is a full uint64_t width.
uint64_t v[(PLAINTEXT_LENGTH + 7) / 8]; // v must be kept aligned(8)
uint64_t length; // keep 64 bit aligned, length is overkill, but easiest way to stay aligned.
} pass_t;
typedef struct {
uint64_t hash[8];
} crack_t;
typedef struct {
// for salt, we append \x00\x00\x00\x01\x80 and must make sure it is a full uint64 width
uint64_t salt[(PBKDF2_64_MAX_SALT_SIZE + 1 + 4 + 7) / 8]; // salt must be kept aligned(8)
uint32_t length;
uint32_t rounds;
} salt_t;
typedef struct {
uint64_t ipad[8];
uint64_t opad[8];
uint64_t hash[8];
uint64_t W[8];
cl_uint rounds;
} state_t;
static unsigned int *cracked;
static custom_salt *cur_salt;
static pass_t *host_pass;
static salt_t *host_salt;
static crack_t *host_crack;
static cl_mem mem_in, mem_out, mem_salt, mem_state;
static cl_kernel split_kernel;
static cl_int cl_error;
static struct fmt_main *self;
#define STEP 0
#define SEED 256
static const char *warn[] = {
"xfer: ", ", init: " , ", crypt: ", ", res xfer: "
};
static int split_events[] = { 2, -1, -1 };
//This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
/* ------- Helper functions ------- */
/* Upper bound on the local work size usable by BOTH kernels: the smaller of
 * the two per-kernel limits reported by the autotune library. */
static size_t get_task_max_work_group_size()
{
	size_t crypt_max = autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
	size_t split_max = autotune_get_task_max_work_group_size(FALSE, 0, split_kernel);

	return MIN(crypt_max, split_max);
}
/* Allocate host arrays and device buffers for 'kpc' keys per crypt, and bind
 * the buffers to both kernels' arguments. Paired with release_clobj(). */
static void create_clobj(size_t kpc, struct fmt_main *self)
{
	/* Host-side mirrors of the device buffers (zero-initialized). */
	host_pass = mem_calloc(kpc, sizeof(pass_t));
	host_crack = mem_calloc(kpc, sizeof(crack_t));
	host_salt = mem_calloc(1, sizeof(salt_t));
	cracked = mem_calloc(kpc, sizeof(*cracked));

/* Shorthand macros; the CLCREATEBUFFER/CLKERNELARG wrappers bail out through
 * HANDLE_CLERROR on any OpenCL error. */
#define CL_RO CL_MEM_READ_ONLY
#define CL_WO CL_MEM_WRITE_ONLY
#define CL_RW CL_MEM_READ_WRITE

#define CLCREATEBUFFER(_flags, _size, _string)	  \
	clCreateBuffer(context[gpu_id], _flags, _size, NULL, &cl_error); \
	HANDLE_CLERROR(cl_error, _string);

#define CLKERNELARG(kernel, id, arg, msg)	  \
	HANDLE_CLERROR(clSetKernelArg(kernel, id, sizeof(arg), &arg), msg);

	/* Device buffers: passwords in, salt in, hashes out, PBKDF2 state scratch. */
	mem_in = CLCREATEBUFFER(CL_RO, kpc * sizeof(pass_t),
	                        "Cannot allocate mem in");
	mem_salt = CLCREATEBUFFER(CL_RO, sizeof(salt_t),
	                          "Cannot allocate mem salt");
	mem_out = CLCREATEBUFFER(CL_WO, kpc * sizeof(crack_t),
	                         "Cannot allocate mem out");
	mem_state = CLCREATEBUFFER(CL_RW, kpc * sizeof(state_t),
	                           "Cannot allocate mem state");

	/* Kernel argument binding: init kernel reads in+salt, writes state;
	 * loop kernel iterates state and writes out. */
	CLKERNELARG(crypt_kernel, 0, mem_in, "Error while setting mem_in");
	CLKERNELARG(crypt_kernel, 1, mem_salt, "Error while setting mem_salt");
	CLKERNELARG(crypt_kernel, 2, mem_state, "Error while setting mem_state");

	CLKERNELARG(split_kernel, 0, mem_state, "Error while setting mem_state");
	CLKERNELARG(split_kernel, 1, mem_out, "Error while setting mem_out");
}
/* One-time format init: remember our fmt_main and prepare the OpenCL device.
 * Kernel building is deferred to reset(). */
static void init(struct fmt_main *_self)
{
	self = _self;

	opencl_prepare_dev(gpu_id);
}
/* Build the kernels and run the auto-tuner the first time we get a database.
 * Subsequent calls are no-ops once autotuned is set. */
static void reset(struct db_main *db)
{
	char build_opts[128];

	if (autotuned)
		return;

	snprintf(build_opts, sizeof(build_opts),
	         "-DHASH_LOOPS=%u -DPLAINTEXT_LENGTH=%d -DPBKDF2_64_MAX_SALT_SIZE=%d",
	         HASH_LOOPS, PLAINTEXT_LENGTH, PBKDF2_64_MAX_SALT_SIZE);
	opencl_init("$JOHN/kernels/pbkdf2_hmac_sha512_kernel.cl",
	            gpu_id, build_opts);

	crypt_kernel = clCreateKernel(program[gpu_id], KERNEL_NAME, &cl_error);
	HANDLE_CLERROR(cl_error, "Error creating kernel");
	split_kernel =
		clCreateKernel(program[gpu_id], SPLIT_KERNEL_NAME, &cl_error);
	HANDLE_CLERROR(cl_error, "Error creating split kernel");

	// Initialize openCL tuning (library) for this format.
	opencl_init_auto_setup(SEED, HASH_LOOPS, split_events, warn, 2,
	                       self, create_clobj, release_clobj,
	                       sizeof(state_t), 0, db);

	// Auto tune execution from shared/included code.
	autotune_run(self, ITERATIONS, 0,
	             (cpu(device_info[gpu_id]) ? 1000000000 : 10000000000ULL));
}
/* Free device buffers and host arrays created by create_clobj().
 * host_pass doubles as the "allocated" flag, so this is safe to call twice. */
static void release_clobj(void)
{
	if (host_pass) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_salt), "Release mem salt");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");
		HANDLE_CLERROR(clReleaseMemObject(mem_state), "Release mem state");

		/* MEM_FREE also NULLs host_pass, arming the guard above. */
		MEM_FREE(host_pass);
		MEM_FREE(host_salt);
		MEM_FREE(host_crack);
		MEM_FREE(cracked);
	}
}
static void done(void)
{
if (autotuned) {
release_clobj();
HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
autotuned--;
}
}
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
static custom_salt *cur_salt;
cur_salt = mem_calloc_tiny(sizeof(custom_salt), MEM_ALIGN_WORD);
ctcopy += TAG_LENGTH;
p = strtokm(ctcopy, "$");
p = strtokm(NULL, "$");
p = strtokm(NULL, "$");
cur_salt->md_ealgo = atoi(p);
p = strtokm(NULL, "$");
cur_salt->md_keylen = atoi(p);
p = strtokm(NULL, "$");
p = strtokm(NULL, "$");
cur_salt->md_keys = atoi(p);
p = strtokm(NULL, "$");
cur_salt->md_iterations = atoi(p);
p = strtokm(NULL, "$");
for (i = 0; i < G_ELI_SALTLEN; i++)
cur_salt->md_salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "$");
for (i = 0; i < (G_ELI_MAXMKEYS * G_ELI_MKEYLEN); i++)
cur_salt->md_mkeys[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
// we append the count and EOM here, one time.
memcpy(&cur_salt->md_salt[G_ELI_SALTLEN], "\x0\x0\x0\x1\x80", 5);
cur_salt->saltlen = G_ELI_SALTLEN + 5; // we include the x80 byte in our saltlen, but the .cl kernel knows to reduce saltlen by 1
MEM_FREE(keeptr);
return (void *)cur_salt;
}
/* Make 'salt' the active salt: stash it for the post-PBKDF2 verify in
 * crypt_all(), fill the device-layout salt_t and queue an async upload
 * (CL_FALSE; ordered before the kernels on the same in-order queue). */
static void set_salt(void *salt)
{
	cur_salt = (custom_salt*)salt;
	/* saltlen already includes the appended \x00\x00\x00\x01\x80 suffix. */
	memcpy(host_salt->salt, cur_salt->md_salt, cur_salt->saltlen);
	host_salt->length = cur_salt->saltlen;
	host_salt->rounds = cur_salt->md_iterations;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE,
		0, sizeof(salt_t), host_salt, 0, NULL, NULL),
		"Copy salt to gpu");
}
/* Run PBKDF2-HMAC-SHA512 on the GPU for 'count' candidates, then derive the
 * GELI user key on the CPU and verify it against the current salt's master
 * keys, filling cracked[]. Returns count unchanged. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int i;
	const int count = *pcount;
	int index;
	/* Split the iteration count into HASH_LOOPS-sized kernel invocations so
	 * the GPU driver is not blocked for the whole PBKDF2 run. */
	int loops = (host_salt->rounds + HASH_LOOPS - 1) / HASH_LOOPS;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

	// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		global_work_size * sizeof(pass_t), host_pass,
		0, NULL, multi_profilingEvent[0]),
		"Copy data to gpu");

	// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, lws, 0, NULL,
		multi_profilingEvent[1]), "Run kernel");

	/* During autotune only a single loop iteration is timed. */
	for (i = 0; i < (ocl_autotune_running ? 1 : loops); i++) {
		BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id],
			split_kernel, 1, NULL,
			&global_work_size, lws, 0, NULL,
			multi_profilingEvent[2]), "Run split kernel");
		BENCH_CLERROR(clFinish(queue[gpu_id]), "clFinish");
		opencl_process_event();
	}

	// Read the result back
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
		global_work_size * sizeof(crack_t), host_crack,
		0, NULL, multi_profilingEvent[3]), "Copy result back");

	if (!ocl_autotune_running) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
		for (index = 0; index < count; index++) {
			union {
				uint64_t u[8];
				unsigned char bytes[64];
			} key;
			int j;

			memcpy(key.bytes, host_crack[index].hash, 64);
			/* Byte-swap each 64-bit word — presumably converting the
			 * kernel's big-endian output to host order; confirm against
			 * the .cl kernel. */
			for (j = 0; j < 8; j++)
				key.u[j] = JOHNSWAP64(key.u[j]);
			/* GELI user key = HMAC-SHA512 over the PBKDF2 output with an
			 * empty key, then verify decryption of the master keys. */
			JTR_hmac_sha512((const unsigned char*)"", 0, key.bytes, G_ELI_USERKEYLEN, key.bytes, G_ELI_USERKEYLEN);
			cracked[index] = geli_decrypt_verify(cur_salt, key.bytes);
		}
	}

	return count;
}
/* Return nonzero if any candidate in the batch verified in crypt_all(). */
static int cmp_all(void *binary, int count)
{
	int index = 0;

	while (index < count) {
		if (cracked[index])
			return 1;
		index++;
	}

	return 0;
}
/* Per-candidate check: cracked[] was already filled by crypt_all()'s
 * geli_decrypt_verify(), so just report the stored verdict. */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* Full master-key verification already happened in crypt_all(); nothing
 * further to check here. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store candidate password 'key' at slot 'index' of the host input array.
 * v[] is sized in whole uint64 words; strncpy's zero padding up to
 * PLAINTEXT_LENGTH clears any residue from a previous, longer key. */
static void set_key(char *key, int index)
{
	int saved_len = MIN(strlen(key), PLAINTEXT_LENGTH);

	// make sure LAST uint64 that has any key in it gets null, since we simply
	// ^= the whole uint64 with the ipad/opad mask
	strncpy((char*)host_pass[index].v, key, PLAINTEXT_LENGTH);
	host_pass[index].length = saved_len;
}
/* Return the stored candidate for 'index', NUL-terminated at its recorded
 * length. NOTE: returns a pointer to a static buffer — not reentrant; the
 * value is only valid until the next call. */
static char *get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];

	memcpy(ret, host_pass[index].v, PLAINTEXT_LENGTH);
	ret[host_pass[index].length] = 0;

	return ret;
}
/* Format descriptor wiring the GELI OpenCL implementation into John's
 * format registry (see formats.h for the struct fmt_main layout). */
struct fmt_main fmt_opencl_geli = {
	{ /* fmt_params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{ /* tunable cost names */
			"iteration count",
		},
		{ FORMAT_TAG },
		geli_tests
	}, { /* fmt_methods; shared geli_common_* helpers come from geli_common.h */
		init,
		done,
		reset,
		fmt_default_prepare,
		geli_common_valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{ /* tunable cost value extractors */
			geli_common_iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
ast-dump-openmp-teams-distribute.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { // NOTE: FileCheck fixture — line/column positions below are load-bearing; comments are trailing-only so no token moves
#pragma omp target
#pragma omp teams distribute
  for (int i = 0; i < x; i++) // single canonical loop
    ;
}
void test_two(int x, int y) { // nested loops, no collapse: only the outer loop is associated with the directive
#pragma omp target
#pragma omp teams distribute
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_three(int x, int y) { // collapse(1): explicit but equivalent to the default association
#pragma omp target
#pragma omp teams distribute collapse(1)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_four(int x, int y) { // collapse(2): both loops associated with the directive
#pragma omp target
#pragma omp teams distribute collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_five(int x, int y, int z) { // collapse(2) with a third, non-associated innermost loop
#pragma omp target
#pragma omp teams distribute collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      for (int i = 0; i < z; i++)
        ;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-teams-distribute.c:3:1, line:8:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:8:1>
// CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:4:1, col:19>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:6:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:1, col:29>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:29>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-OMPTeamsDistributeDirective {{.*}} <col:1, col:29>
// CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:5:1) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:5:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:6:3> col:3 implicit 'int &'
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <col:3, line:7:5>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:5:1) *const restrict'
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <col:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:6:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-OMPTeamsDistributeDirective {{.*}} <line:5:1, col:29>
// CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:5:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <line:5:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:6:3> col:3 implicit 'int &'
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <col:3, line:7:5>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:5:1) *const restrict'
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <col:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:10:1, line:16:1> line:10:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:16:1>
// CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:11:1, col:19>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:13:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:12:1, col:29>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:29>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-OMPTeamsDistributeDirective {{.*}} <col:1, col:29>
// CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:12:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:13:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:11:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:12:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:13:3> col:3 implicit 'int &'
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:12:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:13:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:11:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:13:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-OMPTeamsDistributeDirective {{.*}} <line:12:1, col:29>
// CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:12:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:13:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:11:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <line:12:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:13:3> col:3 implicit 'int &'
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &'
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:12:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:13:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:18:1, line:24:1> line:18:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:24:1>
// CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:19:1, col:19>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:21:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:20:1, col:41>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:41>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-OMPTeamsDistributeDirective {{.*}} <col:1, col:41>
// CHECK-NEXT: | | | | | |-OMPCollapseClause {{.*}} <col:30, col:40>
// CHECK-NEXT: | | | | | | `-ConstantExpr {{.*}} <col:39> 'int'
// CHECK-NEXT: | | | | | | |-value: Int 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:39> 'int' 1
// CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:20:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:21:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:19:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:20:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:21:3> col:3 implicit 'int &'
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:20:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:21:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:19:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:21:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-OMPTeamsDistributeDirective {{.*}} <line:20:1, col:41>
// CHECK-NEXT: | | | |-OMPCollapseClause {{.*}} <col:30, col:40>
// CHECK-NEXT: | | | | `-ConstantExpr {{.*}} <col:39> 'int'
// CHECK-NEXT: | | | | |-value: Int 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:39> 'int' 1
// CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:20:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:21:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:19:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <line:20:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:21:3> col:3 implicit 'int &'
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &'
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:20:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:21:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:26:1, line:32:1> line:26:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:32:1>
// CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:27:1, col:19>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:29:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:30:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:28:1, col:41>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:41>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-OMPTeamsDistributeDirective {{.*}} <col:1, col:41>
// CHECK-NEXT: | | | | | |-OMPCollapseClause {{.*}} <col:30, col:40>
// CHECK-NEXT: | | | | | | `-ConstantExpr {{.*}} <col:39> 'int'
// CHECK-NEXT: | | | | | | |-value: Int 2
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:39> 'int' 2
// CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:28:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:29:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:30:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:27:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:28:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:29:3> col:3 implicit 'int &'
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:30:5> col:5 implicit 'int &'
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:28:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:29:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:30:25> col:25 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <line:29:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, line:30:28> 'long' '*'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <line:29:3, col:26> 'long' <IntegralCast>
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <line:30:5, col:28> 'long' <IntegralCast>
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-'
// CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast>
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:29:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:30:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:27:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:29:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:30:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-OMPTeamsDistributeDirective {{.*}} <line:28:1, col:41>
// CHECK-NEXT: | | | |-OMPCollapseClause {{.*}} <col:30, col:40>
// CHECK-NEXT: | | | | `-ConstantExpr {{.*}} <col:39> 'int'
// CHECK-NEXT: | | | | |-value: Int 2
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:39> 'int' 2
// CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:28:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:29:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:30:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:27:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <line:28:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:29:3> col:3 implicit 'int &'
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:30:5> col:5 implicit 'int &'
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:28:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:29:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:30:25> col:25 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <line:29:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long'
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, line:30:28> 'long' '*'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <line:29:3, col:26> 'long' <IntegralCast>
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <line:30:5, col:28> 'long' <IntegralCast>
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-'
// CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast>
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:29:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:30:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:34:1, line:41:1> line:34:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:41:1>
// CHECK-NEXT: `-OMPTargetDirective {{.*}} <line:35:1, col:19>
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:37:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:38:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:36:1, col:41>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <col:1, col:41>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-OMPTeamsDistributeDirective {{.*}} <col:1, col:41>
// CHECK-NEXT: | | | | |-OMPCollapseClause {{.*}} <col:30, col:40>
// CHECK-NEXT: | | | | | `-ConstantExpr {{.*}} <col:39> 'int'
// CHECK-NEXT: | | | | | |-value: Int 2
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:39> 'int' 2
// CHECK-NEXT: | | | | `-CapturedStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:36:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:37:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:38:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:35:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <line:36:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:37:3> col:3 implicit 'int &'
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:38:5> col:5 implicit 'int &'
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &'
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:36:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | |-OMPCapturedExprDecl {{.*}} <line:37:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-OMPCapturedExprDecl {{.*}} <line:38:25> col:25 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | `-OMPCapturedExprDecl {{.*}} <line:37:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long'
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-'
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:3, line:38:28> 'long' '*'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <line:37:3, col:26> 'long' <IntegralCast>
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <line:38:5, col:28> 'long' <IntegralCast>
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/'
// CHECK-NEXT: | | | | |-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | `-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-'
// CHECK-NEXT: | | | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast>
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:37:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:38:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:35:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:37:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:38:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-OMPTeamsDistributeDirective {{.*}} <line:36:1, col:41>
// CHECK-NEXT: | | |-OMPCollapseClause {{.*}} <col:30, col:40>
// CHECK-NEXT: | | | `-ConstantExpr {{.*}} <col:39> 'int'
// CHECK-NEXT: | | | |-value: Int 2
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:39> 'int' 2
// CHECK-NEXT: | | `-CapturedStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:36:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:37:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:38:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:35:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <line:36:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:37:3> col:3 implicit 'int &'
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:38:5> col:5 implicit 'int &'
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &'
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:36:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | |-OMPCapturedExprDecl {{.*}} <line:37:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-OMPCapturedExprDecl {{.*}} <line:38:25> col:25 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-OMPCapturedExprDecl {{.*}} <line:37:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long'
// CHECK-NEXT: | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-'
// CHECK-NEXT: | |-BinaryOperator {{.*}} <col:3, line:38:28> 'long' '*'
// CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} <line:37:3, col:26> 'long' <IntegralCast>
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <line:38:5, col:28> 'long' <IntegralCast>
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/'
// CHECK-NEXT: | | |-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | `-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-'
// CHECK-NEXT: | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast>
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:37:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:38:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
NeuralNet-OpenMP.c | /*
Author: Makarios Christakis
Description:
Feedforward multi layer neural network implementation, parallelized for CPUs
using the OpenMP API.
Trained using the MNIST fashion dataset, after normalising the pixel values and
initialising the neuron weights from a standard normal distribution.
Training using the parameters below (500 epochs, 60k training datapoints), the whole
program terminates in about 20 minutes. It achieves 97.4% accuracy on the training
dataset and 85.9% accuracy on the testing set.
More info can be found in execution_info.md
*/
// **********************************************************
// DEFINITIONS
#define NL1 100 // 1st layer size
#define NL2 10 // output layer size
#define NINPUT 784 //input size
#define NTRAIN 60000 //training set size
#define NTEST 10000 //testing set size
#define ITERATIONS 500 //number of epochs
#define ALPHA (double)0.05 //learning rate
// **********************************************************
// INCLUDES
#include "extra_functions.c"
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
// **********************************************************
// GLOBAL VARS
double WL1[NL1][NINPUT + 1];
double WL2[NL2][NL1 + 1];
// layer internal states
double DL1[NL1];
double DL2[NL2];
// layer outputs
double OL1[NL1];
double OL2[NL2];
// layer deltas
double delta2[NL2];
double delta1[NL1];
//data
double data_train[NTRAIN][NINPUT];
double data_test[NTEST][NINPUT];
int class_train[NTRAIN];
int class_test[NTEST];
double input[NINPUT];
// **********************************************************
// Implements the feedforward part of the Neural Network using the vector "in" as input.
// Feedforward pass: propagates the input vector "in" through both layers.
// Pre-activation sums are stored in DL1/DL2 and logistic outputs in OL1/OL2,
// so trainNN can reuse them for backpropagation.
void activateNN(double *in){
	// Hidden layer: each neuron computes a weighted sum of the raw inputs.
	#pragma omp parallel for
	for (int neuron = 0; neuron < NL1; neuron++)
	{
		double acc = 0;
		for (int k = 0; k < NINPUT; k++)
		{
			acc += WL1[neuron][k] * in[k];
		}
		acc += WL1[neuron][NINPUT]; //add bias neuron weight
		DL1[neuron] = acc;
		OL1[neuron] = logistic(acc);
	}
	// Output layer: weighted sum over the hidden-layer activations.
	#pragma omp parallel for
	for (int neuron = 0; neuron < NL2; neuron++)
	{
		double acc = 0;
		for (int k = 0; k < NL1; k++)
		{
			acc += WL2[neuron][k] * OL1[k];
		}
		acc += WL2[neuron][NL1]; //add bias neuron weight
		DL2[neuron] = acc;
		OL2[neuron] = logistic(acc);
	}
}
// One online-gradient training step: runs the forward pass on "in", then
// backpropagates the error against the target vector "desired" and updates
// both weight layers with learning rate ALPHA.
void trainNN(double *in,double *desired){
	// Forward pass fills DL1/DL2 and OL1/OL2.
	activateNN(in);
	#pragma omp parallel
	{
		// Output-layer deltas: error times the logistic derivative o*(1-o).
		#pragma omp for
		for (int n = 0; n < NL2; n++)
		{
			delta2[n] = (OL2[n]-desired[n])*OL2[n]*(1-OL2[n]);
		}
		// Hidden-layer deltas: backpropagate the output deltas through WL2.
		#pragma omp for
		for (int n = 0; n < NL1; n++)
		{
			double acc = 0;
			for (int k = 0; k < NL2; k++)
			{
				acc += WL2[k][n] * delta2[k];
			}
			double out = OL1[n];
			delta1[n] = acc * out * (1-out);
		}
	}
	#pragma omp parallel
	{
		// Gradient-descent step for the output layer; "nowait" lets threads
		// proceed to the hidden-layer loop without an intermediate barrier,
		// which is safe because the two loops touch disjoint weight arrays.
		#pragma omp for nowait
		for (int n = 0; n < NL2; n++)
		{
			for (int k = 0; k < NL1; k++)
			{
				WL2[n][k] -= ALPHA * OL1[k] * delta2[n];
			}
			WL2[n][NL1] -= ALPHA * delta2[n];//update bias neuron weight
		}
		// Gradient-descent step for the hidden layer.
		#pragma omp for
		for (int n = 0; n < NL1; n++)
		{
			for (int k = 0; k < NINPUT; k++)
			{
				WL1[n][k] -= ALPHA * in[k] * delta1[n];
			}
			WL1[n][NINPUT] -= ALPHA * delta1[n];//update bias neuron weight
		}
	}
}
// **********************************************************
// Evaluates which class the network predicts that the input belongs to
// by finding the argmax of the output layer.
// Afterwards it updates the confusion matrix with the prediction.
// Determines the class the network predicts for the last activated input
// (argmax of the output layer OL2) and records the (prediction, truth)
// pair in the confusion matrix.
void evaluate(int inputClass,double confMatrix[NL2][NL2]){
	int best = 0;
	double bestVal = 0;
	for (int k = 0; k < NL2; k++)
	{
		if (OL2[k] > bestVal)
		{
			bestVal = OL2[k];
			best = k;
		}
	}
	// Tie-break: if the true class shares the maximum output value with
	// another neuron, count the sample as correctly classified.
	if (OL2[inputClass] == bestVal)
	{
		best = inputClass;
	}
	confMatrix[best][inputClass]++;
}
// **********************************************************
// Normalizes the input data into normal distribution N(0,1)
// Standardises each input feature (column) of "in" to zero mean and unit
// variance, estimated over the first inSize rows (sample variance, /(n-1)).
// Columns with zero variance — e.g. always-black border pixels in MNIST —
// are only mean-centred: dividing by a zero stddev would fill the data
// with NaN/inf and poison every subsequent forward pass.
void normalizeData(double in[][NINPUT],int inSize){
	double average[NINPUT] = {0};
	double var[NINPUT] = {0};
	#pragma omp parallel for
	for (int i = 0; i < NINPUT; i++)
	{
		for (int j = 0; j < inSize; j++)//calculate mean
		{
			average[i] += in[j][i];
		}
		average[i] /= inSize;
		double mean = average[i];
		for (int j = 0; j < inSize; j++)//calculate variance
		{
			var[i] += (in[j][i] - mean)*(in[j][i] - mean);
		}
		if (inSize > 1)//guard: sample variance is undefined for one row
		{
			var[i] /= inSize-1;
		}
	}
	#pragma omp parallel for
	for (int i = 0; i < NINPUT; i++)
	{
		double mean = average[i];
		double stddev = sqrt(var[i]);
		for (int j = 0; j < inSize; j++)
		{
			in[j][i] -= mean;
			if (stddev > 0)//skip scaling for constant (zero-variance) columns
			{
				in[j][i] /= stddev;
			}
		}
	}
}
// **********************************************************
// Program entry point: loads the MNIST fashion data, standardises it,
// trains the network on randomly drawn samples, then reports confusion
// matrices and hit rates for the training and testing sets.
int main() {
	double desiredOut[NL2]={0};
	double confusionMatrixTrain[NL2][NL2]= {0};
	double confusionMatrixTest[NL2][NL2]= {0};
	readfile("./DATA/fashion-mnist_train.csv",class_train,data_train,NTRAIN);
	readfile("./DATA/fashion-mnist_test.csv",class_test,data_test,NTEST);
	normalizeData(data_test,NTEST);
	normalizeData(data_train,NTRAIN);
	initVecs();//initialise weights
	// Target vector baseline: 0.1 everywhere; the true class is raised to
	// 0.9 only for the duration of each training step.
	for (int k = 0; k < NL2; k++)
	{
		desiredOut[k] = 0.1;
	}
	for (int step = 0; step < NTRAIN*ITERATIONS; step++)//train the nn
	{
		int pick = rand()%NTRAIN;
		desiredOut[class_train[pick]] = 0.9;
		trainNN(data_train[pick],desiredOut);
		desiredOut[class_train[pick]] = 0.1;//restore the baseline target
	}
	printf("TRAINING FINISHED!\n\n");
	for (int k = 0; k < NTRAIN; k++)//test with training set
	{
		activateNN(data_train[k]);
		evaluate(class_train[k],confusionMatrixTrain);
	}
	for (int k = 0; k < NTEST; k++)//test with testing set
	{
		activateNN(data_test[k]);
		evaluate(class_test[k],confusionMatrixTest);
	}
	// The confusion-matrix diagonal holds the correctly classified counts.
	double testCorrect = 0;
	double trainCorrect = 0;
	for (int k = 0; k < NL2; k++)
	{
		testCorrect += confusionMatrixTest[k][k];
		trainCorrect += confusionMatrixTrain[k][k];
	}
	double totalCorrect = testCorrect + trainCorrect;
	testCorrect /= (double)NTEST;
	trainCorrect /= (double)NTRAIN;
	totalCorrect /= ((double)NTEST+(double)NTRAIN);
	printf("TRAINING SAMPLES CONFUSION MATRIX:\n");
	printTable(confusionMatrixTrain);
	printf("TESTING SAMPLES CONFUSION MATRIX:\n");
	printTable(confusionMatrixTest);
	printf("Correct rate in training samples: %0.3f\n",trainCorrect);
	printf("Correct rate in testing samples: %0.3f\n",testCorrect);
	printf("Overall hit rate: %0.3f\n",totalCorrect);
	printf("Learning rate = %0.4f\n",ALPHA);
	printf("EPOCHS = %d\n",(int)ITERATIONS);
	return 0;
} |
professor_challenge_optmized.c | /*
The number of primes up to 600000 is 49098 and took it 39s to solve it in parallel and 40s sequential
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <omp.h>
/*
 * Counts the odd primes in the half-open range [start, start+chunck).
 * The prime 2 is deliberately NOT counted here: callers account for it
 * separately, which lets this loop skip every even candidate.
 * Trial division only tests odd divisors up to sqrt(i): a factor above
 * the square root would imply a matching factor below it, so the old
 * bound of j < i did the same work in O(n) instead of O(sqrt(n)).
 */
static const unsigned int verify_subdomain(const unsigned int start, const unsigned int chunck) {
	unsigned int i = 0, j = 0, counter = 0, begin = 0;
	/* Start on an odd number: even numbers other than two are never prime */
	begin = (0 == start%2) ? start+1: start;
	for(i = begin; i < start+chunck; i += 2) {
		if(1 == i) /* one is not a prime */
			continue;
		unsigned int is_prime = 1;
		for(j = 3; j*j <= i; j += 2)
			if(0 == i%j) {
				is_prime = 0;
				break;
			}
		if(is_prime)
			counter += 1;
	}
	return counter;
}
/*
 * Counts the primes strictly below "limit" by splitting the search space
 * evenly across OpenMP threads.
 * counter starts at one to account for the prime 2, which
 * verify_subdomain never counts.
 */
static const unsigned int n_primes_parallel(const unsigned int limit) {
	unsigned int counter = 1;
	/* Dynamic adjustment didn't show good results for this particular case */
	omp_set_dynamic(0);
	#pragma omp parallel default(shared) num_threads(2)
	{
		/* Declared inside the region, so each variable is private per thread */
		unsigned int this_thread = omp_get_thread_num();
		unsigned int n_threads = omp_get_num_threads();
		unsigned int chunck = limit/n_threads;
		unsigned int start = this_thread*chunck;
		/* The last thread absorbs the remainder when limit isn't divisible */
		if(n_threads-1 == this_thread)
			chunck = limit-start;
		unsigned int found = verify_subdomain(start, chunck);
		/* Unsynchronised "counter +=" from several threads was a data race
		   that could silently drop updates; the atomic makes it safe. */
		#pragma omp atomic
		counter += found;
		printf("#%d out of %d\tstart at %d and checking up to %d\n", this_thread+1, n_threads, start, start+chunck);
	}
	return counter;
}
/*
 * Sequential reference count of the primes strictly below "limit".
 * The loop only visits odd candidates starting at 3, so the prime 2 is
 * handled by the counter's initial value — but only when 2 is actually
 * below the limit (the old code wrongly returned 1 for limit <= 2).
 * Trial division stops at sqrt(i): a larger factor implies a smaller one.
 */
static const unsigned int n_primes_sequential(const unsigned int limit) {
	unsigned int i = 0, j = 0;
	unsigned int counter = (limit > 2) ? 1 : 0;
	for(i = 3; i < limit; i += 2) {
		unsigned int is_prime = 1;
		for(j = 3; j*j <= i; j += 2)
			if(0 == i%j) {
				is_prime = 0;
				break;
			}
		if(is_prime)
			counter += 1;
	}
	return counter;
}
/*
 * Runs the parallel and sequential prime counters over the same limit,
 * times both, and prints the result (or an error if the counts differ).
 */
int main(int argc, char **argv) {
	/* Above that number the parallel doesn't perform better than the sequential */
	const unsigned int limit = 600000;
	unsigned int primes_parallel = 0, primes_sequential = 0;
	float start_parallel = 0.0, end_parallel = 0.0, start_sequential = 0.0, end_sequential = 0.0;
	/* clock() returns integer clock_t ticks: without the cast the division
	   by CLOCKS_PER_SEC is integer math that truncates to whole seconds
	   BEFORE the float assignment, destroying sub-second precision.
	   NOTE(review): clock() measures CPU time summed over all threads, so
	   the parallel figure overstates wall time — omp_get_wtime() would be
	   the right tool if wall-clock comparison is intended; confirm. */
	start_parallel = (float)clock()/CLOCKS_PER_SEC;
	primes_parallel = n_primes_parallel(limit);
	end_parallel = (float)clock()/CLOCKS_PER_SEC;
	start_sequential = (float)clock()/CLOCKS_PER_SEC;
	primes_sequential = n_primes_sequential(limit);
	end_sequential = (float)clock()/CLOCKS_PER_SEC;
	if(primes_parallel == primes_sequential)
		printf("\nThe number of primes up to %d is %d and took it %.0fs to solve it in parallel and %.0fs sequential\n", limit, primes_sequential, end_parallel - start_parallel, end_sequential - start_sequential);
	else
		printf("\nERROR\tWrong number of primes up to %d calculated:\n\tParallel: %d\n\tSequential: %d\n", limit, primes_parallel, primes_sequential);
	return 0;
}
|
declare_variant_ast_print.c | // RUN: %clang_cc1 -verify -fopenmp -x c -std=c99 -ast-print %s -o - -Wno-openmp-clauses | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c -std=c99 -ast-print %s -o - -Wno-openmp-clauses | FileCheck %s
// expected-no-diagnostics
int foo(void);
#pragma omp declare variant(foo) match(xxx={}, yyy={ccc})
#pragma omp declare variant(foo) match(xxx={vvv})
#pragma omp declare variant(foo) match(implementation={vendor(score(0):llvm)}, device={kind(fpga)})
#pragma omp declare variant(foo) match(implementation={vendor(llvm), xxx})
#pragma omp declare variant(foo) match(implementation={vendor(unknown)}, device={kind(gpu)})
#pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm, xxx, ibm)}, device={kind(cpu, nohost)})
#pragma omp declare variant(foo) match(device={kind(host)})
#pragma omp declare variant(foo) match(device={kind(nohost), xxx})
int bar(void);
// CHECK: int foo();
// CHECK-NEXT: #pragma omp declare variant(foo) match(device={kind(nohost)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(device={kind(host)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm)}, device={kind(cpu, nohost)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(unknown)}, device={kind(gpu)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(llvm)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(score(0): llvm)}, device={kind(fpga)})
// CHECK-NEXT: int bar();
|
ast-dump-openmp-for-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { // NOTE: the CHECK lines below hardcode line:col positions -- do not add/remove lines or reindent
#pragma omp for simd
  for (int i = 0; i < x; i++)
    ;
}
void test_two(int x, int y) { // two perfectly nested loops, no collapse clause (positions are load-bearing for CHECK)
#pragma omp for simd
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_three(int x, int y) { // collapse(1): only the outer loop is associated with the directive
#pragma omp for simd collapse(1)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_four(int x, int y) { // collapse(2): both loops are associated with the directive
#pragma omp for simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_five(int x, int y, int z) { // collapse(2) plus a third, non-associated innermost loop
#pragma omp for simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      for (int i = 0; i < z; i++)
        ;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-for-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPForSimdDirective {{.*}} <line:4:9, col:21>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPForSimdDirective {{.*}} <line:10:9, col:21>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPForSimdDirective {{.*}} <line:17:9, col:33>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:22, col:32>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:31> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:31> 'int' 1
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPForSimdDirective {{.*}} <line:24:9, col:33>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:22, col:32>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:31> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:31> 'int' 2
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPForSimdDirective {{.*}} <line:31:9, col:33>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:22, col:32>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:31> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:31> 'int' 2
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
GB_binop__times_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__times_uint16
// A.*B function (eWiseMult): GB_AemultB__times_uint16
// A*D function (colscale): GB_AxD__times_uint16
// D*A function (rowscale): GB_DxB__times_uint16
// C+=B function (dense accum): GB_Cdense_accumB__times_uint16
// C+=b function (dense accum): GB_Cdense_accumb__times_uint16
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__times_uint16
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__times_uint16
// C=scalar+B GB_bind1st__times_uint16
// C=scalar+B' GB_bind1st_tran__times_uint16
// C=A+scalar GB_bind2nd__times_uint16
// C=A'+scalar GB_bind2nd_tran__times_uint16
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x * y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_UINT16 || GxB_NO_TIMES_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; the TIMES operator is
// applied elementwise by the shared dense template.
void GB_Cdense_ewise3_accum__times_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // NOTE(review): unlike the sibling kernels this one has no GB_DISABLE
    // guard; presumably the caller checks it -- confirm upstream.
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the TIMES operator is
// applied elementwise, with no accumulation into C's prior contents.
GrB_Info GB_Cdense_ewise3_noaccum__times_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.  The slice
// arrays partition B's entries across ntasks tasks on nthreads threads.
GrB_Info GB_Cdense_accumB__times_uint16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into every entry of a dense matrix C.
GrB_Info GB_Cdense_accumb__times_uint16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): the return below is unreachable (the block above already
    // returns) -- a harmless artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB_AxD__times_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // only C's numerical values are computed here; the template reads Cx
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB_DxB__times_uint16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // only C's numerical values are computed here; the template reads Cx
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, where the "add" operator here is TIMES
// (the per-entry arithmetic is supplied by the GB_BINOP macro above).
GrB_Info GB_AaddB__times_uint16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, applying TIMES to entries present
// in both A and B (the loop lives in the shared emult template).
GrB_Info GB_AemultB__times_uint16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x * Bx [p] for all anz entries: apply the TIMES operator with
// the scalar x bound as the first operand.
GrB_Info GB_bind1st__times_uint16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cz = (uint16_t *) Cx_output ;
    const uint16_t xs = (*((uint16_t *) x_input)) ;
    const uint16_t *Bz = (uint16_t *) Bx_input ;
    int64_t p ;
    // entries are independent, so a static schedule splits them evenly
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cz [p] = (xs * Bz [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] * y for all anz entries: apply the TIMES operator with
// the scalar y bound as the second operand.
GrB_Info GB_bind2nd__times_uint16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cz = (uint16_t *) Cx_output ;
    const uint16_t *Az = (uint16_t *) Ax_input ;
    const uint16_t ys = (*((uint16_t *) y_input)) ;
    int64_t p ;
    // entries are independent, so a static schedule splits them evenly
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cz [p] = (Az [p] * ys) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (x * aij) ; \
}
// C = op (x, A'): transpose A and apply cij = x * aij via the GB_CAST_OP
// macro defined just above this function.
GrB_Info GB_bind1st_tran__times_uint16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (aij * y) ; \
}
// C = op (A', y): transpose A and apply cij = aij * y via the GB_CAST_OP
// macro defined just above this function.
GrB_Info GB_bind2nd_tran__times_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
diagsm_x_bsr_u_col.c | #include "alphasparse/opt.h"
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
/* y := alpha * x, computed as a dense scaled copy.  The filename suggests a
 * triangular solve against a unit-diagonal ("_u_") BSR matrix with
 * column-major dense operands ("col"): with a unit diagonal the solve
 * reduces to scaling, which is why A's values are never read.
 * x and y hold `columns` dense columns with leading dimensions ldx/ldy;
 * rows span A->rows * A->block_size scalar rows.
 * NOTE(review): assumes alpha_mul(dst, a, b) stores a*b into dst -- confirm
 * against the kernel helper macros. */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_BSR *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    const ALPHA_INT num_thread = alpha_get_thread_num();
    /* Columns are independent, so parallelize across them when OpenMP is on */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT c = 0; c < columns; ++c)
    {
        for (ALPHA_INT r = 0; r < A->rows * A->block_size; ++r)
        {
            /* y[c][r] = alpha * x[c][r]; index2 maps (col,row,ld) to a flat offset */
            alpha_mul(y[index2(c, r, ldy)] , alpha , x[index2(c, r, ldx)]);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
merge_sect_unnested.c | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <omp.h>
/* OpenMP Parallel Mergesort - Sections
*
* @author: ANDREW VAILLANCOURT
* 2019
*/
void merge(int a[], int size, int temp[]);
void insertion_sort(int a[], int size);
void mergesort_serial(int a[], int size, int temp[], int thresh);
void mergesort_parallel_omp(int a[], int size, int temp[], int threads, int thresh);
void run_omp(int a[], int size, int temp[], int threads, int thresh);
/* Parse size/threshold/threads, build a random array, time the parallel
 * mergesort, and verify the result is sorted.  Exit status 1 on any error. */
int main(int argc, char *argv[]) {
    if (argc != 4) {
        printf("Usage: %s array_size threshold num_threads\n", argv[0]);
        return 1;
    }
    int size = atoi(argv[1]);    // Array size
    int thresh = atoi(argv[2]);  // Threshold at which recursive mergesort switches to insertion sort
    int threads = atoi(argv[3]); // Requested number of threads
    double start, end;
    // Check nested parallelism availability
    omp_set_nested(1);
    if (omp_get_nested() != 1) {
        puts("Warning: Nested parallelism desired but unavailable");
    }
    // Check processors and threads
    int processors = omp_get_num_procs(); // Available processors
    if (threads > processors) {
        printf("Warning: %d threads requested, will run_omp on %d processors available\n", threads, processors);
    }
    int max_threads = omp_get_max_threads(); // Max available threads
    if (threads > max_threads) // Requested threads are more than max available
    {
        printf("Error: Cannot use %d threads, only %d threads available\n",
               threads, max_threads);
        return 1;
    }
    // BUG FIX: the original called omp_set_num_threads(threads) only inside
    // the "threads > processors" warning branch, so the requested thread
    // count was ignored in the common case; request it unconditionally once
    // it has been validated.
    omp_set_num_threads(threads);
    // Array allocation
    int *a = malloc(sizeof(int) * size);
    int *temp = malloc(sizeof(int) * size);
    if (a == NULL || temp == NULL) {
        printf("Error: Could not allocate array of size %d\n", size);
        free(a);    // free(NULL) is a no-op, so both calls are safe
        free(temp);
        return 1;
    }
    // Initialize array with a fixed seed so runs are reproducible
    int i;
    srand(314159);
    for (i = 0; i < size; i++) {
        a[i] = rand() % size;
    }
    // Run the sort and report the wall-clock time
    start = omp_get_wtime();
    run_omp(a, size, temp, threads, thresh);
    end = omp_get_wtime();
    printf("%.4f\n", end - start);
    // Verify the array is sorted
    for (i = 1; i < size; i++) {
        if (!(a[i - 1] <= a[i])) {
            printf("Error: final array not sorted => a[%d]=%d > a[%d]=%d\n", i - 1,
                   a[i - 1], i, a[i]);
            free(a);
            free(temp);
            return 1;
        }
    }
    // BUG FIX: the original leaked both arrays on every exit path
    free(a);
    free(temp);
    return 0;
}
// Entry point for the timed sort: (re)enables nested parallelism so the
// recursive 'parallel sections' below can fan out, then runs the sort.
void run_omp(int a[], int size, int temp[], int threads, int thresh) {
    omp_set_nested(1); // Enable nested parallelism, if available
    mergesort_parallel_omp(a, size, temp, threads, thresh);
}
// OpenMP merge sort with given number of threads.
// Recursively halves the array AND the thread budget: each half is sorted in
// its own `section`, then the two sorted halves are merged by this thread.
// When the budget reaches 1 thread, falls back to the serial mergesort.
void mergesort_parallel_omp (int a[], int size, int temp[], int threads, int thresh) {
  if (threads == 1) {
    mergesort_serial (a, size, temp, thresh);
  }
  else if (threads > 1) {
    // Two sections -> two concurrent recursive calls; relies on nested
    // parallelism being enabled by the caller (run_omp).
    #pragma omp parallel sections
    {
      #pragma omp section
      {
        mergesort_parallel_omp (a, size / 2, temp, threads / 2, thresh);
      }
      #pragma omp section
      {
        // Right half gets the remaining elements/threads when sizes are odd.
        mergesort_parallel_omp (a + size / 2, size - size / 2,
                                temp + size / 2, threads - threads / 2, thresh);
      }
    }
    // Both halves are sorted once the sections region ends (implicit barrier).
    merge (a, size, temp);
  }
  else {
    printf ("Error: %d threads\n", threads);
    return;
  }
}
// only called if num (remaining) threads = 1
// Classic top-down mergesort; arrays no longer than `thresh` are handled by
// insertion sort, which is faster for small inputs.
void mergesort_serial(int a[], int size, int temp[], int thresh) {
  // Switch to insertion sort for small arrays
  if (size <= thresh) {
    insertion_sort(a, size);
    return;
  }
  mergesort_serial(a, size / 2, temp, thresh);
  mergesort_serial(a + size / 2, size - size / 2, temp, thresh);
  merge(a, size, temp);
}
// Merge the two sorted halves a[0..size/2) and a[size/2..size) into temp,
// then copy the merged result back into a.
void merge(int a[], int size, int temp[]) {
  const int mid = size / 2;
  int left = 0;    // cursor into the left half
  int right = mid; // cursor into the right half
  int out = 0;     // cursor into temp

  // Take the smaller head element while both halves have elements left.
  while (left < mid && right < size)
    temp[out++] = (a[left] < a[right]) ? a[left++] : a[right++];

  // Drain whichever half still has elements.
  while (left < mid)
    temp[out++] = a[left++];
  while (right < size)
    temp[out++] = a[right++];

  // Copy sorted temp array into main array, a.
  memcpy(a, temp, size * sizeof(int));
}
// In-place insertion sort: grow a sorted prefix one element at a time by
// shifting larger elements right and dropping the key into its slot.
void insertion_sort(int a[], int size) {
  for (int k = 1; k < size; k++) {
    const int key = a[k];
    int pos = k - 1;
    while (pos >= 0 && a[pos] > key) {
      a[pos + 1] = a[pos];
      pos--;
    }
    a[pos + 1] = key;
  }
}
|
5987.c | // this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose
// Tiled 2-D FDTD kernel (PolyBench fdtd-2d, CHiLL-transformed): for each time
// step, update ey from hz, ex from hz, then hz from ex/ey. The time loop (t2)
// is inherently sequential; the spatial tile loops are parallelized.
void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) {
  int t10;
  int t8;
  int t6;
  int t4;
  int t2;
  for (t2 = 0; t2 <= tmax - 1; t2 += 1) {
    for (t4 = 0; t4 <= ny - 1; t4 += 1)
      ey[0][t4] = _fict_[t2];
    // BUG FIX: t6/t8/t10 are declared at function scope and were therefore
    // SHARED inside the parallel regions (only the loop index t4 is
    // predetermined private) — a data race producing wrong results under
    // OpenMP. They must be made private in every parallel loop below.
    #pragma omp parallel for private(t6, t8, t10)
    for (t4 = 1; t4 <= nx - 1; t4 += 16)
      for (t6 = t4; t6 <= (t4 + 15 < nx - 1 ? t4 + 15 : nx - 1); t6 += 1)
        for (t8 = 0; t8 <= ny - 1; t8 += 128)
          for (t10 = t8; t10 <= (ny - 1 < t8 + 127 ? ny - 1 : t8 + 127); t10 += 1)
            ey[t6][t10] = ey[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6 - 1][t10]);
    #pragma omp parallel for private(t6, t8, t10)
    for (t4 = 0; t4 <= nx - 1; t4 += 16)
      for (t6 = t4; t6 <= (t4 + 15 < nx - 1 ? t4 + 15 : nx - 1); t6 += 1)
        for (t8 = 1; t8 <= ny - 1; t8 += 128)
          for (t10 = t8; t10 <= (ny - 1 < t8 + 127 ? ny - 1 : t8 + 127); t10 += 1)
            ex[t6][t10] = ex[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6][t10 - 1]);
    #pragma omp parallel for private(t6, t8, t10)
    for (t4 = 0; t4 <= nx - 2; t4 += 16)
      for (t6 = t4; t6 <= (t4 + 15 < nx - 2 ? t4 + 15 : nx - 2); t6 += 1)
        for (t8 = 0; t8 <= ny - 2; t8 += 128)
          for (t10 = t8; t10 <= (ny - 2 < t8 + 127 ? ny - 2 : t8 + 127); t10 += 1)
            hz[t6][t10] = hz[t6][t10] - 0.69999999999999996 * (ex[t6][t10 + 1] - ex[t6][t10] + ey[t6 + 1][t10] - ey[t6][t10]);
  }
}
|
modernFft.h | /**
* @cond ___LICENSE___
*
* Copyright (c) 2016-2018 Zefiros Software.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
* @endcond
*/
#include "bspedupack.h"
#include <complex>
/****************** Sequential functions ********************************/
void UFft( double *x, uint32_t n, int sign, std::vector<double> &w )
{
    /* This sequential function computes the unordered discrete Fourier
       transform of a complex vector x of length n, stored in a real array
       of length 2n as pairs (Re x[j], Im x[j]), 0 <= j < n.
       n=2^m, m >= 0.
       If sign = 1, then the forward unordered dft FRx is computed;
       if sign =-1, the backward unordered dft conjg(F)Rx is computed,
       where F is the n by n Fourier matrix and R the n by n bit-reversal
       matrix. The output overwrites x.
       w is a table of n/2 complex weights, stored as pairs of reals,
          exp(-2*pi*i*j/n), 0 <= j < n/2,
       which must have been initialized before calling this function.
    */
    // mEnd = floor(log2(n)): number of butterfly stages (position of the
    // highest set bit of n; the `n | 1` guards _BitScanReverse against n==0).
#ifdef _MSC_VER
    unsigned long end;
    _BitScanReverse( &end, n | 1 );
    const uint32_t mEnd = static_cast< uint32_t >( end );
#else
    const uint32_t mEnd = 31 - __builtin_clz( n );
#endif
    // One pass per stage m; at stage m the butterflies span 2^m elements.
    for ( uint32_t m = 1; m <= mEnd; ++m )
    {
        // const uint32_t k = 1 << m;
        const uint32_t nk = n >> m;               // number of butterfly groups
        const uint32_t jEnd = 1 << ( m - 1 );     // butterflies per group
        for ( uint32_t r = 0; r < nk; ++r )
        {
            const uint32_t rk = r << ( m + 1 );   // group base in the real array
#pragma omp simd
            for ( uint32_t j = 0; j < jEnd; ++j )
            {
                const uint32_t jprime = j << 1;
                const uint32_t jnk = jprime * nk; // weight index for this butterfly
                const double wr = w[jnk];
                // sign flips Im(w), i.e. selects w vs conjg(w).
                const double wi = sign * w[ jnk + 1 ];
                const uint32_t j0 = rk + jprime;  // top element (re at j0, im at j1)
                const uint32_t j1 = j0 + 1;
                const uint32_t j2 = j0 + ( 1 << m ); // bottom element
                const uint32_t j3 = j2 + 1;
                // tau = w * x[bottom] (complex multiply on (re,im) pairs)
                const double taur = wr * x[j2] - wi * x[j3];
                const double taui = wi * x[j2] + wr * x[j3];
                // butterfly: bottom = top - tau, top = top + tau
                x[j2] = x[j0] - taur;
                x[j3] = x[j1] - taui;
                x[j0] += taur;
                x[j1] += taui;
            }
        }
    }
} /* end ufft */
void UFftInit( uint32_t n, std::vector<double> &w )
{
    /* This function initializes the n/2 weights to be used
       in a sequential radix-2 FFT of length n.
       n=2^m, m >= 0.
       w is a table of n/2 complex weights, stored as pairs of reals,
          exp(-2*pi*i*j/n), 0 <= j < n/2.
       Only the first octant is computed trigonometrically; the rest of the
       table is filled in via the symmetries of exp on the unit circle.
    */
    if ( n == 1 )
    {
        return; // length-1 FFT needs no weights
    }
    const double theta = ( -2.0 * M_PI ) / n;
    w[0] = 1.0; // weight 0 is always exp(0) = 1
    w[1] = 0.0;
    if ( n == 4 )
    {
        // single non-trivial weight: exp(-i*pi/2) = -i
        w[2] = 0.0;
        w[3] = -1.0;
    }
    else if ( n >= 8 )
    {
        /* weights 1 .. n/8 : computed directly via sin/cos */
        for ( uint32_t j = 1, jEnd = n >> 3; j <= jEnd; ++j )
        {
            const uint32_t j2 = j << 1;
            //            std::complex<double> wComplex( 0, j * theta );
            //            wComplex = std::exp( wComplex );
            //            //w[j2] = cos( j * theta );
            //            w[j2] = wComplex.real();
            //            //w[j2 + 1] = sin( j * theta );
            //            w[j2 + 1] = wComplex.imag();
#ifdef _MSC_VER
            w[j2] = std::cos( j * theta );
            w[j2 + 1] = std::sin( j * theta );
#else
            // sincos computes both values in one call; note the (im, re)
            // argument order matches (sin, cos).
            __builtin_sincos( j * theta, &w[j2 + 1], &w[j2] );
#endif // _MSC_VER
        }
        /* weights n/8+1 .. n/4 : mirror the first octant about -pi/4
           (cos/sin swap with sign flips) */
        for ( uint32_t j = 0, jEnd = n >> 3, n4 = n >> 2; j < jEnd; ++j )
        {
            const uint32_t n4j2 = ( n4 - j ) << 1;
            const uint32_t j2 = j << 1;
            w[n4j2] = -w[j2 + 1];
            w[n4j2 + 1] = -w[j2];
        }
        /* weights n/4+1 .. n/2-1 : reflect the first quadrant about -pi/2 */
        for ( uint32_t j = 1, jEnd = n >> 2, n2 = n >> 1; j < jEnd; ++j )
        {
            const uint32_t n2j2 = ( n2 - j ) << 1;
            const uint32_t j2 = j << 1;
            w[n2j2] = -w[j2];
            w[n2j2 + 1] = w[j2 + 1];
        }
    }
} /* end UFftInit */
void Twiddle( std::vector<double> &x, uint32_t n, int sign, double *w )
{
    /* Componentwise complex multiplication of the length-n vector x (packed
       as re/im pairs of reals) by w when sign=1, or by conjg(w) when
       sign=-1. The result overwrites x. */
    for ( uint32_t idx = 0; idx < n; ++idx )
    {
        const uint32_t re = idx << 1;
        const uint32_t im = re + 1;
        const double wRe = w[re];
        const double wIm = sign * w[im]; // sign selects w vs conjg(w)
        const double xRe = x[re];
        const double xIm = x[im];
        x[re] = wRe * xRe - wIm * xIm;
        x[im] = wIm * xRe + wRe * xIm;
    }
} /* end Twiddle */
void TwiddleInit( uint32_t n, double alpha, std::vector<uint32_t> &rho, double *w )
{
    /* Fill w with n complex twiddle weights, stored as re/im pairs:
           w[j] = exp(-2*pi*i*rho[j]*alpha/n), 0 <= j < n,
       where n = 2^m, alpha is a real shift parameter and rho is the
       (already initialized) bit-reversal permutation of length n. */
    const double theta = ( -2.0 * M_PI * alpha ) / n;
    for ( uint32_t j = 0; j < n; ++j )
    {
        const double angle = rho[j] * theta;
        const uint32_t re = j << 1;
        w[re] = cos( angle );
        w[re + 1] = sin( angle );
    }
} /* end TwiddleInit */
void Permute( std::vector<double> &x, uint32_t n, std::vector<uint32_t> &sigma )
{
    /* In-place permutation of the complex vector x (length n >= 1, packed
       re/im pairs) by sigma: y[j] = x[sigma[j]]. sigma must decompose into
       disjoint swaps; each swap is applied once (only when j < sigma[j]). */
    for ( uint32_t idx = 0; idx < n; idx++ )
    {
        const uint32_t target = sigma[idx];
        if ( idx < target )
        {
            const uint32_t a = idx << 1;
            const uint32_t b = target << 1;
            // swap the real parts, then the imaginary parts
            const double re = x[a];
            const double im = x[a + 1];
            x[a] = x[b];
            x[a + 1] = x[b + 1];
            x[b] = re;
            x[b + 1] = im;
        }
    }
} /* end Permute */
void BitRevInit( uint32_t n, std::vector<uint32_t> &rho )
{
    /* Fill rho with the bit-reversal permutation of length n, n = 2^m:
       rho[j] is j with its low log2(n) bits reversed. */
    if ( n == 1 )
    {
        rho[0] = 0;
        return;
    }
    // bits = log2(n) (index of the highest set bit; `n | 1` guards against 0).
#ifdef _MSC_VER
    unsigned long msb;
    _BitScanReverse( &msb, n | 1 );
    const uint32_t bits = static_cast<uint32_t>( msb );
#else
    const uint32_t bits = 31 - __builtin_clz( n );
#endif
    for ( uint32_t j = 0; j < n; ++j )
    {
        uint32_t reversed = 0;
        uint32_t src = j;
        for ( uint32_t k = 0; k < bits; ++k )
        {
            // shift the lowest remaining bit of j into `reversed` from the right
            reversed = ( reversed << 1 ) | ( src & 0x1u );
            src >>= 1;
        }
        rho[j] = reversed;
    }
} /* end BitRevInit */
/****************** Parallel functions ********************************/
int K1Init( uint32_t n, uint32_t p )
{
    /* Computes the largest butterfly size k1 of the first superstep in a
       parallel FFT of length n on p processors with p < n: the smallest
       power of np = n/p that reaches p determines how much of the
       transform fits in one superstep. */
    const uint32_t np = n / p;
    uint32_t c = 1;
    while ( c < p )
    {
        c *= np;
    }
    return n / c;
} /* end K1Init */
void BSPRedistr( std::vector<double> &x, uint32_t n, uint32_t p, uint32_t s, uint32_t c0, uint32_t c1,
                 bool rev, std::vector<uint32_t> &rho_p )
{
    /* This function redistributes the complex vector x of length n,
       stored as pairs of reals, from group-cyclic distribution
       over p processors with cycle c0 to cycle c1, where
       c0, c1, p, n are powers of two with 1 <= c0 <= c1 <= p <= n.
       s is the processor number, 0 <= s < p.
       If rev=true, the function assumes the processor numbering
       is bit reversed on input.
       rho_p is the bit-reversal permutation of length p.
       Communication goes through BSPLib puts and completes at the final
       SyncPutRequests; BSPProf calls only bracket the region for profiling. */
    const uint32_t np = n / p;
    const uint32_t ratio = c1 / c0;
    // Each packet carries `size` consecutive (in the new distribution)
    // complex numbers; np/size packets leave this processor.
    const uint32_t size = std::max( np / ratio, 1u );
    const uint32_t npackets = np / size;
    std::vector<double> tmp( 2 * size );
    // Decompose the (possibly bit-reversed) processor id s into its group
    // index j2 and position j0 within the cycle-c0 group.
    uint32_t j0, j2;
    if ( rev )
    {
        j2 = rho_p[s] / c0;
        j0 = rho_p[s] - c0 * j2;
    }
    else
    {
        j2 = s / c0;
        j0 = s - c0 * j2;
    }
    const uint32_t jglobInit = j2 * c0 * np + j0;
    const uint32_t c1np = c1 * np;
    BSPProf::InitCommunication();
    for ( uint32_t j = 0; j < npackets; ++j )
    {
        //jglob = j2 * c0 * np + j * c0 + j0;
        const uint32_t jglob = jglobInit + j * c0; // global index of packet head
        const uint32_t jglobDiv = jglob / c1np;
        //destproc = ( jglob / ( c1 * np ) ) * c1 + jglob % c1;
        const uint32_t destproc = jglobDiv * c1 + jglob % c1;
        //destindex = ( jglob % ( c1 * np ) ) / c1;
        const uint32_t destindex = ( jglob - jglobDiv * c1np ) / c1;
        // Gather the packet's complex numbers (stride `ratio` in x) into tmp.
        for ( uint32_t r = 0; r < size; ++r )
        {
            const uint32_t r2 = r << 1;
            const uint32_t jRRatio = ( j + r * ratio ) << 1;
            tmp[r2] = x[jRRatio];
            tmp[r2 + 1] = x[jRRatio + 1];
        }
        BSPLib::PutIterator( destproc, tmp.begin(), size * 2, x.begin(), destindex * 2 );
    }
    BSPProf::FinishCommunication();
    BSPLib::SyncPutRequests();
} /* end BSPRedistr */
void bspfft( std::vector<double> &x, uint32_t n, uint32_t p, uint32_t s, int sign, std::vector<double> &w0,
             std::vector<double> &w,
             std::vector<double> &tw, std::vector<uint32_t> &rho_np, std::vector<uint32_t> &rho_p )
{
    /* This parallel function computes the discrete Fourier transform
       of a complex array x of length n=2^m, m >= 1, stored in a real array
       of length 2n as pairs (Re x[j], Im x[j]), 0 <= j < n.
       x must have been registered before calling this function.
       p is the number of processors, p=2^q, 0 <= q < m.
       s is the processor number, 0 <= s < p.
       The function uses three weight tables:
           w0 for the unordered fft of length k1,
           w  for the unordered fft of length n/p,
           tw for a number of twiddles, each of length n/p.
       The function uses two bit-reversal permutations:
           rho_np of length n/p,
           rho_p  of length p.
       The weight tables and bit-reversal permutations must have been
       initialized before calling this function.
       If sign = 1, then the dft is computed,
           y[k] = sum j=0 to n-1 exp(-2*pi*i*k*j/n)*x[j], for 0 <= k < n.
       If sign =-1, then the inverse dft is computed,
           y[k] = (1/n) sum j=0 to n-1 exp(+2*pi*i*k*j/n)*x[j], for 0 <= k < n.
       Here, i=sqrt(-1). The output vector y overwrites x. */
    const uint32_t np = n / p;
    const uint32_t k1 = K1Init( n, p );
    // Local bit-reversal, then independent length-k1 FFTs on the local part.
    Permute( x, np, rho_np );
    bool rev = true; // processor numbering is bit-reversed only before the first redistribution
    for ( uint32_t r = 0, rEnd = np / k1; r < rEnd; ++r )
    {
        UFft( &x[2 * r * k1], k1, sign, w0 );
    }
    // Supersteps: redistribute, twiddle, then a local length-np FFT,
    // growing the cycle c by a factor np each round until it reaches p.
    for ( uint32_t c = k1, c0 = 1, ntw = 0; c <= p; c *= np, ++ntw )
    {
        BSPRedistr( x, n, p, s, c0, c, rev, rho_p );
        rev = false;
        Twiddle( x, np, sign, &tw[2 * ntw * np] );
        UFft( x.data(), np, sign, w );
        c0 = c;
    }
    // Inverse transform carries the 1/n normalization.
    if ( sign == -1 )
    {
        const double ninv = 1 / ( double )n;
        for ( uint32_t j = 0; j < 2 * np; j++ )
        {
            x[j] *= ninv;
        }
    }
} /* end bspfft */
void BSPFftInit( uint32_t n, uint32_t p, uint32_t s, std::vector<double> &w0, std::vector<double> &w,
                 std::vector<double> &tw,
                 std::vector<uint32_t> &rho_np, std::vector<uint32_t> &rho_p )
{
    /* This parallel function initializes all the tables used in the FFT:
       the two bit-reversal permutations, the two unordered-FFT weight
       tables, and one twiddle table per superstep of bspfft. The shift
       alpha depends on this processor's position (s mod c) within its
       group, matching the redistribution pattern in bspfft. */
    const uint32_t np = n / p;
    BitRevInit( np, rho_np );
    BitRevInit( p, rho_p );
    const uint32_t k1 = K1Init( n, p );
    UFftInit( k1, w0 );
    UFftInit( np, w );
    // One twiddle table per superstep (same c-progression as in bspfft).
    for ( uint32_t c = k1, ntw = 0; c <= p; c *= np, ++ntw )
    {
        const double alpha = ( s % c ) / ( double )( c );
        TwiddleInit( np, alpha, rho_np, &tw[2 * ntw * np] );
    }
} /* end BSPFftInit */
|
softmax_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: haitao@openailab.com
*/
#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include "softmax_param.h"
#include <math.h>
/**
* @brief softmax function
* @param[in] vec_in pointer to input vector
* @param[in] dim_vec input vector dimention
* @param[out] p_out pointer to output vector
* @return none.
*
*/
/* For each of the in_size inner positions, compute the maximum over the
 * on_size (softmax) axis of `input` and store it in `array`.
 * num_thread is accepted for API symmetry but unused here (the OpenMP
 * pragma is disabled in this reference implementation). */
static void GetMaxArray(void* input, void* array, int in_size, int on_size, int num_thread)
{
    float* in = ( float* )input;
    float* max_out = ( float* )array;

    /* Seed the maxima with the first slice, then fold in the rest. */
    memcpy(max_out, in, in_size * sizeof(float));
    for (int j = 1; j < on_size; j++)
    {
        const float* slice = in + j * in_size;
        for (int l = 0; l < in_size; l++)
        {
            if (max_out[l] < slice[l])
                max_out[l] = slice[l];
        }
    }
}
static void GetOutResult(void* input, void* output, void* array, void* sum_array, int in_size, int on_size,
int num_thread)
{
float* input_ptr = ( float* )input;
float* output_ptr = ( float* )output;
float* array_ptr = ( float* )array;
float* sum_array_ptr = ( float* )sum_array;
memset(sum_array, 0x0, in_size * sizeof(float));
/* get the exp and the summary */
// #pragma omp parallel for num_threads(num_thread)
for (int j = 0; j < on_size; j++)
{
for (int l = 0; l < in_size; l++)
{
int index = j * in_size + l;
output_ptr[index] = exp(input_ptr[index] - array_ptr[l]);
sum_array_ptr[l] += output_ptr[index];
}
}
/* the final result */
// #pragma omp parallel for num_threads(num_thread)
for (int j = 0; j < on_size; j++)
{
for (int l = 0; l < in_size; l++)
{
int index = j * in_size + l;
output_ptr[index] /= sum_array_ptr[l];
}
}
}
/* The reference softmax keeps no per-node state, so the init/release/prerun
 * hooks are all no-ops that simply report success. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Execute softmax on the node's input tensor along softmax_param->axis.
 * The shape is split into out_size (dims before axis) x on_size (the axis)
 * x in_size (dims after axis); softmax normalizes over on_size for every
 * (out, in) position. fp32 runs in place; uint8 is dequantized to fp32,
 * processed, and requantized. Returns 0 on success, -1 on allocation failure. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct ir_tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    struct softmax_param* softmax_param = ( struct softmax_param* )ir_node->op.param_mem;

    int element_size = input_tensor->elem_size;
    int type = input_tensor->data_type;
    int axis = softmax_param->axis;

    /* BUG FIX: the original copied the shape into a fixed int dims[4],
     * overflowing for tensors with more than 4 dims; read dims[] directly. */
    int out_size = 1;
    for (int i = 0; i < axis; i++)
        out_size *= input_tensor->dims[i];

    int in_size = 1;
    for (int i = axis + 1; i < (int)input_tensor->dim_num; i++)
        in_size *= input_tensor->dims[i];

    int on_size = input_tensor->dims[axis];

    uint8_t* input = input_tensor->data;
    uint8_t* output = output_tensor->data;

    float* max_array = ( float* )malloc(in_size * sizeof(float));
    float* sum_array = ( float* )malloc(in_size * sizeof(float));
    if (max_array == NULL || sum_array == NULL)
    {
        free(max_array);
        free(sum_array);
        return -1;
    }

    int on_in_size = on_size * in_size;

    if (type == TENGINE_DT_UINT8)
    {
        int total_size = on_in_size * out_size;
        /* BUG FIX: buffer size was written as `total_size * 4`; use
         * sizeof(float) instead of a magic constant. */
        float* input_f = ( float* )malloc(total_size * sizeof(float));
        float* output_f = ( float* )malloc(total_size * sizeof(float));
        if (input_f == NULL || output_f == NULL)
        {
            free(input_f);
            free(output_f);
            free(max_array);
            free(sum_array);
            return -1;
        }

        float input_scale = input_tensor->scale;
        float output_scale = output_tensor->scale;
        uint8_t input_zero = input_tensor->zero_point;
        uint8_t output_zero = output_tensor->zero_point;

        /* dequant to fp32 */
        for (int i = 0; i < out_size; i++)
            for (int j = 0; j < on_in_size; j++)
                input_f[i * on_in_size + j] = (input[i * on_in_size + j] - input_zero) * input_scale;

        /* fp32 softmax, one (on_size x in_size) image at a time */
        for (int i = 0; i < out_size; i++)
        {
            int img_base = i * in_size * on_size;
            GetMaxArray(input_f + img_base, max_array, in_size, on_size, exec_graph->num_thread);
            GetOutResult(input_f + img_base, output_f + img_base, max_array, sum_array, in_size, on_size,
                         exec_graph->num_thread);
        }

        /* quant to uint8 */
        for (int i = 0; i < out_size; i++)
            for (int j = 0; j < on_in_size; j++)
                output[i * on_in_size + j] = round((output_f[i * on_in_size + j] / output_scale) + output_zero);

        free(input_f);
        free(output_f);
    }
    else
    {
        for (int i = 0; i < out_size; i++)
        {
            /* img_base is a byte offset here (input/output are uint8_t*). */
            int img_base = i * on_in_size * element_size;
            GetMaxArray(input + img_base, max_array, in_size, on_size, exec_graph->num_thread);
            GetOutResult(input + img_base, output + img_base, max_array, sum_array, in_size, on_size,
                         exec_graph->num_thread);
        }
    }

    free(max_array);
    free(sum_array);
    return 0;
}
/* Propagate the input shape to the output tensor whenever they differ.
 * NOTE(review): dims[1..3] are read unconditionally, which assumes 4-D
 * tensors — confirm dim_num >= 4 for every graph that reaches this op. */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor;
    struct ir_tensor* output_tensor;
    int ret = 0;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    /* only reshape when any of the trailing dims disagrees */
    if (input_tensor->dims[1] != output_tensor->dims[1] || input_tensor->dims[2] != output_tensor->dims[2] ||
        input_tensor->dims[3] != output_tensor->dims[3])
        ret = set_ir_tensor_shape(output_tensor, input_tensor->dims, input_tensor->dim_num);

    return ret;
}
/* Reference implementation: can always run, advertised with the generic
 * "can do" priority so tuned backends win when available. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    return OPS_SCORE_CANDO;
}

/* Callback table wiring this reference softmax into the executor. */
static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = reshape,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

/* Registration hooks invoked by the module auto-registration machinery. */
static int reg_softmax_hcl_ops(void* arg)
{
    return register_builtin_node_ops(OP_SOFTMAX, &hcl_node_ops);
}

static int unreg_softmax_hcl_ops(void* arg)
{
    return unregister_builtin_node_ops(OP_SOFTMAX, &hcl_node_ops);
}

AUTO_REGISTER_OPS(reg_softmax_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_softmax_hcl_ops);
|
common.c | #include "common.h"
/* Print v as 64 binary digits, most significant bit first, inserting a ','
 * after every 8 bits (including a trailing one after the final byte). */
void printb(const uint64_t v)
{
  int printed = 0;
  for(uint64_t mask = 0x1ULL << (sizeof(v) * CHAR_BIT - 1); mask != 0; mask >>= 1){
    putchar((v & mask) ? '1' : '0');
    printed++;
    if(printed % 8 == 0)
      putchar(',');
  }
}
/* Build an adjacency list from an undirected edge list: every edge (u,v)
 * appends v to u's row and u to v's row. fill[] tracks the next free slot
 * per node; callers must size adjacency so that degree slots suffice. */
void create_adjacency(const int nodes, const int lines, const int degree,
		      const int edge[lines][2], int adjacency[nodes][degree])
{
  int fill[nodes];
  for(int n=0;n<nodes;n++)
    fill[n] = 0;

  for(int e=0;e<lines;e++){
    const int u = edge[e][0];
    const int v = edge[e][1];
    adjacency[u][fill[u]++] = v;
    adjacency[v][fill[v]++] = u;
  }
}
/* True when the two edges (e00,e01) and (e10,e11) share at least one endpoint. */
bool has_duplicated_vertex(const int e00, const int e01, const int e10, const int e11)
{
  if(e00 == e10 || e00 == e11)
    return true;
  return (e01 == e11 || e01 == e10);
}

/* True when (e00,e01) and (e10,e11) denote the same undirected edge,
 * in either orientation. */
bool has_duplicated_edge(const int e00, const int e01, const int e10, const int e11)
{
  const bool same_order = (e00 == e10 && e01 == e11);
  const bool swapped    = (e00 == e11 && e01 == e10);
  return same_order || swapped;
}
/* Uniform pseudo-random int in [0, max) derived from POSIX random().
 * NOTE(review): the divisor (1.0+RAND_MAX) is the range of rand(), while
 * random() always yields [0, 2^31-1]; if RAND_MAX differs from 2^31-1 on the
 * target platform the distribution is skewed — confirm before relying on it. */
int getRandom(const int max)
{
  return (int)(random()*((double)max)/(1.0+RAND_MAX));
}
/* Column index of vertex v; vertices are numbered column-major in a
 * height-row grid. */
int WIDTH(const int v, const int height)
{
  return v / height;
}

/* Row index of vertex v within its column. */
int HEIGHT(const int v, const int height)
{
  return v % height;
}

/* Manhattan (L1) grid distance between vertices v and w. */
int DISTANCE(const int v, const int w, const int height)
{
  const int dw = WIDTH (v, height) - WIDTH (w, height);
  const int dh = HEIGHT(v, height) - HEIGHT(w, height);
  return abs(dw) + abs(dh);
}
/* Rotate vertex v of a height x width grid by `degree` degrees about the
 * grid center. Only symmetry groups 2 (180 only) and 4 (90/180/270) are
 * supported; anything else aborts via ERROR().
 * NOTE(review): the groups==4 formulas index with `height` on both axes,
 * which presumes a square grid (width == height) — confirm with callers. */
int ROTATE(const int v, const int height, const int width,
	   const int groups, const int degree)
{
  if(groups != 2 && groups != 4)
    ERROR("Invalid groups\n");

  int w = WIDTH (v, height);
  int h = HEIGHT(v, height);
  if(groups == 2){
    if(degree != 180)
      ERROR("Invalid degree\n");
    // degree == 180
    return (width-w-1)*height + (height-h-1);
  }
  else{ // groups == 4
    if(degree != 90 && degree != 180 && degree != 270)
      ERROR("Invalid degree\n");
    if(degree == 90)       return h*height + (height-w-1);
    else if(degree == 180) return (height-w-1)*height + (height-h-1);
    else                   return (height-h-1)*height + w; // degree == 270
  }
}
/* Verify that the edge list is rotation-symmetric: block k of the edge list
 * (based_lines edges each) must be the rotated image of the base block, or
 * the edge must be its own image (endpoints mirrored across the grid
 * center). Prints the first offending edge and returns false on violation. */
bool check_symmetric_edge(const int lines, const int edge[lines][2], const int height,
			  const int width, const int based_height, const int groups)
{
  assert(lines%groups == 0);
  int tmp_edge[2], based_lines = lines / groups;
  if(groups == 2){
    for(int i=0;i<based_lines;i++){
      for(int j=0;j<2;j++)
	tmp_edge[j] = ROTATE(edge[i][j], height, width, groups, 180);
      /* accept either an exact rotated duplicate or a center-symmetric edge */
      if(!has_duplicated_edge(edge[based_lines+i][0], edge[based_lines+i][1], tmp_edge[0], tmp_edge[1]))
	if(!( WIDTH (edge[based_lines+i][0], height) + WIDTH (edge[based_lines+i][1], height) == (width-1) &&
	      HEIGHT(edge[based_lines+i][0], height) + HEIGHT(edge[based_lines+i][1], height) == (height-1))){
	  printf("i=%d: %d,%d-%d,%d %d,%d-%d,%d\n", i,
		 WIDTH(edge[based_lines+i][0], height), HEIGHT(edge[based_lines+i][0], height),
		 WIDTH(edge[based_lines+i][1], height), HEIGHT(edge[based_lines+i][1], height),
		 WIDTH(tmp_edge[0], height), HEIGHT(tmp_edge[0], height),
		 WIDTH(tmp_edge[1], height), HEIGHT(tmp_edge[1], height));
	  return false;
	}
    }
  }
  else if(groups == 4){
    // 90 degrees: block 1 must be the 90-degree image of the base block
    for(int i=0;i<based_lines;i++){
      for(int j=0;j<2;j++)
	tmp_edge[j] = ROTATE(edge[i][j], height, width, groups, 90);
      if(!has_duplicated_edge(tmp_edge[0], tmp_edge[1], edge[based_lines+i][0], edge[based_lines+i][1])){
	if(!( WIDTH (edge[based_lines+i][0], height) + WIDTH (edge[based_lines+i][1], height) == (width-1) &&
	      HEIGHT(edge[based_lines+i][0], height) + HEIGHT(edge[based_lines+i][1], height) == (height-1))){
	  printf("A i=%d: %d,%d-%d,%d %d,%d-%d,%d\n", i,
		 WIDTH(edge[based_lines+i][0], height), HEIGHT(edge[based_lines+i][0], height),
		 WIDTH(edge[based_lines+i][1], height), HEIGHT(edge[based_lines+i][1], height),
		 WIDTH(tmp_edge[0], height), HEIGHT(tmp_edge[0], height),
		 WIDTH(tmp_edge[1], height), HEIGHT(tmp_edge[1], height));
	  return false;
	}
      }
      // 180 degrees: block 2 must match the 180-degree image
      for(int j=0;j<2;j++)
	tmp_edge[j] = ROTATE(edge[i][j], height, width, groups, 180);
      if(!has_duplicated_edge(tmp_edge[0], tmp_edge[1], edge[based_lines*2+i][0], edge[based_lines*2+i][1])){
	if(!( WIDTH (edge[based_lines*2+i][0], height) + WIDTH (edge[based_lines*2+i][1], height) == (width-1) &&
	      HEIGHT(edge[based_lines*2+i][0], height) + HEIGHT(edge[based_lines*2+i][1], height) == (height-1))){
	  printf("B i=%d: %d,%d-%d,%d %d,%d-%d,%d\n", i,
		 WIDTH(edge[based_lines*2+i][0], height), HEIGHT(edge[based_lines*2+i][0], height),
		 WIDTH(edge[based_lines*2+i][1], height), HEIGHT(edge[based_lines*2+i][1], height),
		 WIDTH(tmp_edge[0], height), HEIGHT(tmp_edge[0], height),
		 WIDTH(tmp_edge[1], height), HEIGHT(tmp_edge[1], height));
	  return false;
	}
      }
      // 270 degrees: block 3 must match the 270-degree image
      for(int j=0;j<2;j++)
	tmp_edge[j] = ROTATE(edge[i][j], height, width, groups, 270);
      if(!has_duplicated_edge(tmp_edge[0], tmp_edge[1], edge[based_lines*3+i][0], edge[based_lines*3+i][1])){
	if(!( WIDTH (edge[based_lines*3+i][0], height) + WIDTH (edge[based_lines*3+i][1], height) == (width-1) &&
	      HEIGHT(edge[based_lines*3+i][0], height) + HEIGHT(edge[based_lines*3+i][1], height) == (height-1))){
	  printf("C i=%d: %d,%d-%d,%d %d,%d-%d,%d\n", i,
		 WIDTH(edge[based_lines*3+i][0], height), HEIGHT(edge[based_lines*3+i][0], height),
		 WIDTH(edge[based_lines*3+i][1], height), HEIGHT(edge[based_lines*3+i][1], height),
		 WIDTH(tmp_edge[0], height), HEIGHT(tmp_edge[0], height),
		 WIDTH(tmp_edge[1], height), HEIGHT(tmp_edge[1], height));
	  return false;
	}
      }
    }
  }
  return true;
}
/* Print the edge list as "x0,y0 x1,y1" grid-coordinate pairs; edges are
 * stored flat, two vertex ids per edge. */
void output_edge(const int lines, const int edge[lines*2], const int height)
{
  for(int i=0;i<lines;i++)
    printf("%d,%d %d,%d\n", WIDTH(edge[i*2], height), HEIGHT(edge[i*2], height),
	   WIDTH(edge[i*2+1], height), HEIGHT(edge[i*2+1], height));
}
/* Copy n ints from buf2 into buf1; the restrict qualifiers promise the
 * buffers do not overlap, which also makes the parallel copy safe. */
void copy_edge(int *restrict buf1, const int *restrict buf2, const int n)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for(int idx = 0; idx < n; idx++)
    buf1[idx] = buf2[idx];
}
/* Exchange the values pointed to by a and b. */
void swap(int *a, int *b)
{
  const int old_a = *a;
  *a = *b;
  *b = old_a;
}
/* Verify that no edge is a self-loop (both endpoints equal); returns false
 * and prints the full edge list, marking the offenders, when one is found.
 * Timed via TIMER_CHECK. */
bool check_loop(const int lines, const int edge[lines][2])
{
  timer_start(TIMER_CHECK);
  bool flag = true;
#ifdef _OPENMP
  /* BUG FIX: `flag` was written by multiple threads without synchronization
   * (a data race, UB); a logical-AND reduction makes it well defined. */
#pragma omp parallel for reduction(&&:flag)
#endif
  for(int i=0;i<lines;i++)
    flag = flag && (edge[i][0] != edge[i][1]);
  timer_stop(TIMER_CHECK);

  if(flag == false){
    for(int i=0;i<lines;i++)
      if(edge[i][0] == edge[i][1]){
	printf("%d: %d %d <--\n", i, edge[i][0], edge[i][1]);
      }
      else{
	printf("%d: %d %d\n", i, edge[i][0], edge[i][1]);
      }
  }

  return flag;
}
/* Pairwise scan of the whole edge list for duplicated (undirected) edges;
 * prints each duplicate pair found and returns false if any exists.
 * Timed via TIMER_CHECK. */
bool check_duplicate_all_edge(const int lines, const int edge[lines][2])
{
  timer_start(TIMER_CHECK);
  bool flag = true;
#ifdef _OPENMP
  /* BUG FIX: `flag` was written by multiple threads without synchronization
   * (a data race, UB); use a logical-AND reduction instead. */
#pragma omp parallel for reduction(&&:flag)
#endif
  for(int i=0;i<lines;i++)
    for(int j=i+1;j<lines;j++)
      if(has_duplicated_edge(edge[i][0], edge[i][1], edge[j][0], edge[j][1])){
	printf("%d %d %d %d\n", edge[i][0], edge[i][1], edge[j][0], edge[j][1]);
	flag = false;
      }
  timer_stop(TIMER_CHECK);
  return flag;
}
/* Count the duplicated (undirected) edge pairs across the whole edge list. */
int count_duplicate_all_edge(const int lines, const int edge[lines][2])
{
  int num = 0;
#ifdef _OPENMP
  /* BUG FIX: `num++` from multiple threads without a reduction is a data
   * race and silently undercounts; reduction(+:num) makes the sum correct. */
#pragma omp parallel for reduction(+:num)
#endif
  for(int i=0;i<lines;i++)
    for(int j=i+1;j<lines;j++)
      if(has_duplicated_edge(edge[i][0], edge[i][1], edge[j][0], edge[j][1]))
	num++;

  return num;
}
/* Check that none of the first g_opt candidate edges collides with any of
 * the remaining (groups-1)*g_opt rotated copies in tmp_edge.
 * Returns false on any collision. Timed via TIMER_CHECK. */
bool check_duplicate_tmp_edge(const int g_opt, const int groups, int tmp_edge[groups*g_opt][2])
{
  timer_start(TIMER_CHECK);
  bool flag = true;
  for(int i=0;i<g_opt;i++){
    int tmp[2] = {tmp_edge[i][0], tmp_edge[i][1]};
    for(int j=g_opt;j<groups*g_opt;j++)
      if(has_duplicated_edge(tmp[0], tmp[1], tmp_edge[j][0], tmp_edge[j][1]))
	flag = false;
  }
  timer_stop(TIMER_CHECK);
  return flag;
}
/* Distributed duplicate check: each MPI rank scans a strided share of the
 * edge list (using the globals `rank` and `procs`) for collisions with the
 * candidate edges in tmp_edge, skipping the line(s) currently being rewired
 * (tmp_line, taken modulo based_lines). Per-rank results are combined with
 * a logical-AND allreduce so every rank returns the same verdict.
 * NOTE(review): `flag` is written inside an OpenMP parallel-for without a
 * reduction — all writes store the same value, but it is formally a data
 * race; confirm/clean up like the other check_* helpers. */
bool check_duplicate_current_edge(const int lines, const int edge[lines][2], const int tmp_lines,
				  const int tmp_edge[tmp_lines][2], const int tmp_line[2],
				  const int groups, const int g_opt, const bool is_center)
{
  timer_start(TIMER_CHECK);
  int based_lines = lines/groups;
  bool flag = true;

  if(g_opt == D_2G_OPT){
    int tmp_line0 = tmp_line[0]%based_lines;
    int tmp_line1 = tmp_line[1]%based_lines;
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for(int i=rank;i<based_lines;i+=procs)
      if(i != tmp_line0 && i != tmp_line1)
	for(int j=0;j<tmp_lines;j++)
	  if(has_duplicated_edge(edge[i][0], edge[i][1], tmp_edge[j][0], tmp_edge[j][1]))
	    flag = false;
  }
  else if(g_opt == D_1G_OPT){
    int tmp_line0 = tmp_line[0]%based_lines;
    if(! is_center){
#ifdef _OPENMP
#pragma omp parallel for
#endif
      for(int i=rank;i<based_lines;i+=procs)
	if(i != tmp_line0)
	  for(int j=0;j<tmp_lines;j++)
	    if(has_duplicated_edge(edge[i][0], edge[i][1], tmp_edge[j][0], tmp_edge[j][1]))
	      flag = false;
    }
    else{
      /* center edges must be checked against the whole list, not one block */
#ifdef _OPENMP
#pragma omp parallel for
#endif
      for(int i=rank;i<lines;i+=procs)
	if(i%based_lines != tmp_line0)
	  for(int j=0;j<tmp_lines;j++)
	    if(has_duplicated_edge(edge[i][0], edge[i][1], tmp_edge[j][0], tmp_edge[j][1]))
	      flag = false;
    }
  }

  MPI_Allreduce(MPI_IN_PLACE, &flag, 1, MPI_C_BOOL, MPI_LAND, MPI_COMM_WORLD);
  timer_stop(TIMER_CHECK);
  return flag;
}
/* Build a lookup table mapping each vertex id to its index under the
 * rotation symmetry: vertices of the base region map to 0..based_nodes-1
 * and each rotated copy is offset by a multiple of based_nodes.
 * NOTE(review): the groups>2 branch (like ROTATE) indexes with `height`
 * only, presuming a square grid — confirm with callers. */
void create_rotate_hash(const int nodes, const int height, const int width, const int groups,
			int *rotate_hash)
{
  int based_nodes = nodes / groups;
  if(groups == 1){
    /* no symmetry: identity mapping */
    for(int i=0;i<based_nodes;i++)
      rotate_hash[i] = i;
  }
  else if(groups == 2){
    int based_height = height / 2;
    for(int i=0;i<based_nodes;i++){
      /* j = vertex id of the i-th base-region vertex (half-height strips) */
      int j = (i/based_height) * height + (i%based_height);
      rotate_hash[j] = i;
      rotate_hash[ROTATE(j, height, width, groups, 180)] = i + based_nodes;
    }
  }
  else{
    int based_height = height / 2;
    for(int i=0;i<based_nodes;i++){
      int j = (i/based_height) * height + (i%based_height);
      rotate_hash[j] = i;
      rotate_hash[ROTATE(j, height, width, groups,  90)] = i + based_nodes;
      rotate_hash[ROTATE(j, height, width, groups, 180)] = i + based_nodes * 2;
      rotate_hash[ROTATE(j, height, width, groups, 270)] = i + based_nodes * 3;
    }
  }
}
|
alm2map_pol_der1.h |
#ifndef _POLDER_H_
#define _POLDER_H_
#include <vector>
#include "alm.h"
#include "arr.h"
// #include "fftpack_support.h"
// #include "ylmgen.h"
#include "healpix_map.h"
#include "xcomplex.h"
#ifndef DBL_MAX
#define DBL_MAX 1.7976931348623158e+308
#endif
#define SMAXCHK 50 // maximum size of chunk (in number of ring pairs)
#define KvS 1.0
#define RSMAX 20
#define RSMIN -20
#define HPX_MXL0 40
#define HPX_MXL1 1.35
#define LOG2LG 100
double rescale_tab[41]; // index from -20 to 20
/* Tabulate the m-recursion prefactors for Ylm generation:
 * first m_fact[m] = prod_{k=1..m} sqrt((2k+1)/(2k)), then each entry is
 * converted to a logarithmic scale via log(inv_sqrt4pi * m_fact[m]) * inv_ln2
 * (inv_sqrt4pi and inv_ln2 are global constants defined elsewhere;
 * presumably inv_ln2 = 1/ln 2, making this log base 2 — confirm). */
template<typename T> void gen_mfac( long m_max, arr<T> &m_fact )
{
  long m;

  m_fact[0] = 1.0;
  for( m=1; m <= m_max; m++ )
  {
    m_fact[m] = m_fact[m-1]*sqrt( (double)(2*m+1)/(double)(2*m) );
  }
  for( m=0; m <= m_max; m++ )
  {
    m_fact[m] = log( inv_sqrt4pi * m_fact[m] ) * inv_ln2;
  }
}
// Explicit instantiations for the two precisions used by the library.
template void gen_mfac( long m_max, arr<float> &m_fact );
template void gen_mfac( long m_max, arr<double> &m_fact );
/* Tabulate the polarization normalization factors
 * normal_l[l] = sqrt( KvS / ((l+2)(l+1)l(l-1)) ) for l = 2..l_max;
 * the l = 0,1 entries (where the product vanishes) are set to 0. */
template<typename T> void gen_normpol( long l_max, arr<T> &normal_l )
{
  long l;
  double fl,xx;

  normal_l[0] = 0.0;
  normal_l[1] = 0.0;
  for( l=2; l <= l_max; l++)
  {
    fl = (double)l;
    xx = (fl + 2.0) * (fl + 1.0) * fl * (fl - 1.0);
    normal_l[l] = sqrt( KvS / xx );
  }
}
/* Initialize the global rescale_tab used to keep Legendre recurrences
 * within double range: entries are powers of FL_LARGE = 2^LOG2LG.
 * NOTE(review): as written, rescale_tab[n+20] = FL_LARGE^(n - RSMAX) for
 * n in [-20,20] (exponents -40..0), and the n==0 slot is then forced to
 * 1.0 — the original HEALPix init_rescale tabulates FL_LARGE^n; confirm
 * the RSMAX offset is intentional.
 * NOTE(review): `cout` is used unqualified, relying on a `using namespace
 * std` (or equivalent) elsewhere in the including translation unit. */
void init_rescale()
{
  long smax;
  double FL_LARGE = pow( 2.0, (double)LOG2LG );
  double logOVFLOW = log( FL_LARGE );

  /* smax = largest power of FL_LARGE representable in a double */
  smax = (long)( log( DBL_MAX ) / logOVFLOW );
  if( smax > (RSMAX-1) )
  {
    cout << "Array rescale_tab too small" << '\n';
    cout << "smax: " << smax << " RSMAX: " << RSMAX << '\n';
  }

  for( long n=-20; n <= 20; n++ )
  {
    rescale_tab[n+20] = pow( FL_LARGE, (double)(n - RSMAX) );
  }
  rescale_tab[20] = 1.0;
}
/* Healpix ring geometry: for ring index ith (1-based, northern hemisphere,
 * 1 <= ith <= 2*nside), return via the reference parameters:
 *   cth, sth  — cos(theta) and sin(theta) of the ring colatitude,
 *   nphi      — number of pixels on the ring,
 *   startpix  — index of the ring's first pixel,
 *   kphi0     — phase flag for the ring's azimuthal offset. */
void get_pixel_layout( long nside, long ith, double &cth, double &sth, long &nphi, long &startpix, long &kphi0 )
{
    const double dth1 = 1.0 / ( 3.0 * nside * nside );
    const double dth2 = 2.0 / ( 3.0 * nside );
    const double dst1 = 1.0 / ( sqrt( 6.0 ) * nside );

    if ( ith < nside )
    {
        /* polar cap ring: ring size grows linearly with ith */
        cth = 1.0 - (double)( ith * ith ) * dth1;
        nphi = 4 * ith;
        kphi0 = 1;
        sth = sin( 2.0 * asin( ith * dst1 ) ); // sin(theta)
        startpix = 2 * ith * ( ith - 1 );
    }
    else
    {
        /* equatorial belt ring: constant ring size 4*nside */
        cth = ( 2 * nside - ith ) * dth2;
        nphi = 4 * nside;
        kphi0 = ( ith + 1 - nside ) % 2;
        sth = sqrt( ( 1.0 - cth ) * ( 1.0 + cth ) ); // sin(theta)
        startpix = 2 * nside * ( nside - 1 ) + ( ith - nside ) * nphi;
    }
}
/*! Recurrence coefficients for the fixed-m Legendre recursion:
 *  recfac[0][l] = sqrt((4(l+1)^2 - 1) / ((l+1)^2 - m^2)) multiplies the
 *  cth * P_l term, and recfac[1][l] is its reciprocal. */
template<typename T> void gen_recfac( long l_max, long m, arr2<T> &recfac )
{
  recfac.fill(0.0);
  const double fm2 = (double)( m * m );
  for( long l = m; l <= l_max; ++l )
  {
    const double fl2 = (double)( (l+1)*(l+1) );
    recfac[0][l] = sqrt( ( 4.0 * fl2 - 1.0 ) / ( fl2 - fm2 ) );
    recfac[1][l] = 1.0 / recfac[0][l];
  }
}
template void gen_recfac( long l_max, long m, arr2<float> &recfac );
template void gen_recfac( long l_max, long m, arr2<double> &recfac );
/*! Coupling factors sqrt((2l+1)/(2l-1) * (l^2 - m^2)) linking lambda_{l-1,m}
 *  to the polarization terms; entries below l_start are left at zero. */
template<typename T> void gen_lamfac( long l_max, long m, arr<T> &lam_fact )
{
  lam_fact.fill(0.0);
  const double fm2 = (double)( m * m );
  // polarization recursion only needs l >= 2 (and always l > m)
  const long l_start = ( m + 1 > 2 ) ? ( m + 1 ) : 2;
  for( long l = l_start; l <= l_max; ++l )
  {
    const double fl = (double)l;
    const double fl2 = (double)( l * l );
    lam_fact[l] = sqrt( ( 2.0 * fl + 1.0 ) / ( 2.0 * fl - 1.0 ) * ( fl2 - fm2 ) );
  }
}
template void gen_lamfac( long l_max, long m, arr<float> &lam_fact );
template void gen_lamfac( long l_max, long m, arr<double> &lam_fact );
/*! Coupling factors for the first-derivative recursion: same formula as
 *  gen_lamfac but valid from l = max(1, m+1) instead of max(2, m+1). */
template<typename T> void gen_lamfac_der( long l_max, long m, arr<T> &lam_fact )
{
  lam_fact.fill(0.0);
  const double fm2 = (double)( m * m );
  // different (lower) starting l than the polarization factor
  const long l_start = ( m + 1 > 1 ) ? ( m + 1 ) : 1;
  for( long l = l_start; l <= l_max; ++l )
  {
    const double fl = (double)l;
    const double fl2 = (double)( l * l );
    // different normalization than the polarization factor (see Fortran original)
    lam_fact[l] = sqrt( ( 2.0 * fl + 1.0 ) / ( 2.0 * fl - 1.0 ) * ( fl2 - fm2 ) );
  }
}
template void gen_lamfac_der( long l_max, long m, arr<float> &lam_fact );
template void gen_lamfac_der( long l_max, long m, arr<double> &lam_fact );
/*! Lowest multipole l at which Y_lm(theta) is non-negligible for a ring with
 *  sin(theta) = sth; below this l the harmonics underflow and can be skipped. */
inline long l_min_ylm( long m, double sth )
{
  long lmin = m;
  if( HPX_MXL0 > 0 )
  {
    const long cutoff = long( ( m - HPX_MXL0 ) / ( HPX_MXL1 * sth ) );
    if( cutoff > lmin )
    {
      lmin = cutoff;
    }
  }
  return lmin;
}
/*! Compute, for fixed m and one colatitude (cth = cos(theta), sth = sin(theta)),
 *  the scalar harmonic lambda_lm (lam_lm[0][l]) and the two spin-2 polarization
 *  functions W_lm (lam_lm[1][l]) and X_lm (lam_lm[2][l]) for all l in [m, lmax],
 *  using the standard three-term recurrence with dynamic rescaling to avoid
 *  over/underflow at high l, m.
 *  mfac     : log2 of the normalised lambda_mm prefactor (from gen_mfac)
 *  recfac   : recurrence coefficients for this m (from gen_recfac)
 *  lam_fact : coupling factors (from gen_lamfac)
 *  NOTE(review): requires the global rescale_tab filled by init_rescale(). */
template<typename T> void do_lam_lm_pol( long lmax, long m, double cth, double sth, double mfac, arr2<T> recfac, arr<T> lam_fact, arr2<T> &lam_lm )
{
  long scalel, l, l_min, index_rstab;
  double log2val, dlog2lg, ovflow, unflow, corfac, lam_mm, lam_0, lam_1, lam_2, lam_lm1m, normal_m, fm2, fl, flm1, two_cth, one_on_s2, fm_on_s2, two_on_s2, c_on_s2, a_w, a_x, b_w;
  //define constants
  ovflow = rescale_tab[21];// rescaling threshold, overflow side (FL_LARGE)
  unflow = rescale_tab[19];// rescaling threshold, underflow side (1/FL_LARGE)
  l_min = l_min_ylm( m, sth );// lowest l worth computing on this ring
  dlog2lg = (double)LOG2LG;
  fm2 = m*m;
  normal_m = ( 2.0 * m ) * ( 1 - m );// spin-2 normalisation of the l = m term
  two_cth = 2.0 * cth;
  one_on_s2 = 1.0 / ( sth * sth );
  fm_on_s2 = m * one_on_s2;
  two_on_s2 = 2.0 * one_on_s2;
  c_on_s2 = cth * one_on_s2;
  b_w = c_on_s2;
  //computes lambda_mm
  log2val = mfac + m*log( sth ) * inv_ln2;// log_2(lam_mm)
  scalel = long ( log2val / dlog2lg );// current rescaling exponent
  // NOTE(review): scalel > RSMAX would index past the table; assumed not to
  // occur for valid (l, m) ranges -- confirm against the Fortran original.
  index_rstab = 20 + MAX( scalel, RSMIN );
  corfac = rescale_tab[index_rstab];// correction undoing the rescaling
  lam_mm = pow( 2.0, log2val - scalel * dlog2lg ); // rescaled lam_mm
  if( (m & 1) > 0 )
  {
    lam_mm = -lam_mm; // negative for odd m
  }
  lam_lm.fill(0.0);
  // --- l = m ---
  lam_lm[0][m] = corfac * lam_mm;//Actual lam_mm
  if( m >= l_min )
  {
    //skip Ymm if too small
    lam_lm[1][m] = ( normal_m * lam_lm[0][m] ) * ( 0.5 - one_on_s2 );
    lam_lm[2][m] = ( normal_m * lam_lm[0][m] ) * c_on_s2;
  }
  // --- l > m ---
  lam_0 = 0.0;
  lam_1 = 1.0;
  lam_2 = cth * lam_1 * recfac[0][m];
  for( l = m+1; l <= lmax; l++ )
  {
    //do recursion
    lam_lm1m = lam_lm[0][l-1] * lam_fact[l];// must be incremented even if not used
    lam_lm[0][l] = lam_2 * corfac * lam_mm;
    if( l >= l_min )
    {
      fl = l;
      flm1 = fl - 1.0;
      a_w = two_on_s2 * ( fl - fm2 ) + flm1 * fl;
      a_x = two_cth * flm1;
      lam_lm[1][l] = b_w * lam_lm1m - a_w * lam_lm[0][l];// W_lm
      lam_lm[2][l] = fm_on_s2 * ( lam_lm1m - a_x * lam_lm[0][l] );// X_lm
    }
    lam_0 = lam_1 * recfac[1][l-1];
    lam_1 = lam_2;
    lam_2 = ( cth * lam_1 - lam_0 ) * recfac[0][l];// three-term recurrence step
    // do dynamic rescaling
    // BUG FIX: use fabs, not abs -- unqualified abs on a double can bind to
    // the integer ::abs and silently truncate the magnitude test.
    if( fabs(lam_2) > ovflow )
    {
      lam_1 = lam_1*unflow;
      lam_2 = lam_2*unflow;
      scalel = scalel + 1;
      index_rstab = 20 + MAX( scalel, RSMIN );
      corfac = rescale_tab[index_rstab];
    }
    else
    {
      if( (fabs(lam_2) < unflow) && (fabs(lam_2) != 0.0) )
      {
        // BUG FIX: an underflowing value must be scaled UP by ovflow
        // (= FL_LARGE), matching the scalel decrement below; multiplying by
        // unflow (as before) drove the recursion straight to zero.
        lam_1 = lam_1*ovflow;
        lam_2 = lam_2*ovflow;
        scalel = scalel - 1;
        index_rstab = 20 + MAX( scalel, RSMIN );
        corfac = rescale_tab[index_rstab];
      }
    }
  }
}
template void do_lam_lm_pol( long lmax, long m, double cth, double sth, double mfac, arr2<float> recfac, arr<float> lam_fact, arr2<float> &lam_lm );
template void do_lam_lm_pol( long lmax, long m, double cth, double sth, double mfac, arr2<double> recfac, arr<double> lam_fact, arr2<double> &lam_lm );
/*! Inverse FFT of one ring: wrap the b(m) coefficients (datain, m = 0..mmax)
 *  into the nph discrete frequencies of the ring, apply the half-pixel phase
 *  shift for rings with kphi0 == 1, and backward-FFT into real pixel values
 *  in dataout[0..nph-1]. */
template<typename T> void ring_synthesis( long nside, long lmax, long mmax, arr<xcomplex<T> > datain, long nph, arr<double> &dataout, long kphi0 )
{
  long iw, ksign, m, k, kshift;
  double arg;
  xcomplex<T> dat, cplx;
  ksign = 1;
  kshift = pow( (double)-1, (double)kphi0 );// either 1 or -1
  arr<xcomplex<T> > bw;
  bw.alloc( nph );
  bw.fill( 0.0 );
  // all frequencies [-m,m] are wrapped in [0,nph-1]
  bw[0] = datain[0];
  for( m=1; m <= mmax; m++ )
  {
    iw = m % nph;// between 0 and nph-1
    k = ( m - iw) / nph;// number of 'turns'
    bw[iw] = bw[iw] + datain[m] * pow( (double)kshift, (double)k );// complex number
    // BUG FIX: in C++ the remainder keeps the sign of the dividend, so
    // "-m % nph" is <= 0 and indexed bw out of bounds for m not a multiple
    // of nph. Emulate Fortran's modulo(-m, nph), which is always in [0, nph-1].
    iw = ( nph - m % nph ) % nph;// between 0 and nph-1
    k = (-m - iw) / nph;// number of 'turns'
    // BUG FIX: construct xcomplex<T> (was xcomplex<REAL>) so the float
    // instantiation does not mix precisions in the product.
    bw[iw] = bw[iw] + conj(datain[m]) * xcomplex<T> (pow( (double)kshift, (double)k ), 0.);// complex number
  }
  cout << "ring_synthesis loop for( m=1; m <= mmax; m++ ) done" << '\n';
  // kshift**k = 1 for even turn numbers
  //           = 1 or -1 for odd turn numbers : results from the shift in space
  // applies the shift in position <-> phase factor in Fourier space
  dataout[0] = bw[0].real();
  for( iw=1; iw < nph/2; iw++ )
  {
    m = ksign * iw;
    if( kphi0 == 1 )
    {
      // half-pixel shift in phi <-> phase factor e^{i m pi / nph}
      arg = m * PI / (double)(nph);
      cplx.Set( cos(arg), sin(arg) );
      dat = bw[iw] * cplx;
    }
    else
    {
      dat = bw[iw];
    }
    // FFTPACK half-complex layout: interleaved (re, im) pairs after dataout[0]
    dataout[iw*2-1] = dat.real();
    dataout[iw*2] = dat.imag();
  }
  cout << "ring_synthesis loop for( iw=1; iw < nph/2; iw++ ) done" << '\n';
  // Nyquist frequency: only the real part is stored
  iw = nph / 2;
  m = ksign * iw;
  if( kphi0 == 1 )
  {
    arg = m * PI / (double)(nph);
    cplx.Set( cos(arg), sin(arg) );
    dat = bw[iw] * cplx;
  }
  else
  {
    dat = bw[iw];
  }
  dataout[iw*2-1] = dat.real();
  double minv, maxv;
  dataout.minmax( minv, maxv);
  cout << "dataout min: " << minv << " dataout max: " << maxv << '\n';
  // backward real FFT turns the half-complex spectrum into pixel values
  rfft plan;
  plan.Set( nph );
  plan.backward_fftpack( dataout );
  cout << "\nring_synthesis fft done, exiting" << '\n';
}
template void ring_synthesis( long nside, long lmax, long mmax, arr<xcomplex<float> > datain, long nph, arr<double> &dataout, long kphi0 );
template void ring_synthesis( long nside, long lmax, long mmax, arr<xcomplex<double> > datain, long nph, arr<double> &dataout, long kphi0 );
/*
template<typename T> void alm2map_pol_der1( const Alm<xcomplex<T> > Alm_T, const Alm<xcomplex<T> > Alm_E, const Alm<xcomplex<T> > Alm_B,
Healpix_Map<T> &mapT, Healpix_Map<T> &mapQ, Healpix_Map<T> &mapU,
Healpix_Map<T> &mapT_d_theta, Healpix_Map<T> &mapQ_d_theta, Healpix_Map<T> &mapU_d_theta,
Healpix_Map<T> &mapT_d_phi, Healpix_Map<T> &mapQ_d_phi, Healpix_Map<T> &mapU_d_phi )
{
planck_assert( mapT.Scheme() == RING, "alm2map_pol_der1: maps must be in RING scheme" );
planck_assert( mapT.conformable( mapQ ) && mapT.conformable( mapU ), "alm2map_pol_der1: maps are not conformable" );
planck_assert( mapT.conformable( mapT_d_theta ) && mapT.conformable( mapT_d_phi ), "alm2map_pol_der1: maps are not conformable" );
planck_assert( mapT_d_theta.Scheme() == RING, "alm2map_pol_der1: maps must be in RING scheme" );
planck_assert( mapT_d_theta.conformable( mapQ_d_theta ) && mapT_d_theta.conformable( mapU_d_theta ), "alm2map_pol_der1: maps are not conformable" );
planck_assert( mapT_d_phi.Scheme() == RING, "alm2map_pol_der1: maps must be in RING scheme" );
planck_assert( mapT_d_phi.conformable( mapQ_d_phi ) && mapT_d_phi.conformable( mapU_d_phi ), "alm2map_pol_der1: maps are not conformable" );
planck_assert( Alm_T.conformable( Alm_E ) && Alm_T.conformable( Alm_B ), "alm2map_pol_der1: a_lm are not conformable" );
long startpix[SMAXCHK];
long nph[SMAXCHK];
long kphi0[SMAXCHK];
double cth[SMAXCHK];
double sth[SMAXCHK];
double one_on_s[SMAXCHK];
long nside = mapT.Nside();
long npix = mapT.Npix();
long lmax = Alm_T.Lmax();
long mmax = Alm_T.Mmax();
long nrings = 2*nside;
long nphmx = 4*nside;
long nchunks = nrings/SMAXCHK + 1;// number of chunks
long chunksize = ( nrings + nchunks - 1 )/nchunks;// <= SMAXCHK
arr<T> mfac, normal_l, lam_fact, lam_fact_der;
arr<double> ring;
mfac.alloc( mmax+1 );
normal_l.alloc( lmax+1 );
arr3<xcomplex<T> > b_d1;
b_d1.alloc( 18, mmax+1, chunksize );
arr2<T> recfac, dalm, lam_lm;
arr<xcomplex<T> > bline;
// warning compilation case OpenMP
//
// if( do_openmp() == false )// if (.not. do_openmp())
// {
// lam_fact.alloc( lmax+1 );
// lam_fact_der.alloc( lmax+1 );
// ring.alloc( nphmx );
// recfac.alloc( 2, lmax+1 );
// dalm.alloc( 6, lmax+1 );
// lam_lm.alloc( 3, lmax+1 );
// bline.alloc( mmax+1 );
// }
gen_mfac( mmax, mfac );// init mfac array
init_rescale();
gen_normpol( lmax, normal_l );// generate Polarization normalisation
mapT.fill(0.0);
mapQ.fill(0.0);
mapU.fill(0.0);
mapT_d_theta.fill(0.0);
mapQ_d_theta.fill(0.0);
mapU_d_theta.fill(0.0);
mapT_d_phi.fill(0.0);
mapQ_d_phi.fill(0.0);
mapU_d_phi.fill(0.0);
cout << "alm2map_pol_der1 init done" << '\n';
long lchk, uchk, ith, ithl;
for( long ichunk=0; ichunk < nchunks; nchunks++ )
{
lchk = ichunk * chunksize + 1;
uchk = MIN( lchk + chunksize - 1, nrings );
for( ith = lchk; ith <= uchk; ith++ )
{
ithl = ith - lchk;// local index
//get pixel location information
get_pixel_layout( nside, ith, cth[ithl], sth[ithl], nph[ithl], startpix[ithl], kphi0[ithl] );
one_on_s[ithl] = 1.0 / sth[ithl];
}
//for each theta, and each m, computes
//b(m,theta) = sum_over_l>m (lambda_l_m(theta) * a_l_m)
//lambda_mm tends to go down when m increases (risk of underflow)
//lambda_lm tends to go up when l increases (risk of overflow)
b_d1.fill(0);//pad with zeros
long m, ll, l_min, k, k0, k1, par_lm, l;
double fm, f2m, fm2, lam_lm1m, cth_ring, one_on_s1, one_on_s2, cotanth, fllp1, fl, a0, xp, at, aq, derW, derX, derY, f2, f3, b0t, b0p, bx;
double factor[2];
arr<T> b_ns, b_ns_p, b_ns_t;
//b_ns.alloc(12);// index = -3:8
//b_ns_p.alloc(12);
//b_ns_t.alloc(12);
cout << "alm2map_pol_der1 enter first parallel loop" << '\n';
#pragma omp parallel default(none) \
shared( lmax, mmax, lchk, uchk, rescale_tab, normal_l, cth, sth, mfac, Alm_T, Alm_E, Alm_B, one_on_s, b_d1 ) \
private( recfac, dalm, lam_fact, lam_fact_der, m, ll, fm, f2m, fm2, ithl, l_min, k, k0, k1, par_lm, lam_lm, \
lam_lm1m, cth_ring, one_on_s1, one_on_s2, cotanth, factor, b_ns, b_ns_t, b_ns_p, l, fllp1, fl, a0, xp, at, aq, \
derW, derX, derY, f2, f3, b0t, b0p, bx )
{
// warning compilation case OpenMP
//if( do_openmp() == true )// if ( do_openmp())
//{
recfac.alloc( 2, lmax+1 );
dalm.alloc( 6, lmax+1 );
lam_fact.alloc( lmax+1 );
lam_fact_der.alloc( lmax+1 );
lam_lm.alloc( 3, lmax+1 );
//}
b_ns.alloc(12);// index = -3:8
b_ns_p.alloc(12);
b_ns_t.alloc(12);
//printf( "alm2map_pol_der1 1st parallel region dynamic allocation done\n" );
//printf( "alm2map_pol_der1 b_ns size: %ld\n", b_ns.size() );
//printf( "alm2map_pol_der1 b_d1 size1: %ld, b_d1 size2: %ld, b_d1 size3: %ld\n", b_d1.size1(), b_d1.size2(), b_d1.size3() );
#pragma omp for schedule( dynamic, 1 )
for( m=0; m <= mmax; m++ )
{
//generate recursion factors (recfac) for Ylm of degree m
gen_recfac( lmax, m, recfac );
//generate Ylm relation factor for degree m
gen_lamfac_der( lmax, m, lam_fact_der );
gen_lamfac( lmax, m, lam_fact );
f2m = 2.0 * m;
fm2 = m*m;
fm = m;
//printf( "alm2map_pol_der1 recfac and Ylm generated\n" );
//extract needed alm under memory and CPU efficient form
for( ll=m; ll <= lmax; ll++ )
{
dalm[0][ll] = Alm_T( ll, m ).re;
dalm[1][ll] = Alm_T( ll, m ).im;
dalm[2][ll] = Alm_E( ll, m ).re*normal_l[ll];
dalm[3][ll] = Alm_E( ll, m ).im*normal_l[ll];
dalm[4][ll] = Alm_B( ll, m ).re*normal_l[ll];
dalm[5][ll] = Alm_B( ll, m ).im*normal_l[ll];
}
//printf( "alm2map_pol_der1 alm extracted\n" );
for( ithl=0; ithl <= (uchk-lchk); ithl ++ )
{
l_min = l_min_ylm( m, sth[ithl] );
if( lmax >= l_min )
{
//printf( "alm2map_pol_der1 enter if( lmax >= l_min )\n" );
//compute lam_lm(p,theta) for all l>=m
do_lam_lm_pol( lmax, m, cth[ithl], sth[ithl], mfac[m], recfac, lam_fact, lam_lm );
cth_ring = cth[ithl];
one_on_s1 = one_on_s[ithl];
one_on_s2 = one_on_s[ithl]*one_on_s[ithl];
cotanth = cth_ring * one_on_s[ithl];
b_ns.fill(0.0);
b_ns_t.fill(0.0);
b_ns_p.fill(0.0);
for( l=l_min; l <= lmax; l++ )
{
//printf( "alm2map_pol_der1 enter for( l=l_min; l <= lmax; l++ )\n" );
fl = l;
fllp1 = l*l + l;
par_lm = 3;//! = (-1)^(l+m)
if( ( (l + m) % 2 ) == 1 )
{
par_lm = -par_lm;
}
//printf( "alm2map_pol_der1 par_lm value: %ld\n", par_lm );
//printf( "alm2map_pol_der1 index l value: %ld, index m value: %ld\n", l, m );
//--------------------------
// f = Y_lm * a_lm;
factor[0] = lam_lm[0][l] * dalm[0][l];
factor[1] = lam_lm[0][l] * dalm[1][l];
//printf( "alm2map_pol_der1 factor param set\n" );
//printf( "alm2map_pol_der1 b_ns size: %ld\n", b_ns.size() );
b_ns[par_lm+3] = b_ns[par_lm+3] + factor[0];// T even
b_ns[par_lm+4] = b_ns[par_lm+4] + factor[1];
//printf( "alm2map_pol_der1 b_ns tab T even part done\n" );
b_ns[par_lm+5] = b_ns[par_lm+5] - lam_lm[1][l] * dalm[2][l];// Q, U even
b_ns[par_lm+6] = b_ns[par_lm+6] - lam_lm[1][l] * dalm[3][l];
b_ns[par_lm+7] = b_ns[par_lm+7] - lam_lm[1][l] * dalm[4][l];
b_ns[par_lm+8] = b_ns[par_lm+8] - lam_lm[1][l] * dalm[5][l];
//printf( "alm2map_pol_der1 b_ns tab Q, U even part done\n" );
b_ns[5-par_lm] = b_ns[5-par_lm] + lam_lm[2][l] * dalm[5][l];// Q odd
b_ns[6-par_lm] = b_ns[6-par_lm] - lam_lm[2][l] * dalm[4][l];
b_ns[7-par_lm] = b_ns[7-par_lm] - lam_lm[2][l] * dalm[3][l];// U odd
b_ns[8-par_lm] = b_ns[8-par_lm] + lam_lm[2][l] * dalm[2][l];
//printf( "alm2map_pol_der1 b_ns tab computed\n" );
//-------------------------- 1st derivatives
//printf( "alm2map_pol_der1 compute 1st derivatives\n" );
if( l > 0 )
{
// df/dphi = i * m * Y_lm * a_lm
f2 = m * lam_lm[1][l];
f3 = m * lam_lm[2][l];
b_ns_p[par_lm+3] = b_ns_p[par_lm+3] - m * factor[1];// warning negative index
b_ns_p[par_lm+4] = b_ns_p[par_lm+4] + m * factor[0];
b_ns_p[par_lm+5] = b_ns_p[par_lm+5] + f2 * dalm[3][l];
b_ns_p[par_lm+6] = b_ns_p[par_lm+6] - f2 * dalm[2][l];
b_ns_p[par_lm+7] = b_ns_p[par_lm+7] + f2 * dalm[5][l];
b_ns_p[par_lm+8] = b_ns_p[par_lm+8] - f2 * dalm[4][l];
b_ns_p[5-par_lm] = b_ns_p[5-par_lm] + f3 * dalm[4][l];// Q odd
b_ns_p[6-par_lm] = b_ns_p[6-par_lm] + f3 * dalm[5][l];
b_ns_p[7-par_lm] = b_ns_p[7-par_lm] - f3 * dalm[2][l];// U odd
b_ns_p[8-par_lm] = b_ns_p[8-par_lm] - f3 * dalm[3][l];
// dY_lm/dtheta = (l/tan(theta)*Y_lm -fact/sin(theta)*Y_l-1m)
// dW_lm/dtheta = (l/tan(theta)*W_lm - S*m/l/sin(theta)*X_lm -fact/sin(theta)*sqrt(1-S^2/l^2)*W_l-1m
// dX_lm/dtheta = (l/tan(theta)*X_lm - S*m/l/sin(theta)*W_lm -fact/sin(theta)*sqrt(1-S^2/l^2)*X_l-1m
a0 = fl * cotanth;// l/tan(theta)
at = lam_fact_der[l] * one_on_s1;// sqrt((2l+1)/(2l-1)*(l^2-m^2))/sin(theta)
derY = a0 * lam_lm[0][l] - at * lam_lm[0][l-1];
b_ns_t[3-par_lm] = b_ns_t[3-par_lm] + derY * dalm[0][l];// T odd
b_ns_t[4-par_lm] = b_ns_t[4-par_lm] + derY * dalm[1][l];
}
if( l > 1 )
{
xp = (2*m)*one_on_s1/fl;// spin m / (l sin(theta))
aq = at * sqrt( 1.0 - 4.0/( fl * fl ) );// at * sqrt(l^2-spin^2)/l
derW = a0 * lam_lm[1][l] - aq * lam_lm[1][l-1] + xp * lam_lm[2][l];
derX = a0 * lam_lm[2][l] - aq * lam_lm[2][l-1] + xp * lam_lm[1][l];
b_ns_t[5-par_lm] = b_ns_t[5-par_lm] - derW * dalm[2][l];// Q, U odd
b_ns_t[6-par_lm] = b_ns_t[6-par_lm] - derW * dalm[3][l];
b_ns_t[7-par_lm] = b_ns_t[7-par_lm] - derW * dalm[4][l];
b_ns_t[8-par_lm] = b_ns_t[8-par_lm] - derW * dalm[5][l];
b_ns_t[5+par_lm] = b_ns_t[5+par_lm] + derX * dalm[5][l];// Q even
b_ns_t[6+par_lm] = b_ns_t[6+par_lm] - derX * dalm[4][l];
b_ns_t[7+par_lm] = b_ns_t[7+par_lm] - derX * dalm[3][l];// U even
b_ns_t[8+par_lm] = b_ns_t[8+par_lm] + derX * dalm[2][l];
}
}//end loop on l
for( k=0; k <= 2; k++ )
{
// loop on T,Q,U
k0 = 2*k;
k1 = k0 + 1;
// fields
b_d1( 0+k, m, ithl).re = b_ns[k0+6] + b_ns[k0];// north=Even+Odd
b_d1( 0+k, m, ithl).im = b_ns[k1+6] + b_ns[k1];
b_d1( 3+k, m, ithl).re = b_ns[k0+6] - b_ns[k0];// south=Even-Odd
b_d1( 3+k, m, ithl).im = b_ns[k1+6] - b_ns[k1];
//printf( "alm2map_pol_der1 b_d1: T, Q, U\n" );
// dfield/dtheta
b_d1( 6+k, m, ithl).re = b_ns_t[k0+6] + b_ns_t[k0];// north=Even+Odd
b_d1( 6+k, m, ithl).im = b_ns_t[k1+6] + b_ns_t[k1];
b_d1( 9+k, m, ithl).re = b_ns_t[k0+6] - b_ns_t[k0];// south=Even-Odd
b_d1( 9+k, m, ithl).im = b_ns_t[k1+6] - b_ns_t[k1];
//printf( "alm2map_pol_der1 b_d1: d/dtheta T, Q, U\n" );
// dfield/dphi/sin(theta)
b_d1( 12+k, m, ithl).re = ( b_ns_p[k0+6] + b_ns_p[k0] ) * one_on_s1;
b_d1( 12+k, m, ithl).im = ( b_ns_p[k1+6] + b_ns_p[k1] ) * one_on_s1;
b_d1( 15+k, m, ithl).re = ( b_ns_p[k0+6] - b_ns_p[k0] ) * one_on_s1;
b_d1( 15+k, m, ithl).im = ( b_ns_p[k1+6] - b_ns_p[k1] ) * one_on_s1;
//printf( "alm2map_pol_der1 b_d1: d/dphi T, Q, U\n" );
}
}// end if( lmax >= l_min )
}// and loop on ithl
}// end loop on m
// warning compilation case OpenMP
//if( do_openmp() == true )// if ( do_openmp())
//{
//recfac.dealloc();
//dalm.dealloc();
//lam_fact.dealloc();
//lam_fact_der.dealloc();
//lam_lm.dealloc();
//}
//b_ns.dealloc();
//b_ns_p.dealloc();
//b_ns_t.dealloc();
}// end of parallel region
cout << "alm2map_pol_der1 first parallel loop done" << '\n';
long nphl, istart_south, istart_north;
//shared mapT, mapQ, mapU, mapT_d_theta, mapQ_d_theta, mapU_d_theta, mapT_d_phi, mapQ_d_phi, mapU_d_phi
//#pragma omp parallel default(none) \
// shared( nside, lmax, mmax, npix, nrings, nphmx, lchk, uchk, b_d1, nph, startpix, kphi0 ) \
// private( ithl, nphl, istart_north, istart_south, ith, ring, bline, k0 )
//{
// warning compilation case OpenMP
//if( do_openmp() == true )// if ( do_openmp())
//{
ring.alloc( nphmx );
bline.alloc( mmax );
//}
printf( "alm2map_pol_der1 2nd parallel region dynamic allocation done\n" );
printf( "alm2map_pol_der1 b_d1 size1: %ld, b_d1 size2: %ld, b_d1 size3: %ld\n", b_d1.size1(), b_d1.size2(), b_d1.size3() );
//#pragma omp for schedule( dynamic, 1 )
for( ithl=0; ithl <= (uchk-lchk); ithl ++ )
{
printf( "alm2map_pol_der1 enter for( ithl=0; ithl <= (uchk-lchk); ithl ++ )\n" );
nphl = nph[ithl];
istart_north = startpix[ithl];
istart_south = npix - istart_north - nphl;
ith = ithl + lchk;
// ---------------------------------------------------------------
// sum_m b(m,theta)*exp(i*m*phi) -> f(phi,theta)
// ---------------------------------------------------------------
long ind;
printf( "alm2map_pol_der1 enter 1st loop for( k0=0; k0 <= 2; k0++ )\n" );
for( k0=0; k0 <= 2; k0++ )
{
cout << "k0: " << k0 << " mmax: " << mmax << " ithl: " << ithl << '\n';
bline.fill( 0.0 );
for( ind=0; ind < mmax; ind++ )
{
bline[ind].re = b_d1( k0, ind, ithl ).re;
bline[ind].im = b_d1( k0, ind, ithl ).im;
}
printf( "\nalm2map_pol_der1 bline copy from b_d1\n" );
cout << "nside: " << nside << " lmax: " << lmax<< " mmax: " << mmax << " nphl: " << nphl << " ithl: " << ithl << " kphi0[ithl]: " << kphi0[ithl] << '\n';
ring_synthesis( nside, lmax, mmax, bline, nphl, ring, kphi0[ithl] ); // north hemisph. + equator
printf( "alm2map_pol_der1 ring_synthesis T, Q, U north hemisph. + equator done\n" );
for( ind=0; ind < nphl; ind++ )
{
switch( k0 )
{
case 0 :
mapT[istart_north+ind] = ring[ind];
break;
case 1 :
mapQ[istart_north+ind] = ring[ind];
break;
case 2:
mapU[istart_north+ind] = ring[ind];
break;
}
}
}
printf( "alm2map_pol_der1 enter 2nd loop for( k0=0; k0 <= 2; k0++ )\n" );
for( k0=0; k0 <= 2; k0++ )
{
for( ind=0; ind < mmax; ind++ )
{
bline[ind] = b_d1( 6+k0, ind, ithl );
}
ring_synthesis( nside, lmax, mmax, bline, nphl, ring, kphi0[ithl] ); // north hemisph. + equator
printf( "alm2map_pol_der1 ring_synthesis T, Q, U d/d_theta north hemisph. + equator done\n" );
for( ind=0; ind < nphl; ind++ )
{
switch( k0 )
{
case 0 :
mapT_d_theta[istart_north+ind] = ring[ind];
break;
case 1 :
mapQ_d_theta[istart_north+ind] = ring[ind];
break;
case 2:
mapU_d_theta[istart_north+ind] = ring[ind];
break;
}
}
}
printf( "alm2map_pol_der1 enter 3rd loop for( k0=0; k0 <= 2; k0++ )\n" );
for( k0=0; k0 <= 2; k0++ )
{
for( ind=0; ind < mmax; ind++ )
{
bline[ind] = b_d1( 12+k0, ind, ithl );
}
ring_synthesis( nside, lmax, mmax, bline, nphl, ring, kphi0[ithl] ); // north hemisph. + equator
printf( "alm2map_pol_der1 ring_synthesis T, Q, U d/d_phi north hemisph. + equator done\n" );
for( ind=0; ind < nphl; ind++ )
{
switch( k0 )
{
case 0 :
mapT_d_phi[istart_north+ind] = ring[ind];
break;
case 1 :
mapQ_d_phi[istart_north+ind] = ring[ind];
break;
case 2:
mapU_d_phi[istart_north+ind] = ring[ind];
break;
}
}
}
if( ith < nrings )
{
printf( "alm2map_pol_der1 enter 4th loop for( k0=0; k0 <= 2; k0++ )\n" );
for( k0=0; k0 <= 2; k0++ )
{
for( ind=0; ind < mmax; ind++ )
{
bline[ind] = b_d1( 3+k0, ind, ithl );
}
ring_synthesis( nside, lmax, mmax, bline, nphl, ring, kphi0[ithl] ); // south hemisph. w/o equat
printf( "alm2map_pol_der1 ring_synthesis T, Q, U south hemisph. w/o equat done\n" );
for( ind=0; ind < nphl; ind++ )
{
switch( k0 )
{
case 0 :
mapT[istart_south+ind] = ring[ind];
break;
case 1 :
mapQ[istart_south+ind] = ring[ind];
break;
case 2:
mapU[istart_south+ind] = ring[ind];
break;
}
}
}
printf( "alm2map_pol_der1 enter 5th loop for( k0=0; k0 <= 2; k0++ )\n" );
for( k0=0; k0 <= 2; k0++ )
{
for( ind=0; ind < mmax; ind++ )
{
bline[ind] = b_d1( 9+k0, ind, ithl );
}
ring_synthesis( nside, lmax, mmax, bline, nphl, ring, kphi0[ithl] ); // south hemisph. w/o equat
printf( "alm2map_pol_der1 ring_synthesis T, Q, U d/d_theta south hemisph. w/o equat done\n" );
for( ind=0; ind < nphl; ind++ )
{
switch( k0 )
{
case 0 :
mapT_d_theta[istart_south+ind] = ring[ind];
break;
case 1 :
mapQ_d_theta[istart_south+ind] = ring[ind];
break;
case 2:
mapU_d_theta[istart_south+ind] = ring[ind];
break;
}
}
}
printf( "alm2map_pol_der1 enter 6th loop for( k0=0; k0 <= 2; k0++ )\n" );
for( k0=0; k0 <= 2; k0++ )
{
for( ind=0; ind < mmax; ind++ )
{
bline[ind] = b_d1( 15+k0, ind, ithl );
}
ring_synthesis( nside, lmax, mmax, bline, nphl, ring, kphi0[ithl] ); // south hemisph. w/o equat
printf( "alm2map_pol_der1 ring_synthesis T, Q, U d/d_phi south hemisph. w/o equat done\n" );
for( ind=0; ind < nphl; ind++ )
{
switch( k0 )
{
case 0 :
mapT_d_phi[istart_south+ind] = ring[ind];
break;
case 1 :
mapQ_d_phi[istart_south+ind] = ring[ind];
break;
case 2:
mapU_d_phi[istart_south+ind] = ring[ind];
break;
}
}
}
}// end if( ith < nrings )
}// and loop on ithl
// warning compilation case OpenMP
//if( do_openmp() == true )// if ( do_openmp())
//{
//ring.dealloc();
//bline.dealloc();
//}
//}// end of parallel region
cout << "alm2map_pol_der1 second parallel loop done" << '\n';
}// end loop on ichunck
// --------------------
// free memory and exit
// --------------------
// warning compilation case OpenMP
// if( do_openmp() == false )// if (.not. do_openmp())
// {
// lam_fact.dealloc();
// lam_fact_der.dealloc();
// ring.dealloc();
// recfac.dealloc();
// dalm.dealloc();
// lam_lm.dealloc();
// bline.dealloc();
// }
//mfac.dealloc();
//b_d1.dealloc();
//normal_l.dealloc();
}
template void alm2map_pol_der1( const Alm<xcomplex<float> > Alm_T, const Alm<xcomplex<float> > Alm_E, const Alm<xcomplex<float> > Alm_B,
Healpix_Map<float> &mapT, Healpix_Map<float> &mapQ, Healpix_Map<float> &mapU,
Healpix_Map<float> &mapT_d_theta, Healpix_Map<float> &mapQ_d_theta, Healpix_Map<float> &mapU_d_theta,
Healpix_Map<float> &mapT_d_phi, Healpix_Map<float> &mapQ_d_phi, Healpix_Map<float> &mapU_d_phi );
template void alm2map_pol_der1( const Alm<xcomplex<double> > Alm_T, const Alm<xcomplex<double> > Alm_E, const Alm<xcomplex<double> > Alm_B,
Healpix_Map<double> &mapT, Healpix_Map<double> &mapQ, Healpix_Map<double> &mapU,
Healpix_Map<double> &mapT_d_theta, Healpix_Map<double> &mapQ_d_theta, Healpix_Map<double> &mapU_d_theta,
Healpix_Map<double> &mapT_d_phi, Healpix_Map<double> &mapQ_d_phi, Healpix_Map<double> &mapU_d_phi );
*/
#endif
|
updater_basemaker-inl.h | /*!
* Copyright 2014 by Contributors
* \file updater_basemaker-inl.h
* \brief implement a common tree constructor
* \author Tianqi Chen
*/
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#include <xgboost/base.h>
#include <xgboost/tree_updater.h>
#include <vector>
#include <algorithm>
#include <string>
#include <limits>
#include <utility>
#include "./param.h"
#include "../common/sync.h"
#include "../common/io.h"
#include "../common/random.h"
#include "../common/quantile.h"
namespace xgboost {
namespace tree {
/*!
* \brief base tree maker class that defines common operation
* needed in tree making
*/
class BaseMaker: public TreeUpdater {
public:
  // Configure the updater: forward all key/value arguments to the training
  // parameters (unknown keys are tolerated).
  void Init(const std::vector<std::pair<std::string, std::string> >& args) override {
    param.InitAllowUnknown(args);
  }
protected:
  // helper to collect and query feature meta information
  struct FMetaHelper {
   public:
    /*! \brief find type of each feature, use column format.
     * Records per feature fid:
     *   fminmax[2*fid]   = -(minimum stored feature value)  (negated)
     *   fminmax[2*fid+1] =  maximum stored feature value
     * Storing the NEGATED minimum lets a single Allreduce<Max> in SyncInfo()
     * synchronize both bounds across workers at once. */
    inline void InitByCol(DMatrix* p_fmat,
                          const RegTree& tree) {
      fminmax.resize(tree.param.num_feature * 2);
      // sentinel: "feature never seen" for both slots
      std::fill(fminmax.begin(), fminmax.end(),
                -std::numeric_limits<bst_float>::max());
      // start accumulating statistics
      dmlc::DataIter<ColBatch>* iter = p_fmat->ColIterator();
      iter->BeforeFirst();
      while (iter->Next()) {
        const ColBatch& batch = iter->Value();
        for (bst_uint i = 0; i < batch.size; ++i) {
          const bst_uint fid = batch.col_index[i];
          const ColBatch::Inst& c = batch[i];
          if (c.length != 0) {
            // NOTE(review): assumes each column is sorted by fvalue, so c[0]
            // is the column minimum and c[length-1] the maximum -- confirm
            // against the ColBatch producer.
            fminmax[fid * 2 + 0] = std::max(-c[0].fvalue, fminmax[fid * 2 + 0]);
            fminmax[fid * 2 + 1] = std::max(c[c.length - 1].fvalue, fminmax[fid * 2 + 1]);
          }
        }
      }
    }
    /*! \brief synchronize the information across all distributed workers */
    inline void SyncInfo() {
      rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax), fminmax.size());
    }
    // get feature type, 0:empty 1:binary 2:real
    // ("binary" here means min == max among stored values, i.e. the feature
    // takes a single non-missing value)
    inline int Type(bst_uint fid) const {
      CHECK_LT(fid * 2 + 1, fminmax.size())
          << "FeatHelper fid exceed query bound ";
      bst_float a = fminmax[fid * 2];      // negated minimum
      bst_float b = fminmax[fid * 2 + 1];  // maximum
      if (a == -std::numeric_limits<bst_float>::max()) return 0;  // never observed
      if (-a == b) {
        return 1;
      } else {
        return 2;
      }
    }
    // largest value observed for feature fid (valid only when Type(fid) != 0)
    inline bst_float MaxValue(bst_uint fid) const {
      return fminmax[fid *2 + 1];
    }
    /*! \brief sample a fraction p of the non-empty features into *p_findex;
     * rank 0 draws the shuffle and the result is broadcast so that every
     * worker ends up with the identical feature subset. */
    inline void SampleCol(float p, std::vector<bst_uint> *p_findex) const {
      std::vector<bst_uint> &findex = *p_findex;
      findex.clear();
      // candidate set: all features that were observed at least once
      for (size_t i = 0; i < fminmax.size(); i += 2) {
        const bst_uint fid = static_cast<bst_uint>(i / 2);
        if (this->Type(fid) != 0) findex.push_back(fid);
      }
      unsigned n = static_cast<unsigned>(p * findex.size());
      std::shuffle(findex.begin(), findex.end(), common::GlobalRandom());
      findex.resize(n);
      // sync the findex if it is subsample
      std::string s_cache;
      common::MemoryBufferStream fc(&s_cache);
      dmlc::Stream& fs = fc;
      if (rabit::GetRank() == 0) {
        fs.Write(findex);
      }
      rabit::Broadcast(&s_cache, 0);
      // NOTE(review): assumes the stream/broadcast combination yields the
      // rank-0 sample when read back on every rank -- verify against
      // MemoryBufferStream cursor semantics.
      fs.Read(&findex);
    }
   private:
    // per-feature (negated min, max) pairs; see InitByCol
    std::vector<bst_float> fminmax;
  };
// ------static helper functions ------
// helper function to get to next level of the tree
/*! \brief this is helper function for row based data*/
inline static int NextLevel(const RowBatch::Inst &inst, const RegTree &tree, int nid) {
const RegTree::Node &n = tree[nid];
bst_uint findex = n.split_index();
for (unsigned i = 0; i < inst.length; ++i) {
if (findex == inst[i].index) {
if (inst[i].fvalue < n.split_cond()) {
return n.cleft();
} else {
return n.cright();
}
}
}
return n.cdefault();
}
  // ------class member helpers---------
  /*! \brief initialize temp data structure.
   * Sets up the per-row `position` array (root assignment, "deleted" marks
   * for rows with negative hessian, optional row subsampling) and seeds the
   * expand queue with the tree roots. A complemented (~nid) position means
   * "row excluded from statistics collection". */
  inline void InitData(const std::vector<bst_gpair> &gpair,
                       const DMatrix &fmat,
                       const RegTree &tree) {
    CHECK_EQ(tree.param.num_nodes, tree.param.num_roots)
        << "TreeMaker: can only grow new tree";
    const std::vector<unsigned> &root_index = fmat.info().root_index;
    {
      // setup position: every row starts at its root node
      position.resize(gpair.size());
      if (root_index.size() == 0) {
        // single-root tree: all rows start at node 0
        std::fill(position.begin(), position.end(), 0);
      } else {
        for (size_t i = 0; i < position.size(); ++i) {
          position[i] = root_index[i];
          CHECK_LT(root_index[i], (unsigned)tree.param.num_roots)
              << "root index exceed setting";
        }
      }
      // mark delete for the deleted datas
      // (negative hessian marks rows excluded upstream)
      for (size_t i = 0; i < position.size(); ++i) {
        if (gpair[i].GetHess() < 0.0f) position[i] = ~position[i];
      }
      // mark subsample: randomly drop surviving rows when subsample < 1
      if (param.subsample < 1.0f) {
        std::bernoulli_distribution coin_flip(param.subsample);
        auto& rnd = common::GlobalRandom();
        for (size_t i = 0; i < position.size(); ++i) {
          if (gpair[i].GetHess() < 0.0f) continue;
          if (!coin_flip(rnd)) position[i] = ~position[i];
        }
      }
    }
    {
      // expand query: expansion starts from the root nodes
      qexpand.reserve(256); qexpand.clear();
      for (int i = 0; i < tree.param.num_roots; ++i) {
        qexpand.push_back(i);
      }
      this->UpdateNode2WorkIndex(tree);
    }
  }
/*! \brief update queue expand add in new leaves */
inline void UpdateQueueExpand(const RegTree &tree) {
std::vector<int> newnodes;
for (size_t i = 0; i < qexpand.size(); ++i) {
const int nid = qexpand[i];
if (!tree[nid].is_leaf()) {
newnodes.push_back(tree[nid].cleft());
newnodes.push_back(tree[nid].cright());
}
}
// use new nodes for qexpand
qexpand = newnodes;
this->UpdateNode2WorkIndex(tree);
}
// return decoded position
inline int DecodePosition(bst_uint ridx) const {
const int pid = position[ridx];
return pid < 0 ? ~pid : pid;
}
// encode the encoded position value for ridx
inline void SetEncodePosition(bst_uint ridx, int nid) {
if (position[ridx] < 0) {
position[ridx] = ~nid;
} else {
position[ridx] = nid;
}
}
  /*!
   * \brief this is helper function uses column based data structure,
   *  reset the positions to the lastest one
   * \param nodes the set of nodes that contains the split to be used
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  inline void ResetPositionCol(const std::vector<int> &nodes,
                               DMatrix *p_fmat,
                               const RegTree &tree) {
    // set the positions in the nondefault branch first; rows whose split
    // feature is present are routed by value ...
    this->SetNonDefaultPositionCol(nodes, p_fmat, tree);
    // ... then every remaining row is sent down its default branch
    // (order matters: the default pass only touches rows not yet routed)
    this->SetDefaultPostion(p_fmat, tree);
  }
/*!
* \brief helper function to set the non-leaf positions to default direction.
* This function can be applied multiple times and will get the same result.
* \param p_fmat feature matrix needed for tree construction
* \param tree the regression tree structure
*/
inline void SetDefaultPostion(DMatrix *p_fmat,
const RegTree &tree) {
// set rest of instances to default position
const RowSet &rowset = p_fmat->buffered_rowset();
// set default direct nodes to default
// for leaf nodes that are not fresh, mark then to ~nid,
// so that they are ignored in future statistics collection
const bst_omp_uint ndata = static_cast<bst_omp_uint>(rowset.size());
#pragma omp parallel for schedule(static)
for (bst_omp_uint i = 0; i < ndata; ++i) {
const bst_uint ridx = rowset[i];
const int nid = this->DecodePosition(ridx);
if (tree[nid].is_leaf()) {
// mark finish when it is not a fresh leaf
if (tree[nid].cright() == -1) {
position[ridx] = ~nid;
}
} else {
// push to default branch
if (tree[nid].default_left()) {
this->SetEncodePosition(ridx, tree[nid].cleft());
} else {
this->SetEncodePosition(ridx, tree[nid].cright());
}
}
}
}
  /*!
   * \brief this is helper function uses column based data structure,
   *  to CORRECT the positions of non-default directions that WAS set to default
   *  before calling this function.
   * \param batch The column batch
   * \param sorted_split_set The set of index that contains split solutions.
   *        Must be sorted ascending (binary-searched below).
   * \param tree the regression tree structure
   */
  inline void CorrectNonDefaultPositionByBatch(
      const ColBatch& batch,
      const std::vector<bst_uint> &sorted_split_set,
      const RegTree &tree) {
    for (size_t i = 0; i < batch.size; ++i) {
      ColBatch::Inst col = batch[i];
      const bst_uint fid = batch.col_index[i];
      // only process columns whose feature is actually used by some split
      auto it = std::lower_bound(sorted_split_set.begin(), sorted_split_set.end(), fid);
      if (it != sorted_split_set.end() && *it == fid) {
        const bst_omp_uint ndata = static_cast<bst_omp_uint>(col.length);
        #pragma omp parallel for schedule(static)
        for (bst_omp_uint j = 0; j < ndata; ++j) {
          const bst_uint ridx = col[j].index;
          const bst_float fvalue = col[j].fvalue;
          const int nid = this->DecodePosition(ridx);
          // rows were pre-routed to the default child, which is a leaf here
          CHECK(tree[nid].is_leaf());
          int pid = tree[nid].parent();
          // go back to parent, correct those who are not default
          // (the feature IS present for this row, so route by value)
          if (!tree[nid].is_root() && tree[pid].split_index() == fid) {
            if (fvalue < tree[pid].split_cond()) {
              this->SetEncodePosition(ridx, tree[pid].cleft());
            } else {
              this->SetEncodePosition(ridx, tree[pid].cright());
            }
          }
        }
      }
    }
  }
/*!
 * \brief Collect the distinct split feature indices used by the non-leaf
 *        entries of `nodes`, sorted ascending.
 * \param nodes the set of nodes that contains the split to be used
 * \param tree the regression tree structure
 * \param out_split_set The resulting sorted, de-duplicated split index set
 */
inline void GetSplitSet(const std::vector<int> &nodes,
                        const RegTree &tree,
                        std::vector<unsigned>* out_split_set) {
  std::vector<unsigned>& split_feats = *out_split_set;
  split_feats.clear();
  // Gather the split feature of every expanding (non-leaf) node.
  for (size_t k = 0; k < nodes.size(); ++k) {
    const int node_id = nodes[k];
    if (!tree[node_id].is_leaf()) {
      split_feats.push_back(tree[node_id].split_index());
    }
  }
  // Sort and drop duplicates in place.
  std::sort(split_feats.begin(), split_feats.end());
  split_feats.erase(std::unique(split_feats.begin(), split_feats.end()),
                    split_feats.end());
}
/*!
* \brief this is helper function uses column based data structure,
* update all positions into nondefault branch, if any, ignore the default branch
* \param nodes the set of nodes that contains the split to be used
* \param p_fmat feature matrix needed for tree construction
* \param tree the regression tree structure
*/
virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes,
DMatrix *p_fmat,
const RegTree &tree) {
std::vector<unsigned> fsplits;
this->GetSplitSet(nodes, tree, &fsplits);
dmlc::DataIter<ColBatch> *iter = p_fmat->ColIterator(fsplits);
while (iter->Next()) {
const ColBatch &batch = iter->Value();
for (size_t i = 0; i < batch.size; ++i) {
ColBatch::Inst col = batch[i];
const bst_uint fid = batch.col_index[i];
const bst_omp_uint ndata = static_cast<bst_omp_uint>(col.length);
#pragma omp parallel for schedule(static)
for (bst_omp_uint j = 0; j < ndata; ++j) {
const bst_uint ridx = col[j].index;
const bst_float fvalue = col[j].fvalue;
const int nid = this->DecodePosition(ridx);
// go back to parent, correct those who are not default
if (!tree[nid].is_leaf() && tree[nid].split_index() == fid) {
if (fvalue < tree[nid].split_cond()) {
this->SetEncodePosition(ridx, tree[nid].cleft());
} else {
this->SetEncodePosition(ridx, tree[nid].cright());
}
}
}
}
}
}
/*!
 * \brief helper function to get statistics from a tree: accumulate gradient
 *        statistics per node over all buffered rows. Rows with a negative
 *        encoded position (no longer expanding) are skipped. Per-thread
 *        buffers avoid write contention; they are summed at the end for the
 *        nodes listed in qexpand.
 * \param gpair gradient pairs of the instances
 * \param fmat feature matrix providing the buffered row set and meta info
 * \param tree the regression tree structure (used here for sizing)
 * \param p_thread_temp per-thread scratch statistics, (re)sized in place
 * \param p_node_stats output statistics, one slot per tree node
 */
template<typename TStats>
inline void GetNodeStats(const std::vector<bst_gpair> &gpair,
                         const DMatrix &fmat,
                         const RegTree &tree,
                         std::vector< std::vector<TStats> > *p_thread_temp,
                         std::vector<TStats> *p_node_stats) {
  std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp;
  const MetaInfo &info = fmat.info();
  thread_temp.resize(omp_get_max_threads());
  p_node_stats->resize(tree.param.num_nodes);
  // Each thread sizes its own buffer and clears the slots of the nodes
  // currently being expanded.
  #pragma omp parallel
  {
    const int tid = omp_get_thread_num();
    thread_temp[tid].resize(tree.param.num_nodes, TStats(param));
    for (size_t i = 0; i < qexpand.size(); ++i) {
      const unsigned nid = qexpand[i];
      thread_temp[tid][nid].Clear();
    }
  }
  const RowSet &rowset = fmat.buffered_rowset();
  // setup position
  const bst_omp_uint ndata = static_cast<bst_omp_uint>(rowset.size());
  #pragma omp parallel for schedule(static)
  for (bst_omp_uint i = 0; i < ndata; ++i) {
    const bst_uint ridx = rowset[i];
    const int nid = position[ridx];
    const int tid = omp_get_thread_num();
    // A negative position means the row is no longer part of expansion.
    if (nid >= 0) {
      thread_temp[tid][nid].Add(gpair, info, ridx);
    }
  }
  // sum the per thread statistics together
  for (size_t j = 0; j < qexpand.size(); ++j) {
    const int nid = qexpand[j];
    TStats &s = (*p_node_stats)[nid];
    s.Clear();
    for (size_t tid = 0; tid < thread_temp.size(); ++tid) {
      s.Add(thread_temp[tid][nid]);
    }
  }
}
/*! \brief common helper data structure to build a weighted quantile sketch
 *         from feature values streamed in ascending order */
struct SketchEntry {
  /*! \brief total sum of amount to be met */
  double sum_total;
  /*! \brief statistics used in the sketch: current rank lower bound and
   *         accumulated weight of the value being collected */
  double rmin, wmin;
  /*! \brief last seen feature value */
  bst_float last_fvalue;
  /*! \brief rank target that triggers emission of the next sketch entry
   *         (the original comment called this "current size of sketch",
   *         which was wrong); -1 is the "nothing pushed yet" sentinel */
  double next_goal;
  // pointer to the sketch to put things in
  common::WXQuantileSketch<bst_float, bst_float> *sketch;
  // initialize the space
  inline void Init(unsigned max_size) {
    next_goal = -1.0f;
    rmin = wmin = 0.0f;
    sketch->temp.Reserve(max_size + 1);
    sketch->temp.size = 0;
  }
  /*!
   * \brief push a new element to sketch
   * \param fvalue feature value, comes in sorted ascending order
   * \param w weight
   * \param max_size maximum number of entries the sketch may hold
   */
  inline void Push(bst_float fvalue, bst_float w, unsigned max_size) {
    if (next_goal == -1.0f) {
      // First value ever: just remember it.
      next_goal = 0.0f;
      last_fvalue = fvalue;
      wmin = w;
      return;
    }
    if (last_fvalue != fvalue) {
      double rmax = rmin + wmin;
      if (rmax >= next_goal && sketch->temp.size != max_size) {
        if (sketch->temp.size == 0 ||
            last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
          // push to sketch
          sketch->temp.data[sketch->temp.size] =
              common::WXQuantileSketch<bst_float, bst_float>::
              Entry(static_cast<bst_float>(rmin),
                    static_cast<bst_float>(rmax),
                    static_cast<bst_float>(wmin), last_fvalue);
          CHECK_LT(sketch->temp.size, max_size)
              << "invalid maximum size max_size=" << max_size
              << ", stemp.size" << sketch->temp.size;
          ++sketch->temp.size;
        }
        if (sketch->temp.size == max_size) {
          // Sketch is full: set an unreachable goal so no more entries come.
          next_goal = sum_total * 2.0f + 1e-5f;
        } else {
          next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size);
        }
      } else {
        if (rmax >= next_goal) {
          // BUGFIX(message): diagnostic previously printed "naxt_goal".
          LOG(TRACKER) << "INFO: rmax=" << rmax
                       << ", sum_total=" << sum_total
                       << ", next_goal=" << next_goal
                       << ", size=" << sketch->temp.size;
        }
      }
      rmin = rmax;
      wmin = w;
      last_fvalue = fvalue;
    } else {
      // Same feature value as before: only accumulate its weight.
      wmin += w;
    }
  }
  /*! \brief push final unfinished value to the sketch */
  inline void Finalize(unsigned max_size) {
    double rmax = rmin + wmin;
    if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
      CHECK_LE(sketch->temp.size, max_size)
          << "Finalize: invalid maximum size, max_size=" << max_size
          << ", stemp.size=" << sketch->temp.size;
      // push to sketch
      sketch->temp.data[sketch->temp.size] =
          common::WXQuantileSketch<bst_float, bst_float>::
          Entry(static_cast<bst_float>(rmin),
                static_cast<bst_float>(rmax),
                static_cast<bst_float>(wmin), last_fvalue);
      ++sketch->temp.size;
    }
    sketch->PushTemp();
  }
};
/*! \brief training parameter of tree grower */
TrainParam param;
/*! \brief queue of nodes to be expanded */
std::vector<int> qexpand;
/*!
 * \brief map active node to its working index offset in qexpand,
 * can be -1, which means the node is not actively expanding
 */
std::vector<int> node2workindex;
/*!
* \brief position of each instance in the tree
* can be negative, which means this position is no longer expanding
* see also Decode/EncodePosition
*/
std::vector<int> position;
private:
inline void UpdateNode2WorkIndex(const RegTree &tree) {
  // Rebuild the node-id -> qexpand-offset map.
  // BUGFIX: resize BEFORE filling. The original filled first and resized
  // afterwards, so any slots added by the resize were zero-initialized
  // instead of -1, making stale nodes alias work index 0.
  node2workindex.resize(tree.param.num_nodes);
  std::fill(node2workindex.begin(), node2workindex.end(), -1);
  for (size_t i = 0; i < qexpand.size(); ++i) {
    node2workindex[qexpand[i]] = static_cast<int>(i);
  }
}
};
} // namespace tree
} // namespace xgboost
#endif // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
|
cpoinv.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zpoinv.c, normal z -> c, Fri Sep 28 17:38:09 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_poinv
*
* Performs the Cholesky inversion of a Hermitian positive definite
* matrix A.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in,out] pA
* On entry, the Hermitian positive definite matrix A.
* If uplo = PlasmaUpper, the leading N-by-N upper triangular part of A
* contains the upper triangular part of the matrix A, and the strictly
* lower triangular part of A is not referenced.
* If uplo = PlasmaLower, the leading N-by-N lower triangular part of A
* contains the lower triangular part of the matrix A, and the strictly
* upper triangular part of A is not referenced.
* On exit, if return value = 0, the inverse of A following a
* Cholesky factorization A = U^H*U or A = L*L^H.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
* @retval > 0 if i, the leading minor of order i of A is not
* positive definite, so the factorization could not
* be completed, and the solution has not been computed.
*
*******************************************************************************
*
* @sa plasma_cpoinv
* @sa plasma_dpoinv
* @sa plasma_spoinv
*
******************************************************************************/
int plasma_cpoinv(plasma_enum_t uplo,
                  int n,
                  plasma_complex32_t *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -4;
    }
    // quick return (n was already validated as non-negative above,
    // so the original imax(n, 0) was redundant)
    if (n == 0)
        return PlasmaSuccess;
    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_poinv(plasma, PlasmaComplexFloat, n);
    // Set tiling parameters.
    int nb = plasma->nb;
    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_triangular_create(PlasmaComplexFloat, uplo, nb, nb,
                                           n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        // BUGFIX(message): previously blamed plasma_desc_general_create().
        plasma_error("plasma_desc_triangular_create() failed");
        return retval;
    }
    // Initialize sequence. The original ignored this return value, leaking
    // the descriptor A on failure.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_desc_destroy(&A);
        return retval;
    }
    // Initialize request (same failure handling as above).
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_desc_destroy(&A);
        return retval;
    }
    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_ctr2desc(pA, lda, A, &sequence, &request);
        // Call the tile async function.
        plasma_omp_cpoinv(uplo, A, &sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_cdesc2tr(A, pA, lda, &sequence, &request);
    }
    // implicit synchronization
    // Free matrix A in tile layout.
    plasma_desc_destroy(&A);
    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_poinv
*
* Computes the inverse of a complex Hermitian
* positive definite matrix A using the Cholesky factorization.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] A
* On entry, the Hermitian positive definite matrix A.
* On exit, the upper or lower triangle of the (Hermitian)
* inverse of A, overwriting the input factor U or L.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_cpoinv
 * @sa plasma_omp_cpoinv
* @sa plasma_omp_dpoinv
* @sa plasma_omp_spoinv
*
******************************************************************************/
void plasma_omp_cpoinv(plasma_enum_t uplo, plasma_desc_t A,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        // Only report through the handles when they are actually valid.
        if (sequence != NULL && request != NULL)
            plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Validate the handles before any check that reports through them:
    // BUGFIX: the original passed a possibly NULL sequence (or request)
    // straight into plasma_request_fail(), dereferencing a null pointer.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }
    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // quick return
    if (A.n == 0) {
        return;
    }
    // Factorize A = U^H*U (or L*L^H).
    plasma_pcpotrf(uplo, A, sequence, request);
    // Invert the triangular factor in place.
    plasma_pctrtri(uplo, PlasmaNonUnit, A, sequence, request);
    // Compute product of upper and lower triangle to form the inverse.
    plasma_pclauum(uplo, A, sequence, request);
}
|
omp_dem_search.h | //
// Project Name: Kratos
// Last Modified by: $Author: clabra $
// Date: $Date: 2007-03-29 19:37:47 $
// Revision: $Revision: 1.2 $
//
//
#if !defined(KRATOS_OMP_DEM_SEARCH_H_INCLUDED )
#define KRATOS_OMP_DEM_SEARCH_H_INCLUDED
// System includes
#include <string>
#include <iostream>
// include kratos definitions
#include "includes/define.h"
// Project includes
#include "spatial_containers/dem_search.h"
#include "utilities/openmp_utils.h"
// Configures
#include "discrete_particle_configure.h"
#include "geometrical_object_configure.h"
#include "node_configure.h"
// Search
#include "spatial_containers/bins_dynamic_objects.h"
#include "spatial_containers/bins_dynamic.h"
#include "custom_search/bins_dynamic_objects_periodic.h"
// External includes
/* Timer defines */
#include "utilities/timer.h"
#ifdef CUSTOMTIMER
#define KRATOS_TIMER_START(t) Timer::Start(t);
#define KRATOS_TIMER_STOP(t) Timer::Stop(t);
#else
#define KRATOS_TIMER_START(t)
#define KRATOS_TIMER_STOP(t)
#endif
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/** Detail class definition.
*/
class OMP_DEMSearch : public DEMSearch<OMP_DEMSearch>
{
public:
///@name Type Definitions
///@{
/// Pointer definition of OMP_DEMSearch
KRATOS_CLASS_POINTER_DEFINITION(OMP_DEMSearch);
typedef PointType* PtrPointType;
typedef std::vector<PtrPointType>* PointVector;
typedef std::vector<PtrPointType>::iterator PointIterator;
typedef double* DistanceVector;
typedef double* DistanceIterator;
//Configure Types
typedef DiscreteParticleConfigure<3> ElementConfigureType; //Element
typedef NodeConfigure<3> NodeConfigureType; //Node
typedef GeometricalConfigure<3> GeometricalConfigureType; //Generic Geometry
//Bin Types
typedef BinsObjectDynamic<ElementConfigureType> BinsType;
typedef BinsObjectDynamicPeriodic<ElementConfigureType> BinsTypePeriodic;
typedef std::unique_ptr<BinsType> BinsUniquePointerType;
typedef BinsObjectDynamic<NodeConfigureType> NodeBinsType;
typedef BinsObjectDynamic<GeometricalConfigureType> GeometricalBinsType;
//GeoimetricalObject
typedef PointerVectorSet<GeometricalObject, IndexedObject> GeometricalObjectType;
///@}
///@name Life Cycle
///@{
/// Default constructor.
OMP_DEMSearch(const double domain_min_x = 0.0, const double domain_min_y = 0.0, const double domain_min_z = 0.0,
              const double domain_max_x = -1.0, const double domain_max_y = -1.0, const double domain_max_z = -1.0)
{
    // A non-degenerate box in x (min <= max) signals a periodic domain;
    // the defaults (0 > -1) leave periodicity disabled.
    mDomainPeriodicity = (domain_min_x <= domain_max_x);
}
/// Destructor. No resources are owned directly by this class.
~OMP_DEMSearch(){
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
void SearchElementsInRadiusExclusiveImplementation (
    ElementsContainerType const& rStructureElements,
    ElementsContainerType const& rElements,
    const RadiusArrayType & Radius,
    VectorResultElementsContainerType& rResults,
    VectorDistanceType& rResultsDistance )
{
    // For each element in rElements, find the rStructureElements inside its
    // search radius (exclusive: the element itself is not a neighbour),
    // returning both the neighbours and their distances.
    // NOTE(review): Radius is unused here; each particle supplies its own
    // search radius (pre-existing behaviour, kept for interface parity).
    // CLEANUP: removed ~45 lines of commented-out legacy implementation that
    // shadowed this function (it also carried a distance-duplication bug).
    KRATOS_TRY
    int MaxNumberOfElements = rStructureElements.size();
    ElementsContainerType::ContainerType& elements_array     = const_cast<ElementsContainerType::ContainerType&>(rElements.GetContainer());
    ElementsContainerType::ContainerType& elements_ModelPart = const_cast<ElementsContainerType::ContainerType&>(rStructureElements.GetContainer());
    // GetBins() selects the periodic or non-periodic bins implementation.
    BinsUniquePointerType p_bins = GetBins(elements_ModelPart);
    #pragma omp parallel
    {
        // Thread-local scratch buffers reused across iterations.
        ResultElementsContainerType localResults(MaxNumberOfElements);
        DistanceType localResultsDistances(MaxNumberOfElements);
        std::size_t NumberOfResults = 0;
        #pragma omp for schedule(dynamic, 100) //schedule(guided)
        for(int i = 0; i < static_cast<int>(elements_array.size()); i++)
        {
            ResultElementsContainerType::iterator ResultsPointer = localResults.begin();
            DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();
            SphericParticle* p_particle = dynamic_cast<SphericParticle*>(&*elements_array[i]);
            const double radius = p_particle->GetSearchRadius();
            NumberOfResults = p_bins->SearchObjectsInRadiusExclusive(elements_array[i],radius,ResultsPointer,ResultsDistancesPointer,MaxNumberOfElements);
            rResults[i].insert(rResults[i].begin(),localResults.begin(),localResults.begin()+NumberOfResults);
            rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
        }
    }
    //MAJOR TODO: creating and destroying (when leaving the function) this BINS is not parallel and takes a significant time if we search at every time step. Can we re-use a bins and avoid allocation and deallocation?? MA
    KRATOS_CATCH("")
}
void SearchElementsInRadiusInclusiveImplementation (
    ElementsContainerType const& rStructureElements,
    ElementsContainerType const& rElements,
    const RadiusArrayType& Radius,
    VectorResultElementsContainerType& rResults,
    VectorDistanceType& rResultsDistance )
{
    // Inclusive element search with distances: the searched element may show
    // up among its own neighbours. Each particle supplies its own radius.
    KRATOS_TRY
    const int max_results = rStructureElements.size();
    ElementsContainerType::ContainerType& search_elements = const_cast<ElementsContainerType::ContainerType&>(rElements.GetContainer());
    ElementsContainerType::ContainerType& bin_elements    = const_cast<ElementsContainerType::ContainerType&>(rStructureElements.GetContainer());
    BinsUniquePointerType p_bins = GetBins(bin_elements);
    #pragma omp parallel
    {
        // Per-thread scratch buffers reused for every searched element.
        ResultElementsContainerType thread_results(max_results);
        DistanceType thread_distances(max_results);
        #pragma omp for
        for(int i = 0; i < static_cast<int>(search_elements.size()); i++)
        {
            ResultElementsContainerType::iterator results_it = thread_results.begin();
            DistanceType::iterator distances_it = thread_distances.begin();
            SphericParticle* p_particle = dynamic_cast<SphericParticle*>(&*search_elements[i]);
            const double search_radius = p_particle->GetSearchRadius();
            const std::size_t n_found = p_bins->SearchObjectsInRadius(search_elements[i], search_radius, results_it, distances_it, max_results);
            rResults[i].insert(rResults[i].begin(), thread_results.begin(), thread_results.begin() + n_found);
            rResultsDistance[i].insert(rResultsDistance[i].begin(), thread_distances.begin(), thread_distances.begin() + n_found);
        }
    }
    KRATOS_CATCH("")
}
void SearchElementsInRadiusExclusiveImplementation (
    ElementsContainerType const& rStructureElements,
    ElementsContainerType const& rElements,
    const RadiusArrayType & Radius,
    VectorResultElementsContainerType& rResults )
{
    // Exclusive element search without distances; each particle supplies its
    // own search radius (the Radius argument is not used here).
    KRATOS_TRY
    const int max_results = rStructureElements.size();
    ElementsContainerType::ContainerType& search_elements = const_cast<ElementsContainerType::ContainerType&>(rElements.GetContainer());
    ElementsContainerType::ContainerType& bin_elements    = const_cast<ElementsContainerType::ContainerType&>(rStructureElements.GetContainer());
    BinsUniquePointerType p_bins = GetBins(bin_elements);
    #pragma omp parallel
    {
        // Per-thread scratch buffer reused for every searched element.
        ResultElementsContainerType thread_results(max_results);
        #pragma omp for
        for(int i = 0; i < static_cast<int>(search_elements.size()); i++)
        {
            ResultElementsContainerType::iterator results_it = thread_results.begin();
            SphericParticle* p_particle = dynamic_cast<SphericParticle*>(&*search_elements[i]);
            const double search_radius = p_particle->GetSearchRadius();
            const std::size_t n_found = p_bins->SearchObjectsInRadiusExclusive(search_elements[i], search_radius, results_it, max_results);
            rResults[i].insert(rResults[i].begin(), thread_results.begin(), thread_results.begin() + n_found);
        }
    }
    KRATOS_CATCH("")
}
void SearchElementsInRadiusInclusiveImplementation (
    ElementsContainerType const& rStructureElements,
    ElementsContainerType const& rElements,
    const RadiusArrayType & Radius,
    VectorResultElementsContainerType& rResults )
{
    // Inclusive element search without distances; each particle supplies its
    // own search radius (the Radius argument is not used here).
    KRATOS_TRY
    int MaxNumberOfElements = rStructureElements.size();
    ElementsContainerType::ContainerType& elements_array     = const_cast<ElementsContainerType::ContainerType&>(rElements.GetContainer());
    ElementsContainerType::ContainerType& elements_ModelPart = const_cast<ElementsContainerType::ContainerType&>(rStructureElements.GetContainer());
    // CONSISTENCY FIX: use GetBins() like every other element search in this
    // class, so a periodic domain gets the periodic bins implementation; the
    // previous code constructed a non-periodic BinsType directly and silently
    // ignored domain periodicity.
    BinsUniquePointerType p_bins = GetBins(elements_ModelPart);
    #pragma omp parallel
    {
        ResultElementsContainerType localResults(MaxNumberOfElements);
        std::size_t NumberOfResults = 0;
        #pragma omp for
        for(int i = 0; i < static_cast<int>(elements_array.size()); i++)
        {
            ResultElementsContainerType::iterator ResultsPointer = localResults.begin();
            SphericParticle* p_particle = dynamic_cast<SphericParticle*>(&*elements_array[i]);
            const double radius = p_particle->GetSearchRadius();
            NumberOfResults = p_bins->SearchObjectsInRadius(elements_array[i],radius,ResultsPointer,MaxNumberOfElements);
            rResults[i].insert(rResults[i].begin(),localResults.begin(),localResults.begin()+NumberOfResults);
        }
    }
    KRATOS_CATCH("")
}
void SearchNodesInRadiusExclusiveImplementation (
    NodesContainerType const& rStructureNodes,
    NodesContainerType const& rNodes,
    const RadiusArrayType & Radius,
    VectorResultNodesContainerType& rResults,
    VectorDistanceType& rResultsDistance )
{
    // Exclusive node search with distances: for each node in rNodes, collect
    // the rStructureNodes within Radius[i], excluding the node itself.
    KRATOS_TRY
    const int max_results = rStructureNodes.size();
    NodesContainerType::ContainerType& search_nodes = const_cast<NodesContainerType::ContainerType&>(rNodes.GetContainer());
    NodesContainerType::ContainerType& bin_nodes    = const_cast<NodesContainerType::ContainerType&>(rStructureNodes.GetContainer());
    NodeBinsType bins(bin_nodes.begin(), bin_nodes.end());
    #pragma omp parallel
    {
        // Per-thread scratch buffers reused across iterations.
        ResultNodesContainerType thread_results(max_results);
        DistanceType thread_distances(max_results);
        #pragma omp for
        for(int i = 0; i < static_cast<int>(search_nodes.size()); i++)
        {
            ResultNodesContainerType::iterator results_it = thread_results.begin();
            DistanceType::iterator distances_it = thread_distances.begin();
            const std::size_t n_found = bins.SearchObjectsInRadiusExclusive(search_nodes[i], Radius[i], results_it, distances_it, max_results);
            rResults[i].insert(rResults[i].begin(), thread_results.begin(), thread_results.begin() + n_found);
            rResultsDistance[i].insert(rResultsDistance[i].begin(), thread_distances.begin(), thread_distances.begin() + n_found);
        }
    }
    KRATOS_CATCH("")
}
void SearchNodesInRadiusInclusiveImplementation (
    NodesContainerType const& rStructureNodes,
    NodesContainerType const& rNodes,
    const RadiusArrayType & Radius,
    VectorResultNodesContainerType& rResults,
    VectorDistanceType& rResultsDistance )
{
    // Inclusive node search with distances: the searched node itself may
    // appear among its own neighbours.
    KRATOS_TRY
    const int max_results = rStructureNodes.size();
    NodesContainerType::ContainerType& search_nodes = const_cast<NodesContainerType::ContainerType&>(rNodes.GetContainer());
    NodesContainerType::ContainerType& bin_nodes    = const_cast<NodesContainerType::ContainerType&>(rStructureNodes.GetContainer());
    NodeBinsType bins(bin_nodes.begin(), bin_nodes.end());
    #pragma omp parallel
    {
        // Per-thread scratch buffers reused across iterations.
        ResultNodesContainerType thread_results(max_results);
        DistanceType thread_distances(max_results);
        #pragma omp for
        for(int i = 0; i < static_cast<int>(search_nodes.size()); i++)
        {
            ResultNodesContainerType::iterator results_it = thread_results.begin();
            DistanceType::iterator distances_it = thread_distances.begin();
            const std::size_t n_found = bins.SearchObjectsInRadius(search_nodes[i], Radius[i], results_it, distances_it, max_results);
            rResults[i].insert(rResults[i].begin(), thread_results.begin(), thread_results.begin() + n_found);
            rResultsDistance[i].insert(rResultsDistance[i].begin(), thread_distances.begin(), thread_distances.begin() + n_found);
        }
    }
    KRATOS_CATCH("")
}
void SearchNodesInRadiusExclusiveImplementation (
    NodesContainerType const& rStructureNodes,
    NodesContainerType const& rNodes,
    const RadiusArrayType & Radius,
    VectorResultNodesContainerType& rResults )
{
    // Exclusive node search without distances.
    KRATOS_TRY
    const int max_results = rStructureNodes.size();
    NodesContainerType::ContainerType& search_nodes = const_cast<NodesContainerType::ContainerType&>(rNodes.GetContainer());
    NodesContainerType::ContainerType& bin_nodes    = const_cast<NodesContainerType::ContainerType&>(rStructureNodes.GetContainer());
    NodeBinsType bins(bin_nodes.begin(), bin_nodes.end());
    #pragma omp parallel
    {
        // Per-thread scratch buffer reused across iterations.
        ResultNodesContainerType thread_results(max_results);
        #pragma omp for
        for(int i = 0; i < static_cast<int>(search_nodes.size()); i++)
        {
            ResultNodesContainerType::iterator results_it = thread_results.begin();
            const std::size_t n_found = bins.SearchObjectsInRadiusExclusive(search_nodes[i], Radius[i], results_it, max_results);
            rResults[i].insert(rResults[i].begin(), thread_results.begin(), thread_results.begin() + n_found);
        }
    }
    KRATOS_CATCH("")
}
void SearchNodesInRadiusInclusiveImplementation (
    NodesContainerType const& rStructureNodes,
    NodesContainerType const& rNodes,
    const RadiusArrayType & Radius,
    VectorResultNodesContainerType& rResults )
{
    // Inclusive node search without distances.
    KRATOS_TRY
    const int max_results = rStructureNodes.size();
    NodesContainerType::ContainerType& search_nodes = const_cast<NodesContainerType::ContainerType&>(rNodes.GetContainer());
    NodesContainerType::ContainerType& bin_nodes    = const_cast<NodesContainerType::ContainerType&>(rStructureNodes.GetContainer());
    NodeBinsType bins(bin_nodes.begin(), bin_nodes.end());
    #pragma omp parallel
    {
        // Per-thread scratch buffer reused across iterations.
        ResultNodesContainerType thread_results(max_results);
        #pragma omp for
        for(int i = 0; i < static_cast<int>(search_nodes.size()); i++)
        {
            ResultNodesContainerType::iterator results_it = thread_results.begin();
            const std::size_t n_found = bins.SearchObjectsInRadius(search_nodes[i], Radius[i], results_it, max_results);
            rResults[i].insert(rResults[i].begin(), thread_results.begin(), thread_results.begin() + n_found);
        }
    }
    KRATOS_CATCH("")
}
void SearchGeometricalInRadiusExclusiveImplementation (
    ElementsContainerType const& rStructureElements,
    ConditionsContainerType const& rElements,
    const RadiusArrayType & Radius,
    VectorResultConditionsContainerType& rResults,
    VectorDistanceType& rResultsDistance )
{
    // Exclusive search of conditions against element bins, with distances.
    KRATOS_TRY
    int MaxNumberOfElements = rStructureElements.size();
    ElementsContainerType::ContainerType&   elements_bins = const_cast<ElementsContainerType::ContainerType&>  (rStructureElements.GetContainer());
    ConditionsContainerType::ContainerType& elements_sear = const_cast<ConditionsContainerType::ContainerType&>(rElements.GetContainer());
    // Wrap both containers as generic geometrical objects for the bins.
    GeometricalObjectType::ContainerType SearElementPointerToGeometricalObjecPointerTemporalVector;
    GeometricalObjectType::ContainerType BinsElementPointerToGeometricalObjecPointerTemporalVector;
    SearElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_sear.size());
    BinsElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_bins.size());
    for(ElementsContainerType::ContainerType::iterator it = elements_bins.begin(); it != elements_bins.end(); it++)
        BinsElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);
    for(ConditionsContainerType::ContainerType::iterator it = elements_sear.begin(); it != elements_sear.end(); it++)
        SearElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);
    GeometricalBinsType bins(BinsElementPointerToGeometricalObjecPointerTemporalVector.begin(), BinsElementPointerToGeometricalObjecPointerTemporalVector.end());
    #pragma omp parallel
    {
        GeometricalObjectType::ContainerType localResults(MaxNumberOfElements);
        DistanceType localResultsDistances(MaxNumberOfElements);
        std::size_t NumberOfResults = 0;
        #pragma omp for
        for(int i = 0; i < static_cast<int>(elements_sear.size()); i++)
        {
            GeometricalObjectType::ContainerType::iterator ResultsPointer = localResults.begin();
            DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();
            NumberOfResults = bins.SearchObjectsInRadiusExclusive(SearElementPointerToGeometricalObjecPointerTemporalVector[i],Radius[i],ResultsPointer,ResultsDistancesPointer,MaxNumberOfElements);
            rResults[i].reserve(NumberOfResults);
            for(GeometricalObjectType::ContainerType::iterator it = localResults.begin(); it != localResults.begin() + NumberOfResults; it++)
            {
                Condition::Pointer elem = Kratos::dynamic_pointer_cast<Condition>(*it);
                rResults[i].push_back(elem);
            }
            // BUGFIX: this insert used to sit INSIDE the loop above, appending
            // the whole distance range once per found neighbour and leaving
            // rResultsDistance[i] with NumberOfResults duplicated copies.
            rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
        }
    }
    KRATOS_CATCH("")
}
// For every condition in rElements, find all elements of rStructureElements within
// Radius[i] (inclusive search). Neighbours go to rResults[i], distances to
// rResultsDistance[i].
void SearchGeometricalInRadiusInclusiveImplementation (
    ElementsContainerType const& rStructureElements,
    ConditionsContainerType const& rElements,
    const RadiusArrayType& Radius,
    VectorResultConditionsContainerType& rResults,
    VectorDistanceType& rResultsDistance )
{
    KRATOS_TRY
    int MaxNumberOfElements = rStructureElements.size();
    ElementsContainerType::ContainerType& elements_bins = const_cast<ElementsContainerType::ContainerType&> (rStructureElements.GetContainer());
    ConditionsContainerType::ContainerType& elements_sear = const_cast<ConditionsContainerType::ContainerType&>(rElements.GetContainer());
    // Repack both containers as GeometricalObject pointers so one bins structure serves the search.
    GeometricalObjectType::ContainerType SearElementPointerToGeometricalObjecPointerTemporalVector;
    GeometricalObjectType::ContainerType BinsElementPointerToGeometricalObjecPointerTemporalVector;
    SearElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_sear.size());
    BinsElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_bins.size());
    for(ElementsContainerType::ContainerType::iterator it = elements_bins.begin(); it != elements_bins.end(); it++)
        BinsElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);
    for(ConditionsContainerType::ContainerType::iterator it = elements_sear.begin(); it != elements_sear.end(); it++)
        SearElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);
    GeometricalBinsType bins(BinsElementPointerToGeometricalObjecPointerTemporalVector.begin(), BinsElementPointerToGeometricalObjecPointerTemporalVector.end());
    #pragma omp parallel
    {
        // Thread-local scratch buffers; NumberOfResults is private to each thread.
        GeometricalObjectType::ContainerType localResults(MaxNumberOfElements);
        DistanceType localResultsDistances(MaxNumberOfElements);
        std::size_t NumberOfResults = 0;
        #pragma omp for
        for(int i = 0; i < static_cast<int>(elements_sear.size()); i++)
        {
            GeometricalObjectType::ContainerType::iterator ResultsPointer = localResults.begin();
            DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();
            NumberOfResults = bins.SearchObjectsInRadius(SearElementPointerToGeometricalObjecPointerTemporalVector[i],Radius[i],ResultsPointer,ResultsDistancesPointer,MaxNumberOfElements);
            rResults[i].reserve(NumberOfResults);
            for(GeometricalObjectType::ContainerType::iterator it = localResults.begin(); it != localResults.begin() + NumberOfResults; it++)
            {
                Condition::Pointer elem = Kratos::dynamic_pointer_cast<Condition>(*it);
                rResults[i].push_back(elem);
            }
            // BUGFIX: distances are inserted once per searched entity, not once per
            // found neighbour (the old placement inside the loop duplicated them).
            rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
        }
    }
    KRATOS_CATCH("")
}
// For every element in rElements, find all conditions of rStructureElements within
// Radius[i] (exclusive search). Neighbours go to rResults[i], distances to
// rResultsDistance[i].
void SearchGeometricalInRadiusExclusiveImplementation (
    ConditionsContainerType const& rStructureElements,
    ElementsContainerType const& rElements,
    const RadiusArrayType & Radius,
    VectorResultElementsContainerType& rResults,
    VectorDistanceType& rResultsDistance )
{
    KRATOS_TRY
    int MaxNumberOfElements = rStructureElements.size();
    ConditionsContainerType::ContainerType& elements_bins = const_cast<ConditionsContainerType::ContainerType&>(rStructureElements.GetContainer());
    ElementsContainerType::ContainerType& elements_sear = const_cast<ElementsContainerType::ContainerType&> (rElements.GetContainer());
    // Repack both containers as GeometricalObject pointers so one bins structure serves the search.
    GeometricalObjectType::ContainerType SearElementPointerToGeometricalObjecPointerTemporalVector;
    GeometricalObjectType::ContainerType BinsElementPointerToGeometricalObjecPointerTemporalVector;
    SearElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_sear.size());
    BinsElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_bins.size());
    for(ElementsContainerType::ContainerType::iterator it = elements_sear.begin(); it != elements_sear.end(); it++)
        SearElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);
    for(ConditionsContainerType::ContainerType::iterator it = elements_bins.begin(); it != elements_bins.end(); it++)
        BinsElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);
    GeometricalBinsType bins(BinsElementPointerToGeometricalObjecPointerTemporalVector.begin(), BinsElementPointerToGeometricalObjecPointerTemporalVector.end());
    #pragma omp parallel
    {
        // Thread-local scratch buffers; NumberOfResults is private to each thread.
        GeometricalObjectType::ContainerType localResults(MaxNumberOfElements);
        DistanceType localResultsDistances(MaxNumberOfElements);
        std::size_t NumberOfResults = 0;
        #pragma omp for
        for(int i = 0; i < static_cast<int>(elements_sear.size()); i++)
        {
            GeometricalObjectType::ContainerType::iterator ResultsPointer = localResults.begin();
            DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();
            NumberOfResults = bins.SearchObjectsInRadiusExclusive(SearElementPointerToGeometricalObjecPointerTemporalVector[i],Radius[i],ResultsPointer,ResultsDistancesPointer,MaxNumberOfElements);
            rResults[i].reserve(NumberOfResults);
            for(GeometricalObjectType::ContainerType::iterator it = localResults.begin(); it != localResults.begin() + NumberOfResults; it++)
            {
                Element::Pointer elem = Kratos::dynamic_pointer_cast<Element>(*it);
                rResults[i].push_back(elem);
            }
            // BUGFIX: distances are inserted once per searched entity, not once per
            // found neighbour (the old placement inside the loop duplicated them).
            rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
        }
    }
    KRATOS_CATCH("")
}
// For every element in rElements, find all conditions of rStructureElements within
// Radius[i] (inclusive search). Neighbours go to rResults[i], distances to
// rResultsDistance[i].
void SearchGeometricalInRadiusInclusiveImplementation (
    ConditionsContainerType const& rStructureElements,
    ElementsContainerType const& rElements,
    const RadiusArrayType& Radius,
    VectorResultElementsContainerType& rResults,
    VectorDistanceType& rResultsDistance )
{
    KRATOS_TRY
    int MaxNumberOfElements = rStructureElements.size();
    ConditionsContainerType::ContainerType& elements_bins = const_cast<ConditionsContainerType::ContainerType&>(rStructureElements.GetContainer());
    ElementsContainerType::ContainerType& elements_sear = const_cast<ElementsContainerType::ContainerType&> (rElements.GetContainer());
    // Repack both containers as GeometricalObject pointers so one bins structure serves the search.
    GeometricalObjectType::ContainerType SearElementPointerToGeometricalObjecPointerTemporalVector;
    GeometricalObjectType::ContainerType BinsElementPointerToGeometricalObjecPointerTemporalVector;
    SearElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_sear.size());
    BinsElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_bins.size());
    for(ElementsContainerType::ContainerType::iterator it = elements_sear.begin(); it != elements_sear.end(); it++)
        SearElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);
    for(ConditionsContainerType::ContainerType::iterator it = elements_bins.begin(); it != elements_bins.end(); it++)
        BinsElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);
    GeometricalBinsType bins(BinsElementPointerToGeometricalObjecPointerTemporalVector.begin(), BinsElementPointerToGeometricalObjecPointerTemporalVector.end());
    #pragma omp parallel
    {
        // Thread-local scratch buffers; NumberOfResults is private to each thread.
        GeometricalObjectType::ContainerType localResults(MaxNumberOfElements);
        DistanceType localResultsDistances(MaxNumberOfElements);
        std::size_t NumberOfResults = 0;
        #pragma omp for
        for(int i = 0; i < static_cast<int>(elements_sear.size()); i++)
        {
            GeometricalObjectType::ContainerType::iterator ResultsPointer = localResults.begin();
            DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();
            NumberOfResults = bins.SearchObjectsInRadius(SearElementPointerToGeometricalObjecPointerTemporalVector[i],Radius[i],ResultsPointer,ResultsDistancesPointer,MaxNumberOfElements);
            rResults[i].reserve(NumberOfResults);
            for(GeometricalObjectType::ContainerType::iterator it = localResults.begin(); it != localResults.begin() + NumberOfResults; it++)
            {
                Element::Pointer elem = Kratos::dynamic_pointer_cast<Element>(*it);
                rResults[i].push_back(elem);
            }
            // BUGFIX: distances are inserted once per searched entity, not once per
            // found neighbour (the old placement inside the loop duplicated them).
            rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
        }
    }
    KRATOS_CATCH("")
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Return a short textual identifier for this object.
virtual std::string Info() const override
{
    return std::string("OpenMPDemSearch");
}
/// Print the class identifier ("OpenMPDemSearch") to the given stream.
virtual void PrintInfo(std::ostream& rOStream) const override {rOStream << "OpenMPDemSearch";}
/// Print object's data. Intentionally empty: this class exposes no printable state.
virtual void PrintData(std::ostream& rOStream) const override {}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///
// Build the spatial bins for the given container. When mDomainPeriodicity is set,
// a periodic-domain variant bounded by mDomainMin/mDomainMax is used instead.
BinsUniquePointerType GetBins(ElementsContainerType::ContainerType& r_model_part_container)
{
if (mDomainPeriodicity){
return std::unique_ptr<BinsType>(new BinsTypePeriodic(r_model_part_container.begin(), r_model_part_container.end(), this->mDomainMin, this->mDomainMax));
}
else {
return std::unique_ptr<BinsType>(new BinsType(r_model_part_container.begin(), r_model_part_container.end()));
}
}
///@}
///@name Un accessible methods
///@{
/// Assignment operator. Private and intentionally a no-op: the class is
/// effectively non-assignable (no member state is copied here).
OMP_DEMSearch& operator=(OMP_DEMSearch const& rOther)
{
return *this;
}
/// Copy constructor. Private; delegates to the no-op assignment operator above,
/// so no state is actually copied.
OMP_DEMSearch(OMP_DEMSearch const& rOther)
{
*this = rOther;
}
///@}
}; // Class DEMSearch
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
// inline std::istream& operator >> (std::istream& rIStream,
// DEMSearch& rThis){return rIStream;}
//
// /// output stream function
// inline std::ostream& operator << (std::ostream& rOStream,
// const DEMSearch& rThis)
// {
// rThis.PrintInfo(rOStream);
// rOStream << std::endl;
// rThis.PrintData(rOStream);
//
// return rOStream;
// }
///@}
///@} addtogroup block
} // namespace Kratos.
#endif // KRATOS_DEM_SEARCH_H_INCLUDED defined
|
option_warn.c | // RUN: %clang_cc1 -verify -Wsource-uses-openmp -o - %s
// RUN: %clang_cc1 -verify -Wsource-uses-openmp -o - %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
int a;
#pragma omp threadprivate(a,b) // expected-warning {{unexpected '#pragma omp ...' in program}}
#pragma omp parallel
|
CSRPlus.c | // ---------------------------------------------------------------
// @brief : CSRPlus matrix implementation file
// @author : Hua Huang <huangh223@gatech.edu>
// Edmond Chow <echow@cc.gatech.edu>
//
// Copyright (c) 2017-2020 Georgia Institute of Technology
// ----------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <omp.h>
#include "CSRPlus.h"
// Recursively quicksort (key, val) pairs in the inclusive range [l, r] by
// ascending key; val[] entries travel together with their keys. The pivot is
// the key at the middle index.
static void qsort_int_dbl_pair(int *key, double *val, int l, int r)
{
    int lo = l;
    int hi = r;
    const int pivot = key[(l + r) / 2];
    while (lo <= hi)
    {
        while (key[lo] < pivot) lo++;
        while (key[hi] > pivot) hi--;
        if (lo <= hi)
        {
            int tk = key[lo]; key[lo] = key[hi]; key[hi] = tk;
            double tv = val[lo]; val[lo] = val[hi]; val[hi] = tv;
            lo++;
            hi--;
        }
    }
    if (lo < r) qsort_int_dbl_pair(key, val, lo, r);
    if (hi > l) qsort_int_dbl_pair(key, val, l, hi);
}
// Initialize a CSRP_mat structure from a COO matrix.
// The triplets (row[i], col[i], val[i]) are bucket-sorted into CSR form; within
// each row the non-zeros are then sorted by ascending column index.
// On return *csrp_mat_ holds a newly allocated structure (release with CSRP_free).
void CSRP_init_with_COO_mat(
    const int nrow, const int ncol, const int nnz, const int *row,
    const int *col, const double *val, CSRP_mat_p *csrp_mat_
)
{
    CSRP_mat_p csrp_mat = (CSRP_mat_p) malloc(sizeof(CSRP_mat_s));
    // FIX: this allocation was the only unchecked one in the function.
    assert(csrp_mat != NULL);
    csrp_mat->nrow = nrow;
    csrp_mat->ncol = ncol;
    csrp_mat->nnz  = nnz;
    csrp_mat->row_ptr = (int*)    malloc(sizeof(int)    * (nrow + 1));
    csrp_mat->col     = (int*)    malloc(sizeof(int)    * nnz);
    csrp_mat->val     = (double*) malloc(sizeof(double) * nnz);
    assert(csrp_mat->row_ptr != NULL);
    assert(csrp_mat->col     != NULL);
    assert(csrp_mat->val     != NULL);
    // Partitioning arrays are populated later by CSRP_partition_multithread().
    csrp_mat->nnz_spos  = NULL;
    csrp_mat->nnz_epos  = NULL;
    csrp_mat->first_row = NULL;
    csrp_mat->last_row  = NULL;
    csrp_mat->fr_intact = NULL;
    csrp_mat->lr_intact = NULL;
    csrp_mat->fr_res    = NULL;
    csrp_mat->lr_res    = NULL;
    int *row_ptr = csrp_mat->row_ptr;
    int *col_    = csrp_mat->col;
    double *val_ = csrp_mat->val;
    memset(row_ptr, 0, sizeof(int) * (nrow + 1));
    // Count the number of non-zeros in each row
    for (int i = 0; i < nnz; i++)
        row_ptr[row[i] + 1]++;
    // Prefix-sum: displacement of the 1st non-zero in each row
    for (int i = 2; i <= nrow; i++)
        row_ptr[i] += row_ptr[i - 1];
    // Use row_ptr to bucket sort col[] and val[] (this advances row_ptr)
    for (int i = 0; i < nnz; i++)
    {
        int idx = row_ptr[row[i]];
        col_[idx] = col[i];
        val_[idx] = val[i];
        row_ptr[row[i]]++;
    }
    // Shift row_ptr back to undo the advancement done by the bucket sort
    for (int i = nrow; i >= 1; i--)
        row_ptr[i] = row_ptr[i - 1];
    row_ptr[0] = 0;
    // Sort the non-zeros in each row according to column indices
    #pragma omp parallel for
    for (int i = 0; i < nrow; i++)
        qsort_int_dbl_pair(col_, val_, row_ptr[i], row_ptr[i + 1] - 1);
    *csrp_mat_ = csrp_mat;
}
// Free a CSRP_mat structure and all arrays it owns; reset the caller's pointer
// to NULL. Passing a pointer holding NULL is a no-op. (free(NULL) is safe for
// the member arrays that were never allocated.)
void CSRP_free(CSRP_mat_p *csrp_mat_)
{
    CSRP_mat_p csrp_mat = *csrp_mat_;
    if (csrp_mat == NULL) return;
    free(csrp_mat->row_ptr);
    free(csrp_mat->col);
    free(csrp_mat->val);
    free(csrp_mat->nnz_spos);
    free(csrp_mat->nnz_epos);
    free(csrp_mat->first_row);
    free(csrp_mat->last_row);
    free(csrp_mat->fr_intact);
    free(csrp_mat->lr_intact);
    free(csrp_mat->fr_res);
    free(csrp_mat->lr_res);
    // FIX: the structure itself was leaked — only its members were freed.
    free(csrp_mat);
    *csrp_mat_ = NULL;
}
// Split `len` items into `nblk` contiguous, nearly equal blocks. Block i spans
// [displs[i], displs[i+1]); the first len % nblk blocks receive one extra item.
static void partition_block_equal(const int len, const int nblk, int *displs)
{
    const int base  = len / nblk;
    const int extra = len % nblk;
    displs[0] = 0;
    for (int i = 0; i < nblk; i++)
        displs[i + 1] = displs[i] + base + ((i < extra) ? 1 : 0);
}
// Binary search on sorted array a[0..n-1]: return the smallest index i in
// [0, n] such that x <= a[i]; returns n when every element is smaller than x
// (the classic lower_bound).
static int calc_lower_bound(const int *a, int n, int x)
{
    int lo = 0;
    int hi = n;
    while (lo < hi)
    {
        const int mid = lo + (hi - lo) / 2;
        if (a[mid] < x)
            lo = mid + 1;
        else
            hi = mid;
    }
    return lo;
}
// Partition a CSR matrix into nblk blocks with (nearly) the same nnz count for
// multithreaded SpMV. For each block, records its nnz range, first/last row,
// and whether its first/last row is fully contained in the block ("intact") or
// only a segment (partial rows are reduced into fr_res/lr_res by the SpMV).
void CSRP_partition_multithread(CSRP_mat_p csrp_mat, const int nblk, const int nthread)
{
    csrp_mat->nblk = nblk;
    csrp_mat->nthread = nthread;
    csrp_mat->nnz_spos  = (int*)    malloc(sizeof(int)    * nblk);
    csrp_mat->nnz_epos  = (int*)    malloc(sizeof(int)    * nblk);
    csrp_mat->first_row = (int*)    malloc(sizeof(int)    * nblk);
    csrp_mat->last_row  = (int*)    malloc(sizeof(int)    * nblk);
    csrp_mat->fr_intact = (int*)    malloc(sizeof(int)    * nblk);
    csrp_mat->lr_intact = (int*)    malloc(sizeof(int)    * nblk);
    csrp_mat->fr_res    = (double*) malloc(sizeof(double) * nblk);
    csrp_mat->lr_res    = (double*) malloc(sizeof(double) * nblk);
    assert(csrp_mat->nnz_spos  != NULL);
    assert(csrp_mat->nnz_epos  != NULL);
    assert(csrp_mat->first_row != NULL);
    assert(csrp_mat->last_row  != NULL);
    assert(csrp_mat->fr_intact != NULL);
    assert(csrp_mat->lr_intact != NULL);
    assert(csrp_mat->fr_res    != NULL);
    assert(csrp_mat->lr_res    != NULL);
    int nnz  = csrp_mat->nnz;
    int nrow = csrp_mat->nrow;
    int *row_ptr = csrp_mat->row_ptr;
    int *nnz_displs = (int *) malloc((nblk + 1) * sizeof(int));
    // FIX: this allocation was the only unchecked one in the function.
    assert(nnz_displs != NULL);
    partition_block_equal(nnz, nblk, nnz_displs);
    for (int iblk = 0; iblk < nblk; iblk++)
    {
        int iblk_nnz_spos = nnz_displs[iblk];
        int iblk_nnz_epos = nnz_displs[iblk + 1] - 1;
        // Map nnz positions back to row indices via binary search on row_ptr
        int spos_in_row = calc_lower_bound(row_ptr, nrow + 1, iblk_nnz_spos);
        int epos_in_row = calc_lower_bound(row_ptr, nrow + 1, iblk_nnz_epos);
        if (row_ptr[spos_in_row] > iblk_nnz_spos) spos_in_row--;
        if (row_ptr[epos_in_row] > iblk_nnz_epos) epos_in_row--;
        // Note: It is possible that the last nnz is the first nnz in a row,
        // and there are some empty rows between the last row and previous non-empty row
        while (row_ptr[epos_in_row] == row_ptr[epos_in_row + 1]) epos_in_row++;
        csrp_mat->nnz_spos[iblk]  = iblk_nnz_spos;
        csrp_mat->nnz_epos[iblk]  = iblk_nnz_epos;
        csrp_mat->first_row[iblk] = spos_in_row;
        csrp_mat->last_row[iblk]  = epos_in_row;
        if ((epos_in_row - spos_in_row) >= 1)
        {
            // First/last row is "intact" when the block covers it completely
            int fr_intact = (iblk_nnz_spos == row_ptr[spos_in_row]);
            int lr_intact = (iblk_nnz_epos == row_ptr[epos_in_row + 1] - 1);
            csrp_mat->fr_intact[iblk] = fr_intact;
            csrp_mat->lr_intact[iblk] = lr_intact;
        } else {
            // Mark that this thread only handles a segment of a row
            // (lr_intact == -1 disables the last-row reduction for this block)
            csrp_mat->fr_intact[iblk] = 0;
            csrp_mat->lr_intact[iblk] = -1;
        }
    }
    // Pin the last block exactly to the end of the matrix
    csrp_mat->last_row[nblk - 1] = nrow - 1;
    csrp_mat->nnz_epos[nblk - 1] = row_ptr[nrow] - 1;
    free(nnz_displs);
}
// Use first-touch policy to optimize the storage of CSR arrays in a CSRP_mat structure
// Re-allocate the CSR arrays and first-touch them with the same thread count
// and nnz partition that CSRP_SpMV will later use, so (under a first-touch
// NUMA policy) each memory page lands near the thread that will read it.
void CSRP_optimize_NUMA(CSRP_mat_p csrp_mat)
{
int nnz = csrp_mat->nnz;
int nrow = csrp_mat->nrow;
int nblk = csrp_mat->nblk;
int nthread = csrp_mat->nthread;
// Fresh, untouched allocations: pages are physically placed on first write below.
int *row_ptr = (int*) malloc(sizeof(int) * (nrow + 1));
int *col = (int*) malloc(sizeof(int) * nnz);
double *val = (double*) malloc(sizeof(double) * nnz);
assert(row_ptr != NULL);
assert(col != NULL);
assert(val != NULL);
#pragma omp parallel num_threads(nthread)
{
// Each thread copies (and thereby first-touches) its own static chunk of row_ptr.
#pragma omp for schedule(static)
for (int i = 0; i < nrow + 1; i++)
row_ptr[i] = csrp_mat->row_ptr[i];
// col/val are copied block by block, matching the SpMV work partition.
#pragma omp for schedule(static)
for (int iblk = 0; iblk < nblk; iblk++)
{
int nnz_spos = csrp_mat->nnz_spos[iblk];
int nnz_epos = csrp_mat->nnz_epos[iblk];
for (int i = nnz_spos; i <= nnz_epos; i++)
{
col[i] = csrp_mat->col[i];
val[i] = csrp_mat->val[i];
}
}
}
// Swap in the NUMA-friendly copies and release the originals.
free(csrp_mat->row_ptr);
free(csrp_mat->col);
free(csrp_mat->val);
csrp_mat->row_ptr = row_ptr;
csrp_mat->col = col;
csrp_mat->val = val;
}
// Dot product of one sparse row segment with dense vector x:
// returns sum_k val[k] * x[col[k]] for k in [0, seg_len).
static double CSR_SpMV_row_seg(
    const int seg_len, const int *__restrict col,
    const double *__restrict val, const double *__restrict x
)
{
    double acc = 0.0;
    #pragma omp simd
    for (int k = 0; k < seg_len; k++)
        acc += val[k] * x[col[k]];
    return acc;
}
// For each row r in [srow, erow), compute y[r] = dot(CSR row r, x),
// where row r's non-zeros live at indices [row_ptr[r], row_ptr[r+1]).
static void CSR_SpMV_row_block(
    const int srow, const int erow,
    const int *row_ptr, const int *col, const double *val,
    const double *__restrict x, double *__restrict y
)
{
    for (int r = srow; r < erow; r++)
    {
        double acc = 0.0;
        #pragma omp simd
        for (int k = row_ptr[r]; k < row_ptr[r + 1]; k++)
            acc += val[k] * x[col[k]];
        y[r] = acc;
    }
}
// Compute the contribution of nnz-block iblk to y = A * x.
// Intact rows are written directly into y; partial first/last rows are
// accumulated into fr_res/lr_res (and the touched y entries zeroed) so that
// CSRP_SpMV can reduce them across blocks afterwards.
static void CSRP_SpMV_block(CSRP_mat_p csrp_mat, const int iblk, const double *x, double *y)
{
int *row_ptr = csrp_mat->row_ptr;
int *col = csrp_mat->col;
int *first_row = csrp_mat->first_row;
int *last_row = csrp_mat->last_row;
int *nnz_spos = csrp_mat->nnz_spos;
int *nnz_epos = csrp_mat->nnz_epos;
int *fr_intact = csrp_mat->fr_intact;
int *lr_intact = csrp_mat->lr_intact;
double *val = csrp_mat->val;
double *fr_res = csrp_mat->fr_res;
double *lr_res = csrp_mat->lr_res;
if (first_row[iblk] == last_row[iblk])
{
// This thread handles a segment on 1 row
int iblk_nnz_spos = nnz_spos[iblk];
int iblk_nnz_epos = nnz_epos[iblk];
int iblk_seg_len = iblk_nnz_epos - iblk_nnz_spos + 1;
fr_res[iblk] = CSR_SpMV_row_seg(iblk_seg_len, col + iblk_nnz_spos, val + iblk_nnz_spos, x);
lr_res[iblk] = 0.0;
// Zero y here; the partial result is added back in CSRP_SpMV's reduction.
y[first_row[iblk]] = 0.0;
} else {
// This thread handles segments on multiple rows
int first_intact_row = first_row[iblk];
int last_intact_row = last_row[iblk];
if (fr_intact[iblk] == 0)
{
// Partial first row: nnz_epos here is exclusive (start of the next row)
int iblk_nnz_spos = nnz_spos[iblk];
int iblk_nnz_epos = row_ptr[first_intact_row + 1];
int iblk_seg_len = iblk_nnz_epos - iblk_nnz_spos;
fr_res[iblk] = CSR_SpMV_row_seg(iblk_seg_len, col + iblk_nnz_spos, val + iblk_nnz_spos, x);
y[first_intact_row] = 0.0;
first_intact_row++;
}
if (lr_intact[iblk] == 0)
{
// Partial last row: range [row_ptr[last_row], nnz_epos] is inclusive
int iblk_nnz_spos = row_ptr[last_intact_row];
int iblk_nnz_epos = nnz_epos[iblk];
int iblk_seg_len = iblk_nnz_epos - iblk_nnz_spos + 1;
lr_res[iblk] = CSR_SpMV_row_seg(iblk_seg_len, col + iblk_nnz_spos, val + iblk_nnz_spos, x);
y[last_intact_row] = 0.0;
last_intact_row--;
}
// All remaining rows are fully owned by this block: write y directly.
CSR_SpMV_row_block(
first_intact_row, last_intact_row + 1,
row_ptr, col, val, x, y
);
}
}
// Perform OpenMP parallelized CSR SpMV (y = A * x) with a CSRP_mat structure.
// Phase 1: each block computes its rows in parallel (partial first/last rows
// go into fr_res/lr_res). Phase 2: a serial reduction adds those partial-row
// results into y; serial because several blocks may share the same row.
void CSRP_SpMV(CSRP_mat_p csrp_mat, const double *x, double *y)
{
int nblk = csrp_mat->nblk;
int nthread = csrp_mat->nthread;
#pragma omp parallel for schedule(static) num_threads(nthread)
for (int iblk = 0; iblk < nblk; iblk++)
CSRP_SpMV_block(csrp_mat, iblk, x, y);
for (int iblk = 0; iblk < nblk; iblk++)
{
if (csrp_mat->fr_intact[iblk] == 0)
{
int first_row = csrp_mat->first_row[iblk];
y[first_row] += csrp_mat->fr_res[iblk];
}
// lr_intact == -1 (single-row segment) intentionally skips this branch
if (csrp_mat->lr_intact[iblk] == 0)
{
int last_row = csrp_mat->last_row[iblk];
y[last_row] += csrp_mat->lr_res[iblk];
}
}
}
|
fx.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/fx-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
#define LeftShiftOperator 0xf5U
#define RightShiftOperator 0xf6U
#define LessThanEqualOperator 0xf7U
#define GreaterThanEqualOperator 0xf8U
#define EqualOperator 0xf9U
#define NotEqualOperator 0xfaU
#define LogicalAndOperator 0xfbU
#define LogicalOrOperator 0xfcU
#define ExponentialNotation 0xfdU
/* Private state for an fx-expression evaluator. */
struct _FxInfo
{
/* Image list the expression is evaluated against. */
const Image
*images;
/* Preprocessed expression string (compound operators collapsed; see AcquireFxInfo). */
char
*expression;
/* Destination stream for debug()/trace output. */
FILE
*file;
/* Splay trees caching parsed colors and symbol values. */
SplayTreeInfo
*colors,
*symbols;
/* One virtual cache view per image in the list. */
CacheView
**view;
/* Randomness source for rand()/noise expression functions. */
RandomInfo
*random_info;
ExceptionInfo
*exception;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireFxInfo() allocates the FxInfo structure.
%
% The format of the AcquireFxInfo method is:
%
% FxInfo *AcquireFxInfo(Image *images,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o expression: the expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression,
ExceptionInfo *exception)
{
char
fx_op[2];
const Image
*next;
FxInfo
*fx_info;
register ssize_t
i;
/* Allocate and zero the evaluator state. */
fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info));
(void) ResetMagickMemory(fx_info,0,sizeof(*fx_info));
fx_info->exception=AcquireExceptionInfo();
fx_info->images=images;
fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
RelinquishMagickMemory);
fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
RelinquishMagickMemory);
/* One virtual cache view per image in the sequence. */
fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
fx_info->images),sizeof(*fx_info->view));
if (fx_info->view == (CacheView **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
i=0;
next=GetFirstImageInList(fx_info->images);
for ( ; next != (Image *) NULL; next=next->next)
{
fx_info->view[i]=AcquireVirtualCacheView(next,exception);
i++;
}
fx_info->random_info=AcquireRandomInfo();
fx_info->expression=ConstantString(expression);
fx_info->file=stderr;
(void) SubstituteString(&fx_info->expression," ",""); /* compact string */
/*
Force right-to-left associativity for unary negation.
NOTE: the order of these substitutions matters — the "^"/"E"/"e"
rewrites undo the "-1.0*" expansion where it must not apply.
*/
(void) SubstituteString(&fx_info->expression,"-","-1.0*");
(void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
(void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
(void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
/*
Convert compound to simple operators.
Each two-character operator is replaced by a single sentinel byte
(see the *Operator defines) so the parser sees one token.
*/
fx_op[1]='\0';
*fx_op=(char) LeftShiftOperator;
(void) SubstituteString(&fx_info->expression,"<<",fx_op);
*fx_op=(char) RightShiftOperator;
(void) SubstituteString(&fx_info->expression,">>",fx_op);
*fx_op=(char) LessThanEqualOperator;
(void) SubstituteString(&fx_info->expression,"<=",fx_op);
*fx_op=(char) GreaterThanEqualOperator;
(void) SubstituteString(&fx_info->expression,">=",fx_op);
*fx_op=(char) EqualOperator;
(void) SubstituteString(&fx_info->expression,"==",fx_op);
*fx_op=(char) NotEqualOperator;
(void) SubstituteString(&fx_info->expression,"!=",fx_op);
*fx_op=(char) LogicalAndOperator;
(void) SubstituteString(&fx_info->expression,"&&",fx_op);
*fx_op=(char) LogicalOrOperator;
(void) SubstituteString(&fx_info->expression,"||",fx_op);
*fx_op=(char) ExponentialNotation;
(void) SubstituteString(&fx_info->expression,"**",fx_op);
return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d d N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddNoiseImage() adds random noise to the image.
%
% The format of the AddNoiseImage method is:
%
% Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
% const double attenuate,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
% Impulse, Laplacian, or Poisson.
%
% o attenuate: attenuate the random distribution.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
const double attenuate,ExceptionInfo *exception)
{
#define AddNoiseImageTag "AddNoise/Image"
CacheView
*image_view,
*noise_view;
Image
*noise_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RandomInfo
**magick_restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
/*
Initialize noise image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/* Try the OpenCL-accelerated path first; fall through on failure. */
noise_image=AccelerateAddNoiseImage(image,noise_type,attenuate,exception);
if (noise_image != (Image *) NULL)
return(noise_image);
#endif
noise_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
if (noise_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
{
noise_image=DestroyImage(noise_image);
return((Image *) NULL);
}
/*
Add noise in each row. One RandomInfo per OpenMP thread keeps the
generators independent across the parallel loop.
*/
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireVirtualCacheView(image,exception);
noise_view=AcquireAuthenticCacheView(noise_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_number_threads(image,noise_image,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
/* A failed row elsewhere aborts the remaining rows cheaply. */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel);
if ((traits == UndefinedPixelTrait) ||
(noise_traits == UndefinedPixelTrait))
continue;
/* Copy-trait or write-masked channels pass through unchanged. */
if (((noise_traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(image,p) <= (QuantumRange/2)))
{
SetPixelChannel(noise_image,channel,p[i],q);
continue;
}
SetPixelChannel(noise_image,channel,ClampToQuantum(
GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)),
q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(noise_image);
}
sync=SyncCacheViewAuthenticPixels(noise_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_AddNoiseImage)
#endif
proceed=SetImageProgress(image,AddNoiseImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
noise_view=DestroyCacheView(noise_view);
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
noise_image=DestroyImage(noise_image);
return(noise_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u e S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlueShiftImage() mutes the colors of the image to simulate a scene at
% nighttime in the moonlight.
%
% The format of the BlueShiftImage method is:
%
% Image *BlueShiftImage(const Image *image,const double factor,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o factor: the shift factor.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  BlueShiftImage() simulates moonlight: every pixel's RGB is averaged first
  toward factor*min(R,G,B) and then toward factor*max(R,G,B), muting color.
  Returns a new DirectClass image, or NULL on failure (errors in exception).
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
  ExceptionInfo *exception)
{
#define BlueShiftImageTag "BlueShift/Image"
  CacheView
    *image_view,
    *shift_view;
  Image
    *shift_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  /*
    Allocate blue shift image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  shift_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (shift_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse)
    {
      shift_image=DestroyImage(shift_image);
      return((Image *) NULL);
    }
  /*
    Blue-shift DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,shift_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;
    PixelInfo
      pixel;
    Quantum
      quantum;
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    register Quantum
      *magick_restrict q;
    /* Another row already failed: skip work but keep the loop count valid. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Pass 1: blend each channel toward factor times the minimum of RGB. */
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) < quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) < quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(GetPixelRed(image,p)+factor*quantum);
      pixel.green=0.5*(GetPixelGreen(image,p)+factor*quantum);
      pixel.blue=0.5*(GetPixelBlue(image,p)+factor*quantum);
      /* Pass 2: blend the result toward factor times the maximum of RGB. */
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) > quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) > quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(pixel.red+factor*quantum);
      pixel.green=0.5*(pixel.green+factor*quantum);
      pixel.blue=0.5*(pixel.blue+factor*quantum);
      SetPixelRed(shift_image,ClampToQuantum(pixel.red),q);
      SetPixelGreen(shift_image,ClampToQuantum(pixel.green),q);
      SetPixelBlue(shift_image,ClampToQuantum(pixel.blue),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(shift_image);
    }
    sync=SyncCacheViewAuthenticPixels(shift_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /* Serialize progress updates across the parallel rows. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BlueShiftImage)
#endif
        proceed=SetImageProgress(image,BlueShiftImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  shift_view=DestroyCacheView(shift_view);
  if (status == MagickFalse)
    shift_image=DestroyImage(shift_image);
  return(shift_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a r c o a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CharcoalImage() creates a new image that is a copy of an existing one with
% the edge highlighted. It allocates the memory necessary for the new Image
% structure and returns a pointer to the new image.
%
% The format of the CharcoalImage method is:
%
% Image *CharcoalImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CharcoalImage() produces a charcoal-sketch effect by running a copy of the
  image through edge detection, blur, normalize, negate, and grayscale, in
  that order.  Returns the new image, or NULL on failure (errors recorded in
  exception).  Each intermediate image is destroyed as soon as its successor
  exists, so no image leaks on any path.
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *copy_image,
    *outline_image,
    *sketch_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Work on a clone so the caller's image is never modified. */
  copy_image=CloneImage(image,0,0,MagickTrue,exception);
  if (copy_image == (Image *) NULL)
    return((Image *) NULL);
  /* Highlight edges, then discard the clone. */
  outline_image=EdgeImage(copy_image,radius,exception);
  copy_image=DestroyImage(copy_image);
  if (outline_image == (Image *) NULL)
    return((Image *) NULL);
  /* Soften the outline, then discard it. */
  sketch_image=BlurImage(outline_image,radius,sigma,exception);
  outline_image=DestroyImage(outline_image);
  if (sketch_image == (Image *) NULL)
    return((Image *) NULL);
  /* Stretch contrast, invert, and reduce to gray for the charcoal look. */
  (void) NormalizeImage(sketch_image,exception);
  (void) NegateImage(sketch_image,MagickFalse,exception);
  (void) GrayscaleImage(sketch_image,image->intensity,exception);
  return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorizeImage() blends the fill color with each pixel in the image.
% A percentage blend is specified with opacity. Control the application
% of different color components by specifying a different percentage for
% each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
% The format of the ColorizeImage method is:
%
% Image *ColorizeImage(const Image *image,const char *blend,
% const PixelInfo *colorize,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: A character string indicating the level of blending as a
% percentage.
%
% o colorize: A color value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *blend,
  const PixelInfo *colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag "Colorize/Image"
/* Linear blend: blend_percentage percent of colorize, rest original pixel. */
#define Colorize(pixel,blend_percentage,colorize) \
  (((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0)
  CacheView
    *image_view;
  GeometryInfo
    geometry_info;
  Image
    *colorize_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  MagickStatusType
    flags;
  PixelInfo
    blend_percentage;
  ssize_t
    y;
  /*
    Allocate colorized image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  colorize_image=CloneImage(image,0,0,MagickTrue,exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse)
    {
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  /* A gray canvas or a gray fill is promoted to sRGB so color can be added. */
  if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) ||
      (IsPixelInfoGray(colorize) != MagickFalse))
    (void) SetImageColorspace(colorize_image,sRGBColorspace,exception);
  if ((colorize_image->alpha_trait == UndefinedPixelTrait) &&
      (colorize->alpha_trait != UndefinedPixelTrait))
    (void) SetImageAlpha(colorize_image,OpaqueAlpha,exception);
  /* No blend geometry given: return the (possibly converted) clone as-is. */
  if (blend == (const char *) NULL)
    return(colorize_image);
  /*
    Parse the blend geometry ("r[/g[/b[/a]]]") into per-channel percentages;
    rho alone applies to every color channel.
  */
  GetPixelInfo(colorize_image,&blend_percentage);
  flags=ParseGeometry(blend,&geometry_info);
  blend_percentage.red=geometry_info.rho;
  blend_percentage.green=geometry_info.rho;
  blend_percentage.blue=geometry_info.rho;
  blend_percentage.black=geometry_info.rho;
  blend_percentage.alpha=(MagickRealType) TransparentAlpha;
  if ((flags & SigmaValue) != 0)
    blend_percentage.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    blend_percentage.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    blend_percentage.alpha=geometry_info.psi;
  if (blend_percentage.colorspace == CMYKColorspace)
    {
      /* CMYK images: psi maps to the black channel, chi to alpha. */
      if ((flags & PsiValue) != 0)
        blend_percentage.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        blend_percentage.alpha=geometry_info.chi;
    }
  /*
    Colorize DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(colorize_image,colorize_image,colorize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) colorize_image->rows; y++)
  {
    MagickBooleanType
      sync;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) colorize_image->columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++)
      {
        PixelTrait traits = GetPixelChannelTraits(colorize_image,
          (PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        /* Copy-trait channels and write-masked pixels are left untouched. */
        if (((traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(colorize_image,q) <= (QuantumRange/2)))
          continue;
        SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum(
          Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i),
          GetPixelInfoChannel(colorize,(PixelChannel) i))),q);
      }
      q+=GetPixelChannels(colorize_image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorizeImage)
#endif
        proceed=SetImageProgress(image,ColorizeImageTag,progress++,
          colorize_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);
  return(colorize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r M a t r i x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorMatrixImage() applies color transformation to an image. This method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the ColorMatrixImage method is:
%
% Image *ColorMatrixImage(const Image *image,
% const KernelInfo *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_matrix: the color matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* FUTURE: modify to make use of a MagickMatrix Multiply function
That should be provided in "matrix.c"
(ASIDE: actually distorts should do this too but currently doesn't)
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag "ColorMatrix/Image"
  CacheView
    *color_view,
    *image_view;
  double
    ColorMatrix[6][6] =
    {
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };
  Image
    *color_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  register ssize_t
    i;
  ssize_t
    u,
    v,
    y;
  /*
    Map given color_matrix, into a 6x6 matrix RGBKA and a constant
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  i=0;
  /* Overlay the user's matrix on the identity; entries past 6x6 are read
     (to keep i in step with values[]) but ignored. */
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image.
  */
  color_image=CloneImage(image,0,0,MagickTrue,exception);
  if (color_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse)
    {
      color_image=DestroyImage(color_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      /* Log the effective 6x6 matrix, one row per line. */
      char
        format[MagickPathExtent],
        *message;
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " ColorMatrix image with color matrix:");
      message=AcquireString("");
      for (v=0; v < 6; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < 6; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",
            ColorMatrix[v][u]);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    Apply the ColorMatrix to image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,color_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        v;
      size_t
        height;
      GetPixelInfoPixel(image,p,&pixel);
      height=color_matrix->height > 6 ? 6UL : color_matrix->height;
      for (v=0; v < (ssize_t) height; v++)
      {
        double
          sum;
        /* Row v: weighted sum of R,G,B (plus K for CMYK and A when the
           image has alpha) plus a normalized constant offset (column 5). */
        sum=ColorMatrix[v][0]*GetPixelRed(image,p)+ColorMatrix[v][1]*
          GetPixelGreen(image,p)+ColorMatrix[v][2]*GetPixelBlue(image,p);
        if (image->colorspace == CMYKColorspace)
          sum+=ColorMatrix[v][3]*GetPixelBlack(image,p);
        if (image->alpha_trait != UndefinedPixelTrait)
          sum+=ColorMatrix[v][4]*GetPixelAlpha(image,p);
        sum+=QuantumRange*ColorMatrix[v][5];
        switch (v)
        {
          case 0: pixel.red=sum; break;
          case 1: pixel.green=sum; break;
          case 2: pixel.blue=sum; break;
          case 3: pixel.black=sum; break;
          case 4: pixel.alpha=sum; break;
          default: break;
        }
      }
      SetPixelViaPixelInfo(color_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(color_image);
    }
    if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorMatrixImage)
#endif
        proceed=SetImageProgress(image,ColorMatrixImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  color_view=DestroyCacheView(color_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    color_image=DestroyImage(color_image);
  return(color_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
% The format of the DestroyFxInfo method is:
%
% ImageInfo *DestroyFxInfo(ImageInfo *fx_info)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
*/
/*
  DestroyFxInfo() releases every resource owned by an FxInfo -- its exception,
  expression string, symbol and color splay-trees, one cache view per image
  in the list, the view array itself, and the random generator -- then frees
  the struct and returns NULL.
*/
MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  ssize_t
    view;

  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  /* One cache view was acquired per image in the list; release them all. */
  view=(ssize_t) GetImageListLength(fx_info->images);
  while (--view >= 0)
    fx_info->view[view]=DestroyCacheView(fx_info->view[view]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F x E v a l u a t e C h a n n e l E x p r e s s i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxEvaluateChannelExpression() evaluates an expression and returns the
% results.
%
% The format of the FxEvaluateExpression method is:
%
% double FxEvaluateChannelExpression(FxInfo *fx_info,
% const PixelChannel channel,const ssize_t x,const ssize_t y,
% double *alpha,Exceptioninfo *exception)
% double FxEvaluateExpression(FxInfo *fx_info,
% double *alpha,Exceptioninfo *exception)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
% o channel: the channel.
%
% o x,y: the pixel position.
%
% o alpha: the result.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  FxChannelStatistics() resolves an fx statistic symbol (depth, kurtosis,
  maxima, mean, minima, skewness, standard_deviation), optionally suffixed
  with ".channel" to restrict the computation to one channel.  Results are
  cached in fx_info->symbols keyed by image address, channel, and symbol, so
  each statistic is computed at most once per image/channel.  Returns the
  statistic scaled by QuantumScale.

  Fix: `statistic' is now zero-initialized.  Previously, if `symbol' matched
  none of the handled statistics, the buffer was read uninitialized by
  AddValueToSplayTree()/StringToDouble() -- undefined behavior; it now
  degrades to caching/returning 0.0.
*/
static double FxChannelStatistics(FxInfo *fx_info,Image *image,
  PixelChannel channel,const char *symbol,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;
  char
    key[MagickPathExtent],
    statistic[MagickPathExtent];
  const char
    *value;
  register const char
    *p;
  channel_mask=UndefinedChannel;
  /* An optional ".name" suffix selects a specific pixel channel. */
  for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
  if (*p == '.')
    {
      ssize_t
        option;
      option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1);
      if (option >= 0)
        {
          channel=(PixelChannel) option;
          channel_mask=SetPixelChannelMask(image,(ChannelType) (1 << channel));
        }
    }
  (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image,
    (double) channel,symbol);
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,key);
  if (value != (const char *) NULL)
    {
      /* Cache hit: restore the channel mask and return the cached value. */
      if (channel_mask != UndefinedChannel)
        (void) SetPixelChannelMask(image,channel_mask);
      return(QuantumScale*StringToDouble(value,(char **) NULL));
    }
  (void) DeleteNodeFromSplayTree(fx_info->symbols,key);
  *statistic='\0';  /* defend against an unrecognized statistic symbol */
  if (LocaleNCompare(symbol,"depth",5) == 0)
    {
      size_t
        depth;
      depth=GetImageDepth(image,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",(double)
        depth);
    }
  if (LocaleNCompare(symbol,"kurtosis",8) == 0)
    {
      double
        kurtosis,
        skewness;
      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",kurtosis);
    }
  if (LocaleNCompare(symbol,"maxima",6) == 0)
    {
      double
        maxima,
        minima;
      (void) GetImageRange(image,&minima,&maxima,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",maxima);
    }
  if (LocaleNCompare(symbol,"mean",4) == 0)
    {
      double
        mean,
        standard_deviation;
      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",mean);
    }
  if (LocaleNCompare(symbol,"minima",6) == 0)
    {
      double
        maxima,
        minima;
      (void) GetImageRange(image,&minima,&maxima,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",minima);
    }
  if (LocaleNCompare(symbol,"skewness",8) == 0)
    {
      double
        kurtosis,
        skewness;
      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",skewness);
    }
  if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
    {
      double
        mean,
        standard_deviation;
      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",
        standard_deviation);
    }
  if (channel_mask != UndefinedChannel)
    (void) SetPixelChannelMask(image,channel_mask);
  /* Cache the formatted statistic so repeated lookups hit the splay-tree. */
  (void) AddValueToSplayTree(fx_info->symbols,ConstantString(key),
    ConstantString(statistic));
  return(QuantumScale*StringToDouble(statistic,(char **) NULL));
}
static double
FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t,
const ssize_t,const char *,size_t *,double *,ExceptionInfo *);
/*
  FxGCD() returns the greatest common divisor of alpha and beta, computed
  with the Euclidean algorithm in iterative form.
*/
static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta)
{
  while (beta != 0)
  {
    MagickOffsetType
      residue;

    residue=alpha % beta;
    alpha=beta;
    beta=residue;
  }
  return(alpha);
}
/*
  FxSubexpression() scans forward over a parenthesized subexpression and
  returns a pointer to the ')' that closes the first '(' encountered.  If the
  string ends before the parenthesis is balanced, it returns a pointer to the
  terminating '\0' and records an UnbalancedParenthesis exception.
*/
static inline const char *FxSubexpression(const char *expression,
  ExceptionInfo *exception)
{
  const char
    *scan;

  ssize_t
    depth;

  depth=0;
  for (scan=expression; *scan != '\0'; scan++)
  {
    /* Stop on the ')' that brings the first '(' back to balance. */
    if ((depth == 1) && (*scan == ')'))
      break;
    if (*scan == '(')
      depth++;
    else
      if (*scan == ')')
        depth--;
  }
  if (*scan == '\0')
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "UnbalancedParenthesis","`%s'",expression);
  return(scan);
}
/*
  FxGetSymbol() resolves a symbol reference in an fx expression: an optional
  image selector (s/u/v, with an optional [index] subexpression), an optional
  pixel locator (p{x,y} absolute or p[dx,dy] relative), and then either a
  channel/attribute name, a color name, or a user-defined symbol.  Returns
  the value (channel values are scaled by QuantumScale); unknown symbols
  raise UnableToParseExpression and return 0.0.
*/
static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel,
  const ssize_t x,const ssize_t y,const char *expression,
  ExceptionInfo *exception)
{
  char
    *q,
    subexpression[MagickPathExtent],
    symbol[MagickPathExtent];
  const char
    *p,
    *value;
  Image
    *image;
  MagickBooleanType
    status;
  PixelInfo
    pixel;
  double
    alpha,
    beta;
  PointInfo
    point;
  register ssize_t
    i;
  size_t
    depth,
    length,
    level;
  p=expression;
  i=GetImageIndexInList(fx_info->images);
  depth=0;
  level=0;
  point.x=(double) x;
  point.y=(double) y;
  /* A single-letter prefix (not followed by another letter) selects the
     source image: s = current, u = first, v = second in the list. */
  if (isalpha((int) ((unsigned char) *(p+1))) == 0)
    {
      if (strchr("suv",(int) *p) != (char *) NULL)
        {
          switch (*p)
          {
            case 's':
            default:
            {
              i=GetImageIndexInList(fx_info->images);
              break;
            }
            case 'u': i=0; break;
            case 'v': i=1; break;
          }
          p++;
          if (*p == '[')
            {
              /* u[n]: image index given by a bracketed subexpression. */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '[')
                  level++;
                else
                  if (*p == ']')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                &depth,&beta,exception);
              i=(ssize_t) alpha;
              p++;
            }
          if (*p == '.')
            p++;
        }
      if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
        {
          p++;
          if (*p == '{')
            {
              /* p{x,y}: absolute pixel coordinates. */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '{')
                  level++;
                else
                  if (*p == '}')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                &depth,&beta,exception);
              point.x=alpha;
              point.y=beta;
              p++;
            }
          else
            if (*p == '[')
              {
                /* p[dx,dy]: offsets relative to the current pixel. */
                level++;
                q=subexpression;
                for (p++; *p != '\0'; )
                {
                  if (*p == '[')
                    level++;
                  else
                    if (*p == ']')
                      {
                        level--;
                        if (level == 0)
                          break;
                      }
                  *q++=(*p++);
                }
                *q='\0';
                alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                  &depth,&beta,exception);
                point.x+=alpha;
                point.y+=beta;
                p++;
              }
          if (*p == '.')
            p++;
        }
    }
  /* Wrap the image index (possibly negative or out of range) into the
     image list. */
  length=GetImageListLength(fx_info->images);
  while (i < 0)
    i+=(ssize_t) length;
  if (length != 0)
    i%=length;
  image=GetImageFromList(fx_info->images,i);
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "NoSuchImage","`%s'",expression);
      return(0.0);
    }
  GetPixelInfo(image,&pixel);
  status=InterpolatePixelInfo(image,fx_info->view[i],image->interpolate,
    point.x,point.y,&pixel,exception);
  (void) status;
  /* If the remaining text is not a reserved pixel-property name, try to
     interpret it as a color name (cached in fx_info->colors). */
  if ((strlen(p) > 2) && (LocaleCompare(p,"intensity") != 0) &&
      (LocaleCompare(p,"luma") != 0) && (LocaleCompare(p,"luminance") != 0) &&
      (LocaleCompare(p,"hue") != 0) && (LocaleCompare(p,"saturation") != 0) &&
      (LocaleCompare(p,"lightness") != 0))
    {
      char
        name[MagickPathExtent];
      (void) CopyMagickString(name,p,MagickPathExtent);
      /* Trim a trailing ".channel" suffix (but stop at a ')'). */
      for (q=name+(strlen(name)-1); q > name; q--)
      {
        if (*q == ')')
          break;
        if (*q == '.')
          {
            *q='\0';
            break;
          }
      }
      if ((strlen(name) > 2) &&
          (GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL))
        {
          PixelInfo
            *color;
          color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name);
          if (color != (PixelInfo *) NULL)
            {
              pixel=(*color);
              p+=strlen(name);
            }
          else
            {
              MagickBooleanType
                status;
              status=QueryColorCompliance(name,AllCompliance,&pixel,
                fx_info->exception);
              if (status != MagickFalse)
                {
                  (void) AddValueToSplayTree(fx_info->colors,ConstantString(
                    name),ClonePixelInfo(&pixel));
                  p+=strlen(name);
                }
            }
        }
    }
  (void) CopyMagickString(symbol,p,MagickPathExtent);
  StripString(symbol);
  /* Bare pixel reference (no symbol text left): return the value of the
     channel being evaluated. */
  if (*symbol == '\0')
    {
      switch (channel)
      {
        case RedPixelChannel: return(QuantumScale*pixel.red);
        case GreenPixelChannel: return(QuantumScale*pixel.green);
        case BluePixelChannel: return(QuantumScale*pixel.blue);
        case BlackPixelChannel:
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                ImageError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
        case AlphaPixelChannel:
        {
          if (pixel.alpha_trait == UndefinedPixelTrait)
            return(1.0);
          alpha=(double) (QuantumScale*pixel.alpha);
          return(alpha);
        }
        case IndexPixelChannel:
          return(0.0);
        case IntensityPixelChannel:
        {
          Quantum
            quantum_pixel[MaxPixelChannels];
          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
        default:
          break;
      }
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",p);
      return(0.0);
    }
  /* Named symbols, dispatched on the first character. */
  switch (*symbol)
  {
    case 'A':
    case 'a':
    {
      if (LocaleCompare(symbol,"a") == 0)
        return((QuantumScale*pixel.alpha));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(symbol,"b") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'C':
    case 'c':
    {
      if (LocaleNCompare(symbol,"channel",7) == 0)
        {
          /* channel(r,g,b,k,a): literal per-channel geometry values. */
          GeometryInfo
            channel_info;
          MagickStatusType
            flags;
          flags=ParseGeometry(symbol+7,&channel_info);
          if (image->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanPixelChannel:
              {
                if ((flags & RhoValue) == 0)
                  return(0.0);
                return(channel_info.rho);
              }
              case MagentaPixelChannel:
              {
                if ((flags & SigmaValue) == 0)
                  return(0.0);
                return(channel_info.sigma);
              }
              case YellowPixelChannel:
              {
                if ((flags & XiValue) == 0)
                  return(0.0);
                return(channel_info.xi);
              }
              case BlackPixelChannel:
              {
                if ((flags & PsiValue) == 0)
                  return(0.0);
                return(channel_info.psi);
              }
              case AlphaPixelChannel:
              {
                if ((flags & ChiValue) == 0)
                  return(0.0);
                return(channel_info.chi);
              }
              default:
                return(0.0);
            }
          switch (channel)
          {
            case RedPixelChannel:
            {
              if ((flags & RhoValue) == 0)
                return(0.0);
              return(channel_info.rho);
            }
            case GreenPixelChannel:
            {
              if ((flags & SigmaValue) == 0)
                return(0.0);
              return(channel_info.sigma);
            }
            case BluePixelChannel:
            {
              if ((flags & XiValue) == 0)
                return(0.0);
              return(channel_info.xi);
            }
            case BlackPixelChannel:
            {
              if ((flags & ChiValue) == 0)
                return(0.0);
              return(channel_info.chi);
            }
            case AlphaPixelChannel:
            {
              if ((flags & PsiValue) == 0)
                return(0.0);
              return(channel_info.psi);
            }
            default:
              return(0.0);
          }
        }
      if (LocaleCompare(symbol,"c") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(symbol,"depth",5) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(symbol,"g") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleNCompare(symbol,"kurtosis",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"k") == 0)
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(symbol,"h") == 0)
        return((double) image->rows);
      if (LocaleCompare(symbol,"hue") == 0)
        {
          double
            hue,
            lightness,
            saturation;
          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(hue);
        }
      break;
    }
    case 'I':
    case 'i':
    {
      if ((LocaleCompare(symbol,"image.depth") == 0) ||
          (LocaleCompare(symbol,"image.minima") == 0) ||
          (LocaleCompare(symbol,"image.maxima") == 0) ||
          (LocaleCompare(symbol,"image.mean") == 0) ||
          (LocaleCompare(symbol,"image.kurtosis") == 0) ||
          (LocaleCompare(symbol,"image.skewness") == 0) ||
          (LocaleCompare(symbol,"image.standard_deviation") == 0))
        return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
      if (LocaleCompare(symbol,"image.resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"image.resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"intensity") == 0)
        {
          Quantum
            quantum_pixel[MaxPixelChannels];
          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
      if (LocaleCompare(symbol,"i") == 0)
        return((double) x);
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(symbol,"j") == 0)
        return((double) y);
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleCompare(symbol,"lightness") == 0)
        {
          double
            hue,
            lightness,
            saturation;
          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(lightness);
        }
      if (LocaleCompare(symbol,"luma") == 0)
        {
          double
            luma;
          luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luma);
        }
      if (LocaleCompare(symbol,"luminance") == 0)
        {
          double
            luminence;
          luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luminence);
        }
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleNCompare(symbol,"maxima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"mean",4) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"minima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"m") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleCompare(symbol,"n") == 0)
        return((double) GetImageListLength(fx_info->images));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(symbol,"o") == 0)
        return(QuantumScale*pixel.alpha);
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(symbol,"page.height") == 0)
        return((double) image->page.height);
      if (LocaleCompare(symbol,"page.width") == 0)
        return((double) image->page.width);
      if (LocaleCompare(symbol,"page.x") == 0)
        return((double) image->page.x);
      if (LocaleCompare(symbol,"page.y") == 0)
        return((double) image->page.y);
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(symbol,"quality") == 0)
        return((double) image->quality);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleCompare(symbol,"resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"r") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(symbol,"saturation") == 0)
        {
          double
            hue,
            lightness,
            saturation;
          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(saturation);
        }
      if (LocaleNCompare(symbol,"skewness",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleCompare(symbol,"t") == 0)
        return((double) GetImageIndexInList(fx_info->images));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(symbol,"w") == 0)
        return((double) image->columns);
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(symbol,"y") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(symbol,"z") == 0)
        return((double)GetImageDepth(image, fx_info->exception));
      break;
    }
    default:
      break;
  }
  /* Last resort: a user-defined symbol stored in the symbol table. */
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (value != (const char *) NULL)
    return(StringToDouble(value,(char **) NULL));
  (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
    "UnableToParseExpression","`%s'",symbol);
  return(0.0);
}
/*
  FxOperatorPrecedence() scans an FX expression and returns a pointer to the
  top-level operator with the weakest binding (the point at which the
  expression should be split into left/right subexpressions), or NULL when no
  operator is found.  Binary operators use left-to-right associativity;
  complement, ternary and assignment use right-to-left.
*/
static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;
  FxPrecedence
    precedence,
    target;
  register const char
    *subexpression;
  register int
    c;  /* previous significant character seen ('\0' before the first one) */
  size_t
    level;  /* {}/[] nesting depth; operators only count at level 0 */
  c=0;
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while (*expression != '\0')
  {
    precedence=UndefinedPrecedence;
    if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /*
      Skip over multi-character function names and literals whose spelling
      contains operator characters (e.g. the '2' in atan2, the sign in E+/E-,
      hex digits after '#') so they are not mistaken for operators.
    */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
#if defined(MAGICKCORE_HAVE_ACOSH)
        if (LocaleNCompare(expression,"acosh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
        if (LocaleNCompare(expression,"asinh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
        if (LocaleNCompare(expression,"atanh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
        if (LocaleNCompare(expression,"atan2",5) == 0)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'E':
      case 'e':
      {
        if ((isdigit((int) ((unsigned char) c)) != 0) &&
            ((LocaleNCompare(expression,"E+",2) == 0) ||
             (LocaleNCompare(expression,"E-",2) == 0)))
          {
            expression+=2;  /* scientific notation */
            break;
          }
      }
      /* fallthrough: a non-scientific 'e'/'E' gets no special handling */
      case 'J':
      case 'j':
      {
        if ((LocaleNCompare(expression,"j0",2) == 0) ||
            (LocaleNCompare(expression,"j1",2) == 0))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    if (level == 0)
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        case '@':
        {
          precedence=ExponentPrecedence;
          break;
        }
        default:
        {
          /*
            Implied multiplication: a value or ')' immediately followed by a
            name, '(' or digit (e.g. "2pi") binds like '*'.
          */
          if (((c != 0) && ((isdigit((int) ((unsigned char) c)) != 0) ||
               (strchr(")",(int) ((unsigned char) c)) != (char *) NULL))) &&
              (((islower((int) ((unsigned char) *expression)) != 0) ||
               (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) ||
               ((isdigit((int) ((unsigned char) c)) == 0) &&
                (isdigit((int) ((unsigned char) *expression)) != 0))) &&
              (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /* only a binary +/- (one not preceded by an operator) counts */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha(c) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ExponentialNotation:
        {
          precedence=ExponentialNotationPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
          precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
            */
            target=precedence;
            subexpression=expression;
          }
      }
    else
      if (precedence >= target)
        {
          /*
            Left-to-right associativity.
          */
          target=precedence;
          subexpression=expression;
        }
    if (strchr("(",(int) *expression) != (char *) NULL)
      expression=FxSubexpression(expression,exception);
    c=(int) (*expression++);
  }
  return(subexpression);
}
/*
  FxEvaluateSubexpression() recursively evaluates an FX expression for the
  given channel at pixel (x,y) and returns its value.  The expression is
  first split at its weakest-binding top-level operator (FxOperatorPrecedence)
  and the two halves are evaluated recursively; otherwise parenthesized
  groups, unary operators, function names, constants and symbols are handled
  directly.  *beta receives the most recent right-hand value (used by
  two-argument functions such as atan2/pow/hypot, whose arguments arrive as a
  comma expression).  *depth tracks parenthesis nesting; exceeding
  FxMaxParenthesisDepth raises an OptionError, which short-circuits further
  recursion via the severity check at entry.
*/
static double FxEvaluateSubexpression(FxInfo *fx_info,
  const PixelChannel channel,const ssize_t x,const ssize_t y,
  const char *expression,size_t *depth,double *beta,ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth 58
  char
    *q,
    subexpression[MagickPathExtent];
  double
    alpha,
    gamma;
  register const char
    *p;
  *beta=0.0;
  if (exception->severity >= ErrorException)
    return(0.0);
  while (isspace((int) ((unsigned char) *expression)) != 0)
    expression++;
  if (*expression == '\0')
    return(0.0);
  *subexpression='\0';
  p=FxOperatorPrecedence(expression,exception);
  if (p != (const char *) NULL)
    {
      /*
        Binary (or right-unary) operator found: evaluate the left-hand side,
        then dispatch on the operator to combine it with the right-hand side.
      */
      (void) CopyMagickString(subexpression,expression,(size_t)
        (p-expression+1));
      alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth,
        beta,exception);
      switch ((unsigned char) *p)
      {
        case '~':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          *beta=(double) (~(size_t) *beta);
          return(*beta);
        }
        case '!':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(*beta == 0.0 ? 1.0 : 0.0);
        }
        case '^':
        {
          *beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,
            beta,exception));
          return(*beta);
        }
        case '*':
        case ExponentialNotation:
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(alpha*(*beta));
        }
        case '/':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          if (*beta == 0.0)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"DivideByZero","`%s'",expression);
              return(0.0);
            }
          return(alpha/(*beta));
        }
        case '%':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          *beta=fabs(floor((*beta)+0.5));  /* round divisor to an integer */
          if (*beta == 0.0)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"DivideByZero","`%s'",expression);
              return(0.0);
            }
          return(fmod(alpha,*beta));
        }
        case '+':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(alpha+(*beta));
        }
        case '-':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(alpha-(*beta));
        }
        case LeftShiftOperator:
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          *beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5));
          return(*beta);
        }
        case RightShiftOperator:
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          *beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5));
          return(*beta);
        }
        case '<':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(alpha < *beta ? 1.0 : 0.0);
        }
        case LessThanEqualOperator:
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(alpha <= *beta ? 1.0 : 0.0);
        }
        case '>':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(alpha > *beta ? 1.0 : 0.0);
        }
        case GreaterThanEqualOperator:
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(alpha >= *beta ? 1.0 : 0.0);
        }
        case EqualOperator:
        {
          /* equality is fuzzy: compare within MagickEpsilon */
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0);
        }
        case NotEqualOperator:
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0);
        }
        case '&':
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          *beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5));
          return(*beta);
        }
        case '|':
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          *beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5));
          return(*beta);
        }
        case LogicalAndOperator:
        {
          /* short-circuit: skip the right-hand side when alpha is false */
          p++;
          if (alpha <= 0.0)
            {
              *beta=0.0;
              return(*beta);
            }
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta,
            exception);
          *beta=(gamma > 0.0) ? 1.0 : 0.0;
          return(*beta);
        }
        case LogicalOrOperator:
        {
          /* short-circuit: skip the right-hand side when alpha is true */
          p++;
          if (alpha > 0.0)
            {
              *beta=1.0;
              return(*beta);
            }
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta,
            exception);
          *beta=(gamma > 0.0) ? 1.0 : 0.0;
          return(*beta);
        }
        case '?':
        {
          /* ternary: split the remainder at the first ':' */
          (void) CopyMagickString(subexpression,++p,MagickPathExtent);
          q=subexpression;
          p=StringToken(":",&q);
          if (q == (char *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"UnableToParseExpression","`%s'",subexpression);
              return(0.0);
            }
          if (fabs(alpha) >= MagickEpsilon)
            gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta,
              exception);
          else
            gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,depth,beta,
              exception);
          return(gamma);
        }
        case '=':
        {
          char
            numeric[MagickPathExtent];
          /* left-hand side of an assignment must be a pure alpha identifier */
          q=subexpression;
          while (isalpha((int) ((unsigned char) *q)) != 0)
            q++;
          if (*q != '\0')
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"UnableToParseExpression","`%s'",subexpression);
              return(0.0);
            }
          ClearMagickException(exception);
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          /* store the value as a string in the per-instance symbol table */
          (void) FormatLocaleString(numeric,MagickPathExtent,"%.20g",*beta);
          (void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression);
          (void) AddValueToSplayTree(fx_info->symbols,ConstantString(
            subexpression),ConstantString(numeric));
          return(*beta);
        }
        case ',':
        {
          /* comma evaluates both sides; *beta carries the second argument */
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(alpha);
        }
        case ';':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(*beta);
        }
        default:
        {
          /* implied multiplication (e.g. "2pi") */
          gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta,
            exception);
          return(gamma);
        }
      }
    }
  if (strchr("(",(int) *expression) != (char *) NULL)
    {
      (*depth)++;
      if (*depth >= FxMaxParenthesisDepth)
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "ParenthesisNestedTooDeeply","`%s'",expression);
      (void) CopyMagickString(subexpression,expression+1,MagickPathExtent);
      /*
        Strip the trailing ')'; guard against an empty copy (a bare "(")
        which would otherwise write out of bounds at subexpression[-1].
      */
      if (strlen(subexpression) != 0)
        subexpression[strlen(subexpression)-1]='\0';
      gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth,
        beta,exception);
      (*depth)--;
      return(gamma);
    }
  switch (*expression)
  {
    case '+':
    {
      gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta,
        exception);
      return(1.0*gamma);
    }
    case '-':
    {
      gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta,
        exception);
      return(-1.0*gamma);
    }
    case '~':
    {
      gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta,
        exception);
      return((double) (~(size_t) (gamma+0.5)));
    }
    case 'A':
    case 'a':
    {
      if (LocaleNCompare(expression,"abs",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(fabs(alpha));
        }
#if defined(MAGICKCORE_HAVE_ACOSH)
      if (LocaleNCompare(expression,"acosh",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          return(acosh(alpha));
        }
#endif
      if (LocaleNCompare(expression,"acos",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          return(acos(alpha));
        }
#if defined(MAGICKCORE_HAVE_J1)
      if (LocaleNCompare(expression,"airy",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          if (alpha == 0.0)
            return(1.0);
          gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha);
          return(gamma*gamma);
        }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
      if (LocaleNCompare(expression,"asinh",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          return(asinh(alpha));
        }
#endif
      if (LocaleNCompare(expression,"asin",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          return(asin(alpha));
        }
      if (LocaleNCompare(expression,"alt",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0);
        }
      if (LocaleNCompare(expression,"atan2",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          return(atan2(alpha,*beta));
        }
#if defined(MAGICKCORE_HAVE_ATANH)
      if (LocaleNCompare(expression,"atanh",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          return(atanh(alpha));
        }
#endif
      if (LocaleNCompare(expression,"atan",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          return(atan(alpha));
        }
      if (LocaleCompare(expression,"a") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(expression,"b") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'C':
    case 'c':
    {
      if (LocaleNCompare(expression,"ceil",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          return(ceil(alpha));
        }
      if (LocaleNCompare(expression,"clamp",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          if (alpha < 0.0)
            return(0.0);
          if (alpha > 1.0)
            return(1.0);
          return(alpha);
        }
      if (LocaleNCompare(expression,"cosh",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          return(cosh(alpha));
        }
      if (LocaleNCompare(expression,"cos",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(cos(alpha));
        }
      if (LocaleCompare(expression,"c") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(expression,"debug",5) == 0)
        {
          const char
            *type;
          /* debug(expr): evaluate expr and trace its value to fx_info->file */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          if (fx_info->images->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanPixelChannel: type="cyan"; break;
              case MagentaPixelChannel: type="magenta"; break;
              case YellowPixelChannel: type="yellow"; break;
              case AlphaPixelChannel: type="opacity"; break;
              case BlackPixelChannel: type="black"; break;
              default: type="unknown"; break;
            }
          else
            switch (channel)
            {
              case RedPixelChannel: type="red"; break;
              case GreenPixelChannel: type="green"; break;
              case BluePixelChannel: type="blue"; break;
              case AlphaPixelChannel: type="opacity"; break;
              default: type="unknown"; break;
            }
          (void) CopyMagickString(subexpression,expression+6,MagickPathExtent);
          if (strlen(subexpression) > 1)
            subexpression[strlen(subexpression)-1]='\0';
          if (fx_info->file != (FILE *) NULL)
            (void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: "
              "%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type,
              subexpression,GetMagickPrecision(),alpha);
          return(0.0);
        }
      if (LocaleNCompare(expression,"drc",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return((alpha/(*beta*(alpha-1.0)+1.0)));
        }
      break;
    }
    case 'E':
    case 'e':
    {
      if (LocaleCompare(expression,"epsilon") == 0)
        return(MagickEpsilon);
#if defined(MAGICKCORE_HAVE_ERF)
      if (LocaleNCompare(expression,"erf",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(erf(alpha));
        }
#endif
      if (LocaleNCompare(expression,"exp",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(exp(alpha));
        }
      if (LocaleCompare(expression,"e") == 0)
        return(2.7182818284590452354);
      break;
    }
    case 'F':
    case 'f':
    {
      if (LocaleNCompare(expression,"floor",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          return(floor(alpha));
        }
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleNCompare(expression,"gauss",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          gamma=exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI);
          return(gamma);
        }
      if (LocaleNCompare(expression,"gcd",3) == 0)
        {
          MagickOffsetType
            gcd;
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+
            0.5));
          return((double) gcd);
        }
      if (LocaleCompare(expression,"g") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(expression,"h") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleCompare(expression,"hue") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleNCompare(expression,"hypot",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          return(hypot(alpha,*beta));
        }
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleCompare(expression,"k") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'I':
    case 'i':
    {
      if (LocaleCompare(expression,"intensity") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleNCompare(expression,"int",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(floor(alpha));
        }
      if (LocaleNCompare(expression,"isnan",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          return((double) !!IsNaN(alpha));
        }
      if (LocaleCompare(expression,"i") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(expression,"j") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
#if defined(MAGICKCORE_HAVE_J0)
      if (LocaleNCompare(expression,"j0",2) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth,
            beta,exception);
          return(j0(alpha));
        }
#endif
#if defined(MAGICKCORE_HAVE_J1)
      if (LocaleNCompare(expression,"j1",2) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth,
            beta,exception);
          return(j1(alpha));
        }
#endif
#if defined(MAGICKCORE_HAVE_J1)
      if (LocaleNCompare(expression,"jinc",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          if (alpha == 0.0)
            return(1.0);
          gamma=(2.0*j1((MagickPI*alpha))/(MagickPI*alpha));
          return(gamma);
        }
#endif
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleNCompare(expression,"ln",2) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth,
            beta,exception);
          return(log(alpha));
        }
      if (LocaleNCompare(expression,"logtwo",6) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,depth,
            beta,exception);
          return(log10(alpha)/log10(2.0));  /* log base 2 via log10 ratio */
        }
      if (LocaleNCompare(expression,"log",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(log10(alpha));
        }
      if (LocaleCompare(expression,"lightness") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleCompare(expression,"MaxRGB") == 0)
        return(QuantumRange);
      if (LocaleNCompare(expression,"maxima",6) == 0)
        break;
      if (LocaleNCompare(expression,"max",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(alpha > *beta ? alpha : *beta);
        }
      if (LocaleNCompare(expression,"minima",6) == 0)
        break;
      if (LocaleNCompare(expression,"min",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(alpha < *beta ? alpha : *beta);
        }
      if (LocaleNCompare(expression,"mod",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          gamma=alpha-floor((alpha/(*beta)))*(*beta);
          return(gamma);
        }
      if (LocaleCompare(expression,"m") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleNCompare(expression,"not",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return((double) (alpha < MagickEpsilon));
        }
      if (LocaleCompare(expression,"n") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(expression,"Opaque") == 0)
        return(1.0);
      if (LocaleCompare(expression,"o") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(expression,"phi") == 0)
        return(MagickPHI);
      if (LocaleCompare(expression,"pi") == 0)
        return(MagickPI);
      if (LocaleNCompare(expression,"pow",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(pow(alpha,*beta));
        }
      if (LocaleCompare(expression,"p") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(expression,"QuantumRange") == 0)
        return(QuantumRange);
      if (LocaleCompare(expression,"QuantumScale") == 0)
        return(QuantumScale);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleNCompare(expression,"rand",4) == 0)
        {
          /* serialize: the random generator is shared across threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp critical (MagickCore_FxEvaluateSubexpression)
#endif
          alpha=GetPseudoRandomValue(fx_info->random_info);
          return(alpha);
        }
      if (LocaleNCompare(expression,"round",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          return(floor(alpha+0.5));
        }
      if (LocaleCompare(expression,"r") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(expression,"saturation") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleNCompare(expression,"sign",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          return(alpha < 0.0 ? -1.0 : 1.0);
        }
      if (LocaleNCompare(expression,"sinc",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          if (alpha == 0)
            return(1.0);
          gamma=sin((MagickPI*alpha))/(MagickPI*alpha);
          return(gamma);
        }
      if (LocaleNCompare(expression,"sinh",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          return(sinh(alpha));
        }
      if (LocaleNCompare(expression,"sin",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(sin(alpha));
        }
      if (LocaleNCompare(expression,"sqrt",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          return(sqrt(alpha));
        }
      if (LocaleNCompare(expression,"squish",6) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,depth,
            beta,exception);
          return((1.0/(1.0+exp(-alpha))));
        }
      if (LocaleCompare(expression,"s") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleNCompare(expression,"tanh",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          return(tanh(alpha));
        }
      if (LocaleNCompare(expression,"tan",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(tan(alpha));
        }
      if (LocaleCompare(expression,"Transparent") == 0)
        return(0.0);
      if (LocaleNCompare(expression,"trunc",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          if (alpha >= 0.0)
            return(floor(alpha));
          return(ceil(alpha));
        }
      if (LocaleCompare(expression,"t") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'U':
    case 'u':
    {
      if (LocaleCompare(expression,"u") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'V':
    case 'v':
    {
      if (LocaleCompare(expression,"v") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleNCompare(expression,"while",5) == 0)
        {
          /* while(cond,body): re-evaluate until cond drops below epsilon */
          do
          {
            alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
              depth,beta,exception);
          } while (fabs(alpha) >= MagickEpsilon);
          return(*beta);
        }
      if (LocaleCompare(expression,"w") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(expression,"y") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(expression,"z") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    default:
      break;
  }
  /*
    Not an operator, group or function: try a numeric literal (with optional
    SI prefix); anything else falls through to the symbol table.
  */
  q=(char *) expression;
  alpha=InterpretSiPrefixValue(expression,&q);
  if (q == expression)
    return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
  return(alpha);
}
/*
  FxEvaluateExpression() evaluates the FX expression on the gray channel at
  pixel (0,0) and stores the result in *alpha; it is a thin convenience
  wrapper over FxEvaluateChannelExpression().
*/
MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  return(FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception));
}
/*
  FxPreprocessExpression() evaluates the expression once (gray channel,
  pixel (0,0)) to validate it, with debug/trace output temporarily
  suppressed; the original fx_info->file is restored before returning.
*/
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  FILE
    *saved_file;

  MagickBooleanType
    status;

  /*
    Disable the trace stream so debug() calls stay silent during the dry run.
  */
  saved_file=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  fx_info->file=saved_file;
  return(status);
}
/*
  FxEvaluateChannelExpression() evaluates fx_info's expression for the given
  channel at pixel (x,y), storing the result in *alpha.  Returns MagickFalse
  only when evaluation raised an OptionError (i.e. a parse/evaluation
  failure); other severities still yield MagickTrue.
*/
MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const PixelChannel channel,const ssize_t x,const ssize_t y,
  double *alpha,ExceptionInfo *exception)
{
  double
    beta = 0.0;  /* scratch right-hand value threaded through the recursion */

  size_t
    depth = 0;  /* parenthesis nesting depth, bounded by the evaluator */

  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,&depth,
    &beta,exception);
  return(exception->severity == OptionError ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxImage() applies a mathematical expression to the specified image.
%
% The format of the FxImage method is:
%
% Image *FxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A mathematical expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DestroyFxThreadSet() releases every per-thread FxInfo in the set and the
  set itself; always returns NULL so callers can write
  fx_info=DestroyFxThreadSet(fx_info).
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  register ssize_t
    i;

  ssize_t
    number_threads;

  assert(fx_info != (FxInfo **) NULL);
  /*
    Hoist the resource-limit lookup out of the loop: the original evaluated
    GetMagickResourceLimit() on every iteration of the loop condition.
  */
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (fx_info[i] != (FxInfo *) NULL)
      fx_info[i]=DestroyFxInfo(fx_info[i]);
  fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
/*
  AcquireFxThreadSet() allocates one FxInfo per worker thread (indexed later
  by the OpenMP thread id) so FxImage() can evaluate the expression in
  parallel without sharing evaluator state.  An expression of the form
  "@path" is read from the named file.  Each instance is preprocessed once to
  validate the expression; on any failure the partially built set is torn
  down and NULL is returned.
*/
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  FxInfo
    **fx_info;

  double
    alpha;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return((FxInfo **) NULL);
    }
  (void) ResetMagickMemory(fx_info,0,number_threads*sizeof(*fx_info));
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0UL,exception);
  if (fx_expression == (char *) NULL)
    {
      /*
        FileToString() can fail (missing/unreadable @file); the original
        passed the NULL on to AcquireFxInfo() and DestroyString().
      */
      fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
      return((FxInfo **) NULL);
    }
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    MagickBooleanType
      status;

    fx_info[i]=AcquireFxInfo(image,fx_expression,exception);
    if (fx_info[i] == (FxInfo *) NULL)
      break;
    /* dry-run the expression once so parse errors surface up front */
    status=FxPreprocessExpression(fx_info[i],&alpha,exception);
    if (status == MagickFalse)
      break;
  }
  fx_expression=DestroyString(fx_expression);
  if (i < (ssize_t) number_threads)
    fx_info=DestroyFxThreadSet(fx_info);
  return(fx_info);
}
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define FxImageTag  "Fx/Image"
  CacheView
    *fx_view,
    *image_view;
  FxInfo
    **magick_restrict fx_info;
  Image
    *fx_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    One FxInfo per worker thread so expression state (symbol table, random
    generator) is never shared across threads.
  */
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    return((Image *) NULL);
  fx_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  /*
    Fx image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,fx_image,fx_image->rows,1)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's FxInfo instance */
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    /* a failure in any thread makes remaining rows no-ops */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha;
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (fx_traits == UndefinedPixelTrait))
          continue;
        /* copy-trait channels and write-masked pixels pass through as-is */
        if (((fx_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) <= (QuantumRange/2)))
          {
            SetPixelChannel(fx_image,channel,p[i],q);
            continue;
          }
        alpha=0.0;
        /* expression result is normalized [0,1]; scale back to quantum */
        (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha,
          exception);
        q[i]=ClampToQuantum(QuantumRange*alpha);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(fx_image);
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /* progress counter is shared; serialize the update */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FxImage)
#endif
        proceed=SetImageProgress(image,FxImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  image_view=DestroyCacheView(image_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p l o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImplodeImage() creates a new image that is a copy of an existing
% one with the image pixels "imploded" by the specified percentage. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ImplodeImage method is:
%
% Image *ImplodeImage(const Image *image,const double amount,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o implode_image: Method ImplodeImage returns a pointer to the image
% after it is implode. A null image is returned if there is a memory
% shortage.
%
% o image: the image.
%
% o amount: Define the extent of the implosion.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ImplodeImageTag "Implode/Image"

  CacheView
    *canvas_view,
    *implode_view,
    *interpolate_view;

  Image
    *canvas,
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    radius;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Work on a private clone so the source image is never modified; ensure
    the clone has an opaque alpha channel when the background color is not
    fully opaque.
  */
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas->alpha_trait == UndefinedPixelTrait) &&
      (canvas->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlphaChannel(canvas,OpaqueAlphaChannel,exception);
  implode_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
    exception);
  if (implode_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  /*
    Compute scaling factor: normalize the shorter axis so the implosion
    ellipse is centered in the canvas; radius follows the shorter half-axis.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*canvas->columns;
  center.y=0.5*canvas->rows;
  radius=center.x;
  if (canvas->columns > canvas->rows)
    scale.y=(double) canvas->columns/(double) canvas->rows;
  else
    if (canvas->columns < canvas->rows)
      {
        scale.x=(double) canvas->rows/(double) canvas->columns;
        radius=center.y;
      }
  /*
    Implode image: rows are processed in parallel when OpenMP is available;
    each row reads virtual pixels from the canvas and queues authentic
    pixels into the implode image.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas,exception);
  interpolate_view=AcquireVirtualCacheView(canvas,exception);
  implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(canvas,implode_image,canvas->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine if the pixel is within an ellipse.
      */
      if (GetPixelWriteMask(canvas,p) <= (QuantumRange/2))
        {
          /* Write-masked pixel: emit the background color instead. */
          SetPixelBackgoundColor(implode_image,q);
          p+=GetPixelChannels(canvas);
          q+=GetPixelChannels(implode_image);
          continue;
        }
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        /* Outside the implosion radius: copy channels verbatim. */
        for (i=0; i < (ssize_t) GetPixelChannels(canvas); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(canvas,i);
          PixelTrait traits = GetPixelChannelTraits(canvas,channel);
          PixelTrait implode_traits = GetPixelChannelTraits(implode_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (implode_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(implode_image,channel,p[i],q);
        }
      else
        {
          double
            factor;

          /*
            Implode the pixel: sample the canvas at a radially displaced
            source coordinate computed from the sine-based factor.
          */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin(MagickPI*sqrt((double) distance)/radius/2),-amount);
          status=InterpolatePixelChannels(canvas,interpolate_view,implode_image,
            method,(double) (factor*delta.x/scale.x+center.x),(double) (factor*
            delta.y/scale.y+center.y),q,exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas);
      q+=GetPixelChannels(implode_image);
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ImplodeImage)
#endif
        proceed=SetImageProgress(canvas,ImplodeImageTag,progress++,
          canvas->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas=DestroyImage(canvas);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The MorphImages() method requires a minimum of two images. The first
% image is transformed into the second by a number of intervening images
% as specified by frames.
%
% The format of the MorphImage method is:
%
% Image *MorphImages(const Image *image,const size_t number_frames,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_frames: Define the number of in-between image to generate.
% The more in-between frames, the smoother the morph.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphImages(const Image *image,const size_t number_frames,
  ExceptionInfo *exception)
{
#define MorphImageTag "Morph/Image"

  double
    alpha,
    beta;

  Image
    *morph_image,
    *morph_images;

  MagickBooleanType
    status;

  MagickOffsetType
    scene;

  register const Image
    *next;

  register ssize_t
    n;

  ssize_t
    y;

  /*
    Clone first frame in sequence.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  morph_images=CloneImage(image,0,0,MagickTrue,exception);
  if (morph_images == (Image *) NULL)
    return((Image *) NULL);
  /*
    Fix: initialize status before any branch touches it; the single-image
    path below previously assigned to it while it was still uninitialized.
  */
  status=MagickTrue;
  if (GetNextImageInList(image) == (Image *) NULL)
    {
      /*
        Morph single image: just replicate the frame number_frames times.
      */
      for (n=1; n < (ssize_t) number_frames; n++)
      {
        morph_image=CloneImage(image,0,0,MagickTrue,exception);
        if (morph_image == (Image *) NULL)
          {
            morph_images=DestroyImageList(morph_images);
            return((Image *) NULL);
          }
        AppendImageToList(&morph_images,morph_image);
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n,
              number_frames);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      return(GetFirstImageInList(morph_images));
    }
  /*
    Morph image sequence: for each adjacent pair of frames, generate
    number_frames in-between frames by resizing toward the next frame's
    geometry and alpha-blending the pixel channels.
  */
  scene=0;
  next=image;
  for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
  {
    for (n=0; n < (ssize_t) number_frames; n++)
    {
      CacheView
        *image_view,
        *morph_view;

      /* beta ramps from ~0 to ~1 across the in-between frames. */
      beta=(double) (n+1.0)/(double) (number_frames+1.0);
      alpha=1.0-beta;
      morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta*
        GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta*
        GetNextImageInList(next)->rows+0.5),next->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      status=SetImageStorageClass(morph_image,DirectClass,exception);
      if (status == MagickFalse)
        {
          /*
            Fix: release the frames accumulated so far as well; the
            original leaked morph_images on this error path.
          */
          morph_image=DestroyImage(morph_image);
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      AppendImageToList(&morph_images,morph_image);
      morph_images=GetLastImageInList(morph_images);
      morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns,
        morph_images->rows,GetNextImageInList(next)->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      image_view=AcquireVirtualCacheView(morph_image,exception);
      morph_view=AcquireAuthenticCacheView(morph_images,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_number_threads(morph_image,morph_image,morph_image->rows,1)
#endif
      for (y=0; y < (ssize_t) morph_images->rows; y++)
      {
        MagickBooleanType
          sync;

        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
          exception);
        q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
          exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) morph_images->columns; x++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(morph_image,i);
            PixelTrait traits = GetPixelChannelTraits(morph_image,channel);
            PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel);
            if ((traits == UndefinedPixelTrait) ||
                (morph_traits == UndefinedPixelTrait))
              continue;
            if (((morph_traits & CopyPixelTrait) != 0) ||
                (GetPixelWriteMask(morph_images,p) <= (QuantumRange/2)))
              {
                SetPixelChannel(morph_image,channel,p[i],q);
                continue;
              }
            /*
              Blend: alpha weighs the accumulated frame, beta the next
              frame's resized pixels.
            */
            SetPixelChannel(morph_image,channel,ClampToQuantum(alpha*
              GetPixelChannel(morph_images,channel,q)+beta*p[i]),q);
          }
          p+=GetPixelChannels(morph_image);
          q+=GetPixelChannels(morph_images);
        }
        sync=SyncCacheViewAuthenticPixels(morph_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      morph_view=DestroyCacheView(morph_view);
      image_view=DestroyCacheView(image_view);
      morph_image=DestroyImage(morph_image);
    }
    if (n < (ssize_t) number_frames)
      break;
    /*
      Clone last frame in sequence.
    */
    morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
    if (morph_image == (Image *) NULL)
      {
        morph_images=DestroyImageList(morph_images);
        return((Image *) NULL);
      }
    AppendImageToList(&morph_images,morph_image);
    morph_images=GetLastImageInList(morph_images);
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MorphImages)
#endif
        proceed=SetImageProgress(image,MorphImageTag,scene,
          GetImageListLength(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
    scene++;
  }
  if (GetNextImageInList(next) != (Image *) NULL)
    {
      morph_images=DestroyImageList(morph_images);
      return((Image *) NULL);
    }
  return(GetFirstImageInList(morph_images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P l a s m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PlasmaImage() initializes an image with plasma fractal values. The image
% must be initialized with a base color and the random number generator
% seeded before this method is called.
%
% The format of the PlasmaImage method is:
%
% MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
% size_t attenuate,size_t depth,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o segment: Define the region to apply plasma fractals values.
%
% o attenuate: Define the plasma attenuation factor.
%
% o depth: Limit the plasma recursion depth.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Perturb pixel by a symmetric random offset in [-noise/2,+noise/2] and
  clamp the result to the valid Quantum range.
*/
static inline Quantum PlasmaPixel(RandomInfo *random_info,
  const double pixel,const double noise)
{
  double
    perturbed;

  Quantum
    result;

  perturbed=pixel+noise*GetPseudoRandomValue(random_info)-noise/2.0;
  result=ClampToQuantum(perturbed);
  if (result <= 0)
    return((Quantum) 0);
  if (result >= QuantumRange)
    return(QuantumRange);
  return(result);
}
/*
  Recursive worker for PlasmaImage(): while depth remains, subdivide the
  segment into quadrants; at the leaves, assign each edge midpoint and the
  center pixel the average of two neighbors perturbed by random noise whose
  amplitude shrinks as attenuate grows.
*/
static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view,
  CacheView *u_view,CacheView *v_view,RandomInfo *random_info,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  double
    plasma;

  register const Quantum
    *magick_restrict u,
    *magick_restrict v;

  register Quantum
    *magick_restrict q;

  register ssize_t
    i;

  ssize_t
    x,
    x_mid,
    y,
    y_mid;

  if ((fabs(segment->x2-segment->x1) <= MagickEpsilon) &&
      (fabs(segment->y2-segment->y1) <= MagickEpsilon))
    return(MagickTrue);
  if (depth != 0)
    {
      MagickBooleanType
        status;

      SegmentInfo
        local_info;

      /*
        Divide the area into quadrants and recurse.
      */
      depth--;
      attenuate++;
      x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
      y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
      local_info=(*segment);
      local_info.x2=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      local_info=(*segment);
      local_info.y1=(double) y_mid;
      local_info.x2=(double) x_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y1=(double) y_mid;
      status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      return(status);
    }
  x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
  y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
  if ((fabs(segment->x1-x_mid) < MagickEpsilon) &&
      (fabs(segment->x2-x_mid) < MagickEpsilon) &&
      (fabs(segment->y1-y_mid) < MagickEpsilon) &&
      (fabs(segment->y2-y_mid) < MagickEpsilon))
    return(MagickFalse);
  /*
    Average pixels and apply plasma: noise amplitude is inversely
    proportional to attenuate, which recursion has incremented per level.
  */
  plasma=(double) QuantumRange/(2.0*attenuate);
  if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
      (fabs(segment->x2-x_mid) > MagickEpsilon))
    {
      /*
        Left pixel.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),1,1,
        exception);
      v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),1,1,
        exception);
      q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
      }
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      if (fabs(segment->x1-segment->x2) > MagickEpsilon)
        {
          /*
            Right pixel.
          */
          x=(ssize_t) ceil(segment->x2-0.5);
          u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->y1-y_mid) > MagickEpsilon) ||
      (fabs(segment->y2-y_mid) > MagickEpsilon))
    {
      /*
        NOTE(review): this guard mixes x1/x_mid with y2/y_mid, unlike the
        single-axis checks used for the left/right pixels above — confirm
        this asymmetry is intentional.
      */
      if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
          (fabs(segment->y2-y_mid) > MagickEpsilon))
        {
          /*
            Bottom pixel.
          */
          y=(ssize_t) ceil(segment->y2-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
      if (fabs(segment->y1-segment->y2) > MagickEpsilon)
        {
          /*
            Top pixel.
          */
          y=(ssize_t) ceil(segment->y1-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->x1-segment->x2) > MagickEpsilon) ||
      (fabs(segment->y1-segment->y2) > MagickEpsilon))
    {
      /*
        Middle pixel: average the segment's two opposite corners.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      y=(ssize_t) ceil(segment->y1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception);
      x=(ssize_t) ceil(segment->x2-0.5);
      y=(ssize_t) ceil(segment->y2-0.5);
      v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
      }
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
    }
  /* Segments smaller than 3x3 pixels terminate the recursion. */
  if ((fabs(segment->x2-segment->x1) < 3.0) &&
      (fabs(segment->y2-segment->y1) < 3.0))
    return(MagickTrue);
  return(MagickFalse);
}
MagickExport MagickBooleanType PlasmaImage(Image *image,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *u_view,
    *v_view;

  MagickBooleanType
    status;

  RandomInfo
    *random_info;

  /*
    Fix: validate the image before dereferencing it — the original read
    image->debug before the NULL assertion and logged the identical trace
    event twice.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Seed the recursive plasma generator with one authentic view for writes
    and two virtual views for the neighbor reads.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
  u_view=AcquireVirtualCacheView(image,exception);
  v_view=AcquireVirtualCacheView(image,exception);
  random_info=AcquireRandomInfo();
  status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment,
    attenuate,depth,exception);
  random_info=DestroyRandomInfo(random_info);
  v_view=DestroyCacheView(v_view);
  u_view=DestroyCacheView(u_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l a r o i d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolaroidImage() simulates a Polaroid picture.
%
% The format of the PolaroidImage method is:
%
% Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
% const char *caption,const double angle,
%      const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o caption: the Polaroid caption.
%
% o angle: Apply the effect along this angle.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const char *caption,const double angle,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Border width is 1/25th of the longest dimension, at least 10 pixels;
    the frame starts two borders taller than the picture.
  */
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  if (caption != (const char *) NULL)
    {
      char
        geometry[MagickPathExtent],
        *text;

      DrawInfo
        *annotate_info;

      ImageInfo
        *image_info;

      MagickBooleanType
        status;

      ssize_t
        count;

      TypeMetric
        metrics;

      /*
        Generate caption image.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      image_info=AcquireImageInfo();
      annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
      text=InterpretImageProperties(image_info,(Image *) image,caption,
        exception);
      image_info=DestroyImageInfo(image_info);
      (void) CloneString(&annotate_info->text,text);
      /* Word-wrap the caption and size the caption strip to fit it. */
      count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,&metrics,
        &text,exception);
      status=SetImageExtent(caption_image,image->columns,(size_t) ((count+1)*
        (metrics.ascent-metrics.descent)+0.5),exception);
      if (status == MagickFalse)
        caption_image=DestroyImage(caption_image);
      else
        {
          caption_image->background_color=image->border_color;
          (void) SetImageBackgroundColor(caption_image,exception);
          (void) CloneString(&annotate_info->text,text);
          (void) FormatLocaleString(geometry,MagickPathExtent,"+0+%.20g",
            metrics.ascent);
          if (annotate_info->gravity == UndefinedGravity)
            (void) CloneString(&annotate_info->geometry,AcquireString(
              geometry));
          (void) AnnotateImage(caption_image,annotate_info,exception);
          height+=caption_image->rows;
        }
      annotate_info=DestroyDrawInfo(annotate_info);
      text=DestroyString(text);
    }
  /*
    Composite the picture (and caption, if any) onto the framed canvas.
  */
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image,exception);
  (void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum,
    quantum,exception);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,caption_image,OverCompositeOp,
        MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception);
      caption_image=DestroyImage(caption_image);
    }
  (void) QueryColorCompliance("none",AllCompliance,
    &picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception);
  /*
    Bend the picture: rotate 90 degrees, wave it, rotate back.
  */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,method,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  /*
    Cast a soft shadow, flop it, then composite the picture over it.
  */
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      /* Relies on DestroyImage() returning NULL, so NULL is returned. */
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      /* Relies on DestroyImage() returning NULL, so NULL is returned. */
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,picture_image,OverCompositeOp,
    MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorCompliance("none",AllCompliance,
    &polaroid_image->background_color,exception);
  /*
    Tilt the framed picture by the requested angle and trim the excess
    background.
  */
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p i a T o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SepiaToneImage() applies a special effect to the image, similar to the
% effect achieved in a photo darkroom by sepia toning. Threshold ranges from
% 0 to QuantumRange and is a measure of the extent of the sepia toning. A
% threshold of 80% is a good starting point for a reasonable tone.
%
% The format of the SepiaToneImage method is:
%
% Image *SepiaToneImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: the tone threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag "SepiaTone/Image"

  CacheView
    *image_view,
    *sepia_view;

  Image
    *sepia_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sepia_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse)
    {
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image; rows run in parallel when OpenMP is
    available.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,sepia_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity,
        tone;

      /*
        Derive red/green/blue tones from the pixel intensity relative to
        the threshold: red saturates first, green at 7/6 of the threshold,
        and blue is pulled down by threshold/6.
      */
      intensity=GetPixelIntensity(image,p);
      tone=intensity > threshold ? (double) QuantumRange : intensity+
        (double) QuantumRange-threshold;
      SetPixelRed(sepia_image,ClampToQuantum(tone),q);
      tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
        intensity+(double) QuantumRange-7.0*threshold/6.0;
      SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
      SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      tone=threshold/7.0;
      /*
        NOTE(review): these floor reads pass `image` while the pixel
        pointer `q` belongs to sepia_image — confirm the channel maps of
        the two images are guaranteed to agree here.
      */
      if ((double) GetPixelGreen(image,q) < tone)
        SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      if ((double) GetPixelBlue(image,q) < tone)
        SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(sepia_image);
    }
    if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SepiaToneImage)
#endif
        proceed=SetImageProgress(image,SepiaToneImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sepia_view=DestroyCacheView(sepia_view);
  image_view=DestroyCacheView(image_view);
  /* Normalize and stretch contrast to finish the darkroom-style tone. */
  (void) NormalizeImage(sepia_image,exception);
  (void) ContrastImage(sepia_image,MagickTrue,exception);
  if (status == MagickFalse)
    sepia_image=DestroyImage(sepia_image);
  return(sepia_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d o w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadowImage() simulates a shadow from the specified image and returns it.
%
% The format of the ShadowImage method is:
%
% Image *ShadowImage(const Image *image,const double alpha,
% const double sigma,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha: percentage transparency.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x_offset: the shadow x-offset.
%
% o y_offset: the shadow y-offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadowImage(const Image *image,const double alpha,
  const double sigma,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define ShadowImageTag "Shadow/Image"

  CacheView
    *image_view;

  ChannelType
    channel_mask;

  Image
    *border_image,
    *clone_image,
    *shadow_image;

  MagickBooleanType
    status;

  PixelInfo
    background_color;

  RectangleInfo
    border_info;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Clone the image and pad it with a "none"-colored border sized 2*sigma
    so the blurred shadow edge has room to spread.
  */
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(clone_image,sRGBColorspace,exception);
  (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod,
    exception);
  border_info.width=(size_t) floor(2.0*sigma+0.5);
  border_info.height=(size_t) floor(2.0*sigma+0.5);
  border_info.x=0;
  border_info.y=0;
  (void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color,
    exception);
  clone_image->alpha_trait=BlendPixelTrait;
  border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image == (Image *) NULL)
    return((Image *) NULL);
  if (border_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception);
  /*
    Shadow image: flatten every pixel to the background color, scaling the
    pixel's alpha by the requested shadow transparency percentage.
  */
  status=MagickTrue;
  background_color=border_image->background_color;
  background_color.alpha_trait=BlendPixelTrait;
  image_view=AcquireAuthenticCacheView(border_image,exception);
  for (y=0; y < (ssize_t) border_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) border_image->columns; x++)
    {
      if (border_image->alpha_trait != UndefinedPixelTrait)
        background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0;
      SetPixelViaPixelInfo(border_image,&background_color,q);
      q+=GetPixelChannels(border_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      border_image=DestroyImage(border_image);
      return((Image *) NULL);
    }
  /*
    Blur only the alpha channel to soften the shadow outline, then restore
    the original channel mask on the result.
  */
  channel_mask=SetImageChannelMask(border_image,AlphaChannel);
  shadow_image=BlurImage(border_image,0.0,sigma,exception);
  border_image=DestroyImage(border_image);
  if (shadow_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetPixelChannelMask(shadow_image,channel_mask);
  /*
    Encode the shadow offset in the page geometry so a later composite can
    place the shadow behind the source image.
  */
  if (shadow_image->page.width == 0)
    shadow_image->page.width=shadow_image->columns;
  if (shadow_image->page.height == 0)
    shadow_image->page.height=shadow_image->rows;
  shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
  shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
  return(shadow_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S k e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SketchImage() simulates a pencil sketch. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SketchImage() selects a suitable radius for you. Angle gives the angle
% of the sketch.
%
% The format of the SketchImage method is:
%
% Image *SketchImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the
% center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  CacheView
    *random_view;

  Image
    *blend_image,
    *blur_image,
    *dodge_image,
    *random_image,
    *sketch_image;

  MagickBooleanType
    status;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Fill a double-sized canvas with per-pixel random noise; the noise is the
    raw material for the pencil-stroke texture.
  */
  random_image=CloneImage(image,image->columns << 1,image->rows << 1,
    MagickTrue,exception);
  if (random_image == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  random_info=AcquireRandomInfoThreadSet();
  random_view=AcquireAuthenticCacheView(random_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    Only parallelize when the random secret key is ~0UL (reproducible
    per-thread generators); otherwise run single-threaded for determinism.
  */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(random_image,random_image,random_image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) random_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) random_image->columns; x++)
    {
      double
        value;

      register ssize_t
        i;

      /* Skip pixels excluded by the write mask. */
      if (GetPixelWriteMask(random_image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(random_image);
          continue;
        }
      /*
        One random value per pixel, replicated into every defined channel,
        so the noise is gray.
      */
      value=GetPseudoRandomValue(random_info[id]);
      for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=ClampToQuantum(QuantumRange*value);
      }
      q+=GetPixelChannels(random_image);
    }
    if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  random_view=DestroyCacheView(random_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    {
      /* DestroyImage() returns NULL, so this returns (Image *) NULL. */
      random_image=DestroyImage(random_image);
      return(random_image);
    }
  /*
    Streak the noise along the requested angle, then extract edges and
    normalize/negate/threshold them into the pencil-stroke layer.
  */
  blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
  random_image=DestroyImage(random_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  dodge_image=EdgeImage(blur_image,radius,exception);
  blur_image=DestroyImage(blur_image);
  if (dodge_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(dodge_image,exception);
  (void) NegateImage(dodge_image,MagickFalse,exception);
  (void) TransformImage(&dodge_image,(char *) NULL,"50%",exception);
  sketch_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sketch_image == (Image *) NULL)
    {
      dodge_image=DestroyImage(dodge_image);
      return((Image *) NULL);
    }
  /* Color-dodge the stroke layer over a clone of the original. */
  (void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp,
    MagickTrue,0,0,exception);
  dodge_image=DestroyImage(dodge_image);
  blend_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blend_image == (Image *) NULL)
    {
      sketch_image=DestroyImage(sketch_image);
      return((Image *) NULL);
    }
  if (blend_image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlpha(blend_image,TransparentAlpha,exception);
  /*
    Blend a fraction of the original back in ("20x80" source/destination
    weights) to restore a hint of the source colors.
  */
  (void) SetImageArtifact(blend_image,"compose:args","20x80");
  (void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue,
    0,0,exception);
  blend_image=DestroyImage(blend_image);
  return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o l a r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SolarizeImage() applies a special effect to the image, similar to the effect
% achieved in a photo darkroom by selectively exposing areas of photo
% sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a
% measure of the extent of the solarization.
%
% The format of the SolarizeImage method is:
%
% MagickBooleanType SolarizeImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the extent of the solarization.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag "Solarize/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Promote gray images to sRGB so all three channels can be inverted. */
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        PseudoClass: solarize the colormap entries directly; the pixel loop
        below still runs so indexed pixels resolve through the new map.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((double) image->colormap[i].red > threshold)
          image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((double) image->colormap[i].green > threshold)
          image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((double) image->colormap[i].blue > threshold)
          image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
    }
  /*
    Solarize image: invert every updatable channel value above threshold.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* Respect the per-pixel write mask. */
      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if ((double) q[i] > threshold)
          q[i]=QuantumRange-q[i];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* Serialize progress updates across threads. */
        #pragma omp critical (MagickCore_SolarizeImage)
#endif
        proceed=SetImageProgress(image,SolarizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e g a n o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SteganoImage() hides a digital watermark within the image. Recover
% the hidden watermark later to prove the authenticity of an image.
% Offset defines the start position within the image to hide the watermark.
%
% The format of the SteganoImage method is:
%
% Image *SteganoImage(const Image *image,Image *watermark,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o watermark: the watermark image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
  ExceptionInfo *exception)
{
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \
  | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag "Stegano/Image"

  CacheView
    *stegano_view,
    *watermark_view;

  Image
    *stegano_image;

  int
    c;

  MagickBooleanType
    status;

  PixelInfo
    pixel;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    depth,
    one;

  ssize_t
    i,
    j,
    k,
    y;

  /*
    Initialize steganographic image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(watermark != (const Image *) NULL);
  assert(watermark->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  one=1UL;
  stegano_image=CloneImage(image,0,0,MagickTrue,exception);
  if (stegano_image == (Image *) NULL)
    return((Image *) NULL);
  stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
  if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse)
    {
      stegano_image=DestroyImage(stegano_image);
      return((Image *) NULL);
    }
  /*
    Hide watermark in low-order bits of image.  Bit i of each watermark
    pixel's intensity is embedded as bit j of the red/green/blue channels in
    turn (c cycles 0..2), starting at pixel offset k (image->offset) and
    advancing one pixel per embedded bit.
  */
  c=0;
  i=0;
  j=0;
  depth=stegano_image->depth;
  k=stegano_image->offset;
  status=MagickTrue;
  watermark_view=AcquireVirtualCacheView(watermark,exception);
  stegano_view=AcquireAuthenticCacheView(stegano_image,exception);
  for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
  {
    for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
    {
      for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
      {
        ssize_t
          offset;

        (void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel,
          exception);
        /* Stop if the carrier image has run out of rows. */
        offset=k/(ssize_t) stegano_image->columns;
        if (offset >= (ssize_t) stegano_image->rows)
          break;
        q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
          stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
          exception);
        if (q == (Quantum *) NULL)
          break;
        switch (c)
        {
          case 0:
          {
            SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 1:
          {
            SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 2:
          {
            SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
        }
        if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
          break;
        c++;
        if (c == 3)
          c=0;
        k++;
        /*
          Wrap at the end of the carrier image.  FIX: the wrap point is the
          total pixel count (columns*rows); the previous columns*columns
          test was wrong for non-square images.
        */
        if (k == (ssize_t) (stegano_image->columns*stegano_image->rows))
          k=0;
        /* A full wrap back to the start offset moves to the next bit plane. */
        if (k == stegano_image->offset)
          j++;
      }
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
          (depth-i),depth);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  stegano_view=DestroyCacheView(stegano_view);
  watermark_view=DestroyCacheView(watermark_view);
  if (status == MagickFalse)
    stegano_image=DestroyImage(stegano_image);
  return(stegano_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e r e o A n a g l y p h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StereoAnaglyphImage() combines two images and produces a single image that
% is the composite of a left and right image of a stereo pair. Special
% red-green stereo glasses are required to view this effect.
%
% The format of the StereoAnaglyphImage method is:
%
% Image *StereoImage(const Image *left_image,const Image *right_image,
% ExceptionInfo *exception)
% Image *StereoAnaglyphImage(const Image *left_image,
% const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o left_image: the left image.
%
% o right_image: the right image.
%
% o exception: return any errors or warnings in this structure.
%
% o x_offset: amount, in pixels, by which the left image is offset to the
% right of the right image.
%
% o y_offset: amount, in pixels, by which the left image is offset to the
% bottom of the right image.
%
%
*/
MagickExport Image *StereoImage(const Image *left_image,
  const Image *right_image,ExceptionInfo *exception)
{
  Image
    *stereo_image;

  /*
    Convenience wrapper: an anaglyph with no offset between the two frames.
  */
  stereo_image=StereoAnaglyphImage(left_image,right_image,0,0,exception);
  return(stereo_image);
}
MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag "Stereo/Image"

  const Image
    *image;

  Image
    *stereo_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickCoreSignature);
  if (left_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* NOTE(review): duplicate of the right_image assertion above. */
  assert(right_image != (const Image *) NULL);
  image=left_image;
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse)
    {
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  (void) SetImageColorspace(stereo_image,sRGBColorspace,exception);
  /*
    Copy left image to red channel and right image to blue channel.  The
    left image is shifted by (x_offset,y_offset) via negative virtual-pixel
    coordinates.
  */
  status=MagickTrue;
  for (y=0; y < (ssize_t) stereo_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    register Quantum
      *magick_restrict r;

    p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) stereo_image->columns; x++)
    {
      /* Red from the left frame; green and blue from the right frame. */
      SetPixelRed(image,GetPixelRed(left_image,p),r);
      SetPixelGreen(image,GetPixelGreen(right_image,q),r);
      SetPixelBlue(image,GetPixelBlue(right_image,q),r);
      /* Average the two alphas when the output copies alpha. */
      if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0)
        SetPixelAlpha(image,(GetPixelAlpha(left_image,p)+
          GetPixelAlpha(right_image,q))/2,r);
      p+=GetPixelChannels(left_image);
      q+=GetPixelChannels(right_image);
      r+=GetPixelChannels(stereo_image);
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      break;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
          stereo_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  if (status == MagickFalse)
    stereo_image=DestroyImage(stereo_image);
  return(stereo_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S w i r l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SwirlImage() swirls the pixels about the center of the image, where
% degrees indicates the sweep of the arc through which each pixel is moved.
% You get a more dramatic effect as the degrees move from 1 to 360.
%
% The format of the SwirlImage method is:
%
% Image *SwirlImage(const Image *image,double degrees,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o degrees: Define the tightness of the swirling effect.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"

  CacheView
    *canvas_view,
    *interpolate_view,
    *swirl_view;

  Image
    *canvas,
    *swirl_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    radius;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize swirl image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas->alpha_trait == UndefinedPixelTrait) &&
      (canvas->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlphaChannel(canvas,OpaqueAlphaChannel,exception);
  swirl_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
    exception);
  if (swirl_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      swirl_image=DestroyImage(swirl_image);
      return((Image *) NULL);
    }
  /*
    Compute scaling factor.  The image is normalized onto a circle of
    `radius` by stretching the shorter axis, so the swirl operates within an
    ellipse inscribed in the image bounds.
  */
  center.x=(double) canvas->columns/2.0;
  center.y=(double) canvas->rows/2.0;
  radius=MagickMax(center.x,center.y);
  scale.x=1.0;
  scale.y=1.0;
  if (canvas->columns > canvas->rows)
    scale.y=(double) canvas->columns/(double) canvas->rows;
  else
    if (canvas->columns < canvas->rows)
      scale.x=(double) canvas->rows/(double) canvas->columns;
  degrees=(double) DegreesToRadians(degrees);
  /*
    Swirl image.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas,exception);
  interpolate_view=AcquireVirtualCacheView(image,exception);
  swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(canvas,swirl_image,canvas->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      if (GetPixelWriteMask(canvas,p) <= (QuantumRange/2))
        {
          SetPixelBackgoundColor(swirl_image,q);
          p+=GetPixelChannels(canvas);
          q+=GetPixelChannels(swirl_image);
          continue;
        }
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        {
          register ssize_t
            i;

          /* Outside the ellipse: copy the source pixel unchanged. */
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(canvas,i);
            PixelTrait traits = GetPixelChannelTraits(canvas,channel);
            PixelTrait swirl_traits = GetPixelChannelTraits(swirl_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (swirl_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(swirl_image,channel,p[i],q);
          }
        }
      else
        {
          double
            cosine,
            factor,
            sine;

          /*
            Swirl the pixel: rotation strength falls off quadratically from
            full `degrees` at the center to zero at the ellipse edge.
          */
          factor=1.0-sqrt((double) distance)/radius;
          sine=sin((double) (degrees*factor*factor));
          cosine=cos((double) (degrees*factor*factor));
          status=InterpolatePixelChannels(canvas,interpolate_view,swirl_image,
            method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),(double)
            ((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas);
      q+=GetPixelChannels(swirl_image);
    }
    if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SwirlImage)
#endif
        proceed=SetImageProgress(canvas,SwirlImageTag,progress++,canvas->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  swirl_view=DestroyCacheView(swirl_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas=DestroyImage(canvas);
  if (status == MagickFalse)
    swirl_image=DestroyImage(swirl_image);
  return(swirl_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TintImage() applies a color vector to each pixel in the image. The length
% of the vector is 0 for black and white and at its maximum for the midtones.
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
% The format of the TintImage method is:
%
% Image *TintImage(const Image *image,const char *blend,
% const PixelInfo *tint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: A color value used for tinting.
%
% o tint: A color value used for tinting.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *blend,
  const PixelInfo *tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"

  CacheView
    *image_view,
    *tint_view;

  double
    intensity;

  GeometryInfo
    geometry_info;

  Image
    *tint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    color_vector;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Allocate tint image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  tint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (tint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse)
    {
      tint_image=DestroyImage(tint_image);
      return((Image *) NULL);
    }
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsPixelInfoGray(tint) == MagickFalse))
    (void) SetImageColorspace(tint_image,sRGBColorspace,exception);
  /* A NULL blend string means "no tint": return the plain clone. */
  if (blend == (const char *) NULL)
    return(tint_image);
  /*
    Determine RGB values of the color.  The blend geometry supplies
    per-channel percentages: rho=red, sigma=green, xi=blue, psi=alpha
    (psi=black and chi=alpha for CMYK); missing values default to rho.
  */
  GetPixelInfo(image,&color_vector);
  flags=ParseGeometry(blend,&geometry_info);
  color_vector.red=geometry_info.rho;
  color_vector.green=geometry_info.rho;
  color_vector.blue=geometry_info.rho;
  color_vector.alpha=(MagickRealType) OpaqueAlpha;
  if ((flags & SigmaValue) != 0)
    color_vector.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    color_vector.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    color_vector.alpha=geometry_info.psi;
  if (image->colorspace == CMYKColorspace)
    {
      color_vector.black=geometry_info.rho;
      if ((flags & PsiValue) != 0)
        color_vector.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        color_vector.alpha=geometry_info.chi;
    }
  /* Center the vector on the tint color's intensity. */
  intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint);
  color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity);
  color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity);
  color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity);
  color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity);
  color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity);
  /*
    Tint image: add the color vector weighted by f(x)=1-4*(x-0.5)^2, which
    is 0 at black/white and maximal at the midtones.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,tint_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;

      double
        weight;

      GetPixelInfo(image,&pixel);
      /* Masked pixels get the zeroed pixel info (effectively untinted). */
      if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
        {
          SetPixelViaPixelInfo(tint_image,&pixel,q);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(tint_image);
          continue;
        }
      weight=QuantumScale*GetPixelRed(image,p)-0.5;
      pixel.red=(double) GetPixelRed(image,p)+color_vector.red*(1.0-(4.0*
        (weight*weight)));
      weight=QuantumScale*GetPixelGreen(image,p)-0.5;
      pixel.green=(double) GetPixelGreen(image,p)+color_vector.green*(1.0-(4.0*
        (weight*weight)));
      weight=QuantumScale*GetPixelBlue(image,p)-0.5;
      pixel.blue=(double) GetPixelBlue(image,p)+color_vector.blue*(1.0-(4.0*
        (weight*weight)));
      weight=QuantumScale*GetPixelBlack(image,p)-0.5;
      pixel.black=(double) GetPixelBlack(image,p)+color_vector.black*(1.0-(4.0*
        (weight*weight)));
      /* Alpha is copied unchanged. */
      pixel.alpha=GetPixelAlpha(image,p);
      SetPixelViaPixelInfo(tint_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(tint_image);
    }
    if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TintImage)
#endif
        proceed=SetImageProgress(image,TintImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  tint_view=DestroyCacheView(tint_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    tint_image=DestroyImage(tint_image);
  return(tint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V i g n e t t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% VignetteImage() softens the edges of the image in vignette style.
%
% The format of the VignetteImage method is:
%
% Image *VignetteImage(const Image *image,const double radius,
% const double sigma,const ssize_t x,const ssize_t y,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x, y: Define the x and y ellipse offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  char
    primitive[MagickPathExtent];

  DrawInfo
    *draw_info;

  Image
    *base,
    *oval,
    *result,
    *softened;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Work on a DirectClass copy of the input with blending enabled.
  */
  base=CloneImage(image,0,0,MagickTrue,exception);
  if (base == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(base,DirectClass,exception) == MagickFalse)
    {
      base=DestroyImage(base);
      return((Image *) NULL);
    }
  base->alpha_trait=BlendPixelTrait;
  /*
    Paint a white ellipse on a black canvas; this becomes the vignette mask.
    The ellipse is centered on the image and shrunk by (x,y).
  */
  oval=CloneImage(base,base->columns,base->rows,MagickTrue,exception);
  if (oval == (Image *) NULL)
    {
      base=DestroyImage(base);
      return((Image *) NULL);
    }
  (void) QueryColorCompliance("#000000",AllCompliance,&oval->background_color,
    exception);
  (void) SetImageBackgroundColor(oval,exception);
  draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->stroke,
    exception);
  (void) FormatLocaleString(primitive,MagickPathExtent,
    "ellipse %g,%g,%g,%g,0.0,360.0",image->columns/2.0,image->rows/2.0,
    image->columns/2.0-x,image->rows/2.0-y);
  draw_info->primitive=AcquireString(primitive);
  (void) DrawImage(oval,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  /*
    Soften the mask edge with a Gaussian blur.
  */
  softened=BlurImage(oval,radius,sigma,exception);
  oval=DestroyImage(oval);
  if (softened == (Image *) NULL)
    {
      base=DestroyImage(base);
      return((Image *) NULL);
    }
  softened->alpha_trait=UndefinedPixelTrait;
  /*
    Install the blurred mask as the alpha channel, flatten against the
    background, then restore the original colorspace.
  */
  (void) CompositeImage(base,softened,IntensityCompositeOp,MagickTrue,0,0,
    exception);
  softened=DestroyImage(softened);
  result=MergeImageLayers(base,FlattenLayer,exception);
  base=DestroyImage(base);
  if (result != (Image *) NULL)
    (void) TransformImageColorspace(result,image->colorspace,exception);
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveImage() creates a "ripple" effect in the image by shifting the pixels
% vertically along a sine wave whose amplitude and wavelength is specified
% by the given parameters.
%
% The format of the WaveImage method is:
%
% Image *WaveImage(const Image *image,const double amplitude,
% const double wave_length,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o amplitude, wave_length: Define the amplitude and wave length of the
% sine wave.
%
% o interpolate: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
  const double wave_length,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define WaveImageTag "Wave/Image"

  CacheView
    *canvas_view,
    *wave_view;

  Image
    *canvas,
    *wave_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    *sine_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize wave image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas->alpha_trait == UndefinedPixelTrait) &&
      (canvas->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlpha(canvas,OpaqueAlpha,exception);
  /* The output is taller by 2*|amplitude| to hold the full vertical sweep. */
  wave_image=CloneImage(canvas,canvas->columns,(size_t) (canvas->rows+2.0*
    fabs(amplitude)),MagickTrue,exception);
  if (wave_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      wave_image=DestroyImage(wave_image);
      return((Image *) NULL);
    }
  /*
    Allocate sine map: per-column vertical displacement, shifted by
    |amplitude| so values are non-negative.
  */
  sine_map=(double *) AcquireQuantumMemory((size_t) wave_image->columns,
    sizeof(*sine_map));
  if (sine_map == (double *) NULL)
    {
      canvas=DestroyImage(canvas);
      wave_image=DestroyImage(wave_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) wave_image->columns; i++)
    sine_map[i]=fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)/
      wave_length));
  /*
    Wave image: each destination pixel interpolates the source at the
    displaced coordinate; off-canvas samples resolve to the background.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas,exception);
  wave_view=AcquireAuthenticCacheView(wave_image,exception);
  (void) SetCacheViewVirtualPixelMethod(canvas_view,
    BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(canvas,wave_image,wave_image->rows,1)
#endif
  for (y=0; y < (ssize_t) wave_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) wave_image->columns; x++)
    {
      status=InterpolatePixelChannels(canvas,canvas_view,wave_image,method,
        (double) x,(double) (y-sine_map[x]),q,exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(wave_image);
    }
    if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_WaveImage)
#endif
        proceed=SetImageProgress(canvas,WaveImageTag,progress++,canvas->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  wave_view=DestroyCacheView(wave_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas=DestroyImage(canvas);
  sine_map=(double *) RelinquishMagickMemory(sine_map);
  if (status == MagickFalse)
    wave_image=DestroyImage(wave_image);
  return(wave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e l e t D e n o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveletDenoiseImage() removes noise from the image using a wavelet
% transform. The wavelet transform is a fast hierarchical scheme for
% processing an image using a set of consecutive lowpass and high_pass filters,
% followed by a decimation. This results in a decomposition into different
% scales which can be regarded as different “frequency bands”, determined by
% the mother wavelet. Adapted from dcraw.c by David Coffin.
%
% The format of the WaveletDenoiseImage method is:
%
% Image *WaveletDenoiseImage(const Image *image,const double threshold,
% const double softness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: set the threshold for smoothing.
%
% o softness: attenuate the smoothing threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void HatTransform(const float *magick_restrict pixels,
  const size_t stride,const size_t extent,const size_t scale,float *kernel)
{
  /*
    One level of the a-trous "hat" wavelet filter along a single row or
    column:

      kernel[i] = 0.25*(2*pixels[i] + pixels[i-scale] + pixels[i+scale])

    sampled with the given stride and with mirrored (reflected) boundaries.
    Adapted from dcraw's hat_transform().  `kernel` receives `extent` floats.
  */
  const float
    *magick_restrict p,
    *magick_restrict q,
    *magick_restrict r;

  register ssize_t
    i;

  p=pixels;
  q=pixels+scale*stride;
  r=pixels+scale*stride;
  /*
    Left boundary: i-scale is out of range, so mirror it to (scale-i); q walks
    backward from pixels[scale] while r walks forward, giving the reflected
    and the in-range neighbor respectively.  (*p + *p) is 2*pixels[i].
  */
  for (i=0; i < (ssize_t) scale; i++)
  {
    kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));
    p+=stride;
    q-=stride;
    r+=stride;
  }
  /*
    Interior: both neighbors i-scale and i+scale are in range.
  */
  for ( ; i < (ssize_t) (extent-scale); i++)
  {
    kernel[i]=0.25f*(2.0f*(*p)+*(p-scale*stride)+*(p+scale*stride));
    p+=stride;
  }
  /*
    Right boundary: i+scale is out of range; r mirrors it to
    2*(extent-1)-(i+scale), walking backward from pixels[extent-2].
  */
  q=p-scale*stride;
  r=pixels+stride*(extent-2);
  for ( ; i < (ssize_t) extent; i++)
  {
    kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));
    p+=stride;
    q+=stride;
    r-=stride;
  }
}
MagickExport Image *WaveletDenoiseImage(const Image *image,
  const double threshold,const double softness,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *noise_view;

  float
    *kernel,
    *pixels;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickSizeType
    number_pixels;

  MemoryInfo
    *pixels_info;

  ssize_t
    channel;

  /*
    Noise magnitudes of the a-trous detail bands for decomposition levels
    0..7 (only the first 5 are used below); they scale the user threshold.
  */
  static const float
    noise_levels[] = { 0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 0.0152f,
      0.0080f, 0.0044f };

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the OpenCL-accelerated path when it is available and succeeds.
  */
  noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse)
    {
      /*
        FIX: destroy the cloned image before throwing; previously it leaked
        on this error path.
      */
      noise_image=DestroyImage(noise_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Workspace: three planes of columns*rows floats (source/detail plus two
    ping-pong low-pass planes) and one hat-transform scratch row/column per
    OpenMP thread.
  */
  pixels_info=AcquireVirtualMemory(3*image->columns,image->rows*
    sizeof(*pixels));
  kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns),
    GetOpenMPMaximumThreads()*sizeof(*kernel));
  if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL))
    {
      if (kernel != (float *) NULL)
        kernel=(float *) RelinquishMagickMemory(kernel);
      if (pixels_info != (MemoryInfo *) NULL)
        pixels_info=RelinquishVirtualMemory(pixels_info);
      /*
        FIX: destroy the cloned image before throwing; previously it leaked
        on this error path.
      */
      noise_image=DestroyImage(noise_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(float *) GetVirtualMemoryBlob(pixels_info);
  status=MagickTrue;
  number_pixels=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
  for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++)
  {
    register ssize_t
      i;

    size_t
      high_pass,
      low_pass;

    ssize_t
      level,
      y;

    PixelChannel
      pixel_channel;

    PixelTrait
      traits;

    if (status == MagickFalse)
      continue;
    traits=GetPixelChannelTraits(image,(PixelChannel) channel);
    if (traits == UndefinedPixelTrait)
      continue;
    pixel_channel=GetPixelChannelChannel(image,channel);
    /*
      Only the color channels are denoised; other channels pass through
      unchanged from the clone.
    */
    if ((pixel_channel != RedPixelChannel) &&
        (pixel_channel != GreenPixelChannel) &&
        (pixel_channel != BluePixelChannel))
      continue;
    /*
      Copy channel from image to wavelet pixel array.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;

      ssize_t
        x;

      p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[i++]=(float) p[channel];
        p+=GetPixelChannels(image);
      }
    }
    /*
      Low pass filter outputs are called approximation kernel & high pass
      filters are referred to as detail kernel. The detail kernel
      have high values in the noisy parts of the signal.
    */
    high_pass=0;
    for (level=0; level < 5; level++)
    {
      double
        magnitude;

      ssize_t
        x,
        y;

      /*
        Ping-pong the low-pass plane between planes 1 and 2 of the workspace.
      */
      low_pass=(size_t) (number_pixels*((level & 0x01)+1));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        register float
          *magick_restrict p,
          *magick_restrict q;

        register ssize_t
          x;

        /*
          Horizontal hat transform of each row into this thread's scratch
          buffer, then copy into the low-pass plane.
        */
        p=kernel+id*image->columns;
        q=pixels+y*image->columns;
        HatTransform(q+high_pass,1,image->columns,(size_t) (1 << level),p);
        q+=low_pass;
        for (x=0; x < (ssize_t) image->columns; x++)
          *q++=(*p++);
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_number_threads(image,image,image->columns,1)
#endif
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();

        register float
          *magick_restrict p,
          *magick_restrict q;

        register ssize_t
          y;

        /*
          Vertical hat transform of each column of the low-pass plane,
          in place.
        */
        p=kernel+id*image->rows;
        q=pixels+x+low_pass;
        HatTransform(q,image->columns,image->rows,(size_t) (1 << level),p);
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          *q=(*p++);
          q+=image->columns;
        }
      }
      /*
        To threshold, each coefficient is compared to a threshold value and
        attenuated / shrunk by some factor.
      */
      magnitude=threshold*noise_levels[level];
      for (i=0; i < (ssize_t) number_pixels; ++i)
      {
        /*
          Detail coefficient = previous band minus the new low-pass band;
          soft-threshold it, then accumulate it back into plane 0.
        */
        pixels[high_pass+i]-=pixels[low_pass+i];
        if (pixels[high_pass+i] < -magnitude)
          pixels[high_pass+i]+=magnitude-softness*magnitude;
        else
          if (pixels[high_pass+i] > magnitude)
            pixels[high_pass+i]-=magnitude-softness*magnitude;
          else
            pixels[high_pass+i]*=softness;
        if (high_pass != 0)
          pixels[i]+=pixels[high_pass+i];
      }
      high_pass=low_pass;
    }
    /*
      Reconstruct image from the thresholded wavelet kernel.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      ssize_t
        offset;

      q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
        exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      offset=GetPixelChannelOffset(noise_image,pixel_channel);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        MagickRealType
          pixel;

        /*
          Denoised channel = accumulated details plus the residual low-pass
          band from the final level.
        */
        pixel=(MagickRealType) pixels[i]+pixels[low_pass+i];
        q[offset]=ClampToQuantum(pixel);
        i++;
        q+=GetPixelChannels(noise_image);
      }
      sync=SyncCacheViewAuthenticPixels(noise_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          NOTE(review): reuses AddNoiseImageTag for progress reporting, as
          upstream does — confirm a dedicated tag is not intended.
        */
        proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType)
          channel,GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  kernel=(float *) RelinquishMagickMemory(kernel);
  pixels_info=RelinquishVirtualMemory(pixels_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
|
GB_unop__identity_uint32_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint32_uint64
// op(A') function: GB_unop_tran__identity_uint32_uint64
// C type: uint32_t
// A type: uint64_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = (uint32_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = (uint32_t) Ax: apply the IDENTITY operator with a uint64->uint32
// typecast, entry by entry.  Cx and Ax may be aliased since each entry is
// read before its slot is written.
GrB_Info GB_unop_apply__identity_uint32_uint64
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // truncating cast: keep the low 32 bits of each entry
        Cx [p] = (uint32_t) Ax [p] ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop lives in GB_unop_transpose.c, specialized by the GB_*
// macros defined above; GB_PHASE_2_OF_2 selects the numeric (value-moving)
// phase of the two-phase transpose.
GrB_Info GB_unop_tran__identity_uint32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__eq_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__eq_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__eq_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__eq_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__eq_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_uint32)
// A*D function (colscale): GB (_AxD__eq_uint32)
// D*A function (rowscale): GB (_DxB__eq_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__eq_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__eq_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_uint32)
// C=scalar+B GB (_bind1st__eq_uint32)
// C=scalar+B' GB (_bind1st_tran__eq_uint32)
// C=A+scalar GB (_bind2nd__eq_uint32)
// C=A'+scalar GB (_bind2nd_tran__eq_uint32)
// C type: bool
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_UINT32 || GxB_NO_EQ_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// All-dense eWiseAdd; the numeric loop is in the included template,
// specialized by the EQ_UINT32 GB_* macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Stub: EQ is not a valid dense accumulator for this kernel, so the template
// is compiled out (#if 0) and the generated function is a no-op that
// reports success.
GrB_Info GB (_Cdense_accumB__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// Stub: compiled out (#if 0) for EQ; always reports success.
GrB_Info GB (_Cdense_accumb__eq_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C holds booleans (the EQ result), not the uint32_t input type
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C holds booleans (the EQ result), not the uint32_t input type
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__eq_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix slicing workspaces, released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__eq_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (EQ is commutative, so GB_BINOP_FLIP is 0 and this branch is taken.)
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__eq_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x == Bx [p]) for every entry present in B's bitmap: bind the
// scalar x as the first operand of EQ_UINT32.  Cx and Bx may be aliased.
GrB_Info GB (_bind1st__eq_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped (GB_void) arguments
    bool *Cx = (bool *) Cx_output ;
    const uint32_t *Bx = (const uint32_t *) Bx_input ;
    const uint32_t x = (*((const uint32_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Bb, k))
        {
            uint32_t bkj = GBX (Bx, k, false) ;
            Cx [k] = (x == bkj) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] == y) for every entry present in A's bitmap: bind the
// scalar y as the second operand of EQ_UINT32.  Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__eq_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped (GB_void) arguments
    bool *Cx = (bool *) Cx_output ;
    const uint32_t *Ax = (const uint32_t *) Ax_input ;
    const uint32_t y = (*((const uint32_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Ab, k))
        {
            uint32_t akj = GBX (Ax, k, false) ;
            Cx [k] = (akj == y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP so the shared transpose template computes (x == aij).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x == aij) ; \
}

GrB_Info GB (_bind1st_tran__eq_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows (preprocessor directives
    // take effect regardless of the surrounding #if/#else branches)
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP so the shared transpose template computes (aij == y).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij == y) ; \
}

GrB_Info GB (_bind2nd_tran__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
DRB010-lastprivatemissing-var-yes.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This loop has loop-carried output-dependence due to x=... at line 63.
The problem can be solved by using lastprivate(x) .
Data race pair: x@63:5 vs. x@63:5
*/
#include <stdio.h>
#include <stdlib.h>
/*
  DataRaceBench DRB010: the loop-carried output dependence on `x` is resolved
  here with lastprivate(x); the Cetus pragmas record the auto-parallelizer's
  analysis.  Optional argv[1] overrides the iteration count.
*/
int main(int argc, char * argv[])
{
  int i, x;
  int len = 10000;        /* default iteration count */
  int _ret_val_0;
  if (argc>1)
  {
    len=atoi(argv[1]);
  }
  #pragma cetus private(i)
  #pragma cetus lastprivate(x)
  #pragma loop name main#0
  #pragma cetus parallel
  /* lastprivate(x) gives x the value from the last iteration (len-1) */
  #pragma omp parallel for private(i) lastprivate(x)
  for (i=0; i<len; i ++ )
  {
    x=i;
  }
  printf("x=%d", x);
  _ret_val_0=0;
  return _ret_val_0;
}
|
GB_binop__second_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__second_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__second_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__second_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_fp32)
// A*D function (colscale): GB (_AxD__second_fp32)
// D*A function (rowscale): GB (_DxB__second_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__second_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__second_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_fp32)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = bij
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_FP32 || GxB_NO_SECOND_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// All numeric work is done by the included template, specialized by the
// GB_* macros above.  When GB_DISABLE is true the kernel is compiled out
// and GrB_NO_VALUE tells the caller to use the generic fallback instead.
GrB_Info GB (_Cdense_ewise3_noaccum__second_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// B has been pre-sliced into B_ntasks tasks (B_ek_slicing) for the
// B_nthreads threads used inside the included template.
GrB_Info GB (_Cdense_accumB__second_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Accumulates the scalar b (passed as an untyped GB_void pointer and
// reinterpreted here as float) into every entry of the dense matrix C.
// The loop itself lives in the included template.  Returns GrB_NO_VALUE
// when this specialized kernel is compiled out (GB_DISABLE), so the
// caller falls back to the generic kernel.
GrB_Info GB (_Cdense_accumb__second_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): a duplicate "return (GrB_SUCCESS) ;" used to follow the
// block above; it was unreachable (the inner return always executes
// first) and has been removed.
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__second_fp32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is C's value array; the template scales each column j of A by the
// diagonal entry D(j,j) and writes the result into Cx.
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__second_fp32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is C's value array; the template scales each row i of B by the
// diagonal entry D(i,i) and writes the result into Cx.
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Set-union element-wise add.  The mask M (optional) may be used
// structurally (Mask_struct) and/or complemented (Mask_comp).  The three
// GB_WERK workspaces are declared here so GB_add_template.c can slice
// M, A, and B per task; GB_FREE_WORK releases them afterwards.
GrB_Info GB (_AaddB__second_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Set-intersection element-wise multiply for the case where the result C
// is sparse or hypersparse; all work happens in the included meta file.
GrB_Info GB (_AemultB_08__second_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// For second_fp32 GB_BINOP_FLIP is 0 (see the macro block above), so only
// the non-flipped branch below is compiled in.
GrB_Info GB (_AemultB_02__second_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// The sparse mask M drives the iteration (M_ek_slicing / M_ntasks);
// Cp_kfirst maps each mask task to its first output position.
GrB_Info GB (_AemultB_04__second_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__second_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
// Disabled: second(x,bij) == bij, so bind1st degenerates to a plain copy
// of Bx into Cx and no specialized kernel is generated.
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap Bb
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
// Disabled: second(aij,y) == y, so bind2nd degenerates to broadcasting the
// scalar y over the pattern of A; no specialized kernel is generated.
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
// the "; ;" below is the expansion of GB_GETA, empty for SECOND
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// Disabled: second(x,aij) == aij, so this variant reduces to a transposed
// copy of A; no specialized kernel is generated.
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// Disabled: second(aij,y) == y, so GB_CAST_OP would simply store the
// bound scalar y at every transposed position; no kernel is generated.
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
integration.c | #include <stdio.h>
#include <omp.h>
#define N 10000
// Integrand: f(x) = 4 / (1 + x^2); its integral over [0,1] equals pi.
double f(double x) { return 4. / (1. + x * x); }
int main() {
// Composite trapezoidal rule on [a,b] with N panels of width h.
// Interior nodes are summed in parallel; the endpoint contribution
// (f(a) + f(b)) / 2 replaces the former magic constant 3.0 (f(0)=4,
// f(1)=2, so the value is identical) and keeps the formula valid if
// the interval or integrand ever changes.
double a = 0., b = 1.,
h = (b - a) / N, sum = 0.;
#pragma omp parallel for reduction(+:sum)
for (int i = 1; i < N; ++i) { sum += f(a + i * h); }
printf("%f\n", (sum + 0.5 * (f(a) + f(b))) * h);
return 0;
}
|
convolution_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 3x3, stride-1 convolution, scalar reference path.
// For each output channel p: fill with its bias, then accumulate the 3x3
// correlation over every input channel q.  The main loop emits two output
// rows per pass so that input rows r1 and r2 are shared between them.
static void conv3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q = 0; q < inch; q++)
{
float* outptr = out;
float* outptr2 = outptr + outw;
const float* img0 = bottom_blob.channel(q);
// 9 weights per (p,q) pair, one 3-tap row each in k0/k1/k2
const float* kernel0 = kernel + p * inch * 9 + q * 9;
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
const float* r3 = img0 + w * 3;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
int i = 0;
// two output rows per pass: row i reads r0..r2, row i+1 reads r1..r3
for (; i + 1 < outh; i += 2)
{
int remain = outw;
for (; remain > 0; remain--)
{
float sum = 0;
float sum2 = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
*outptr += sum;
*outptr2 += sum2;
r0++;
r1++;
r2++;
r3++;
outptr++;
outptr2++;
}
// skip the 2-pixel right border, then advance one extra input row
// because two output rows were consumed
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr += outw;
outptr2 += outw;
}
// remaining single output row (odd outh)
for (; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr += sum;
r0++;
r1++;
r2++;
outptr++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
}
// Winograd F(2,3) kernel transform: U = G * g * G^T for every (outch, inch)
// 3x3 kernel g, producing 4x4 transformed tiles stored in kernel_tm.
static void conv3x3s1_winograd23_transform_kernel_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
kernel_tm.create(4 * 4, inch, outch);
// G
const float ktm[4][3] = {
{1.0f, 0.0f, 0.0f},
{1.0f / 2, 1.0f / 2, 1.0f / 2},
{1.0f / 2, -1.0f / 2, 1.0f / 2},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h = G * g  (4x3 intermediate)
float tmp[4][3];
for (int i = 0; i < 4; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U = h * G^T  (4x4 result)
for (int j = 0; j < 4; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 4; i++)
{
kernel_tm0[j * 4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
}
// Winograd F(2,3) 3x3/stride-1 convolution:
// 1) pad input to a multiple of the 2x2 tile grid, 2) transform each 4x4
// input tile (V = B^T d B), 3) element-wise dot with the pre-transformed
// kernels, 4) inverse-transform (Y = A^T w A) + bias, 5) crop padding.
static void conv3x3s1_winograd23_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 2n+2, winograd F(2,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 1) / 2 * 2;
outh = (outh + 1) / 2 * 2;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 4;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(4 * 4, tiles, inch, 4u, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
//     {1.0f, 0.0f, -1.0f, 0.0f},
//     {0.0f, 1.0f, 1.00f, 0.0f},
//     {0.0f, -1.0f, 1.00f, 0.0f},
//     {0.0f, -1.0f, 0.00f, 1.0f}
// };
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const float* img = bottom_blob_bordered.channel(q);
float* out_tm0 = bottom_blob_tm.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
// tiles overlap by 2 rows/cols, hence the stride of 2 below
const float* r0 = img + w * j * 2;
const float* r1 = r0 + w;
const float* r2 = r1 + w;
const float* r3 = r2 + w;
for (int i = 0; i < nRowBlocks; i++)
{
#if __AVX__
__m128 _d0, _d1, _d2, _d3;
__m128 _w0, _w1, _w2, _w3;
// load
_d0 = _mm_loadu_ps(r0);
_d1 = _mm_loadu_ps(r1);
_d2 = _mm_loadu_ps(r2);
_d3 = _mm_loadu_ps(r3);
// w = B_t * d
_w0 = _mm_sub_ps(_d0, _d2);
_w1 = _mm_add_ps(_d1, _d2);
_w2 = _mm_sub_ps(_d2, _d1);
_w3 = _mm_sub_ps(_d3, _d1);
// transpose d to d_t
_MM_TRANSPOSE4_PS(_w0, _w1, _w2, _w3);
// d = B_t * d_t
_d0 = _mm_sub_ps(_w0, _w2);
_d1 = _mm_add_ps(_w1, _w2);
_d2 = _mm_sub_ps(_w2, _w1);
_d3 = _mm_sub_ps(_w3, _w1);
// save to out_tm
_mm_storeu_ps(out_tm0, _d0);
_mm_storeu_ps(out_tm0 + 4, _d1);
_mm_storeu_ps(out_tm0 + 8, _d2);
_mm_storeu_ps(out_tm0 + 12, _d3);
#else
// scalar fallback: same B^T d B computed with explicit transposes
float d0[4], d1[4], d2[4], d3[4];
float w0[4], w1[4], w2[4], w3[4];
float t0[4], t1[4], t2[4], t3[4];
// load
for (int n = 0; n < 4; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
}
// w = B_t * d
for (int n = 0; n < 4; n++)
{
w0[n] = d0[n] - d2[n];
w1[n] = d1[n] + d2[n];
w2[n] = d2[n] - d1[n];
w3[n] = d3[n] - d1[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
}
// d = B_t * d_t
for (int n = 0; n < 4; n++)
{
d0[n] = t0[n] - t2[n];
d1[n] = t1[n] + t2[n];
d2[n] = t2[n] - t1[n];
d3[n] = t3[n] - t1[n];
}
// save to out_tm
for (int n = 0; n < 4; n++)
{
out_tm0[n] = d0[n];
out_tm0[n + 4] = d1[n];
out_tm0[n + 8] = d2[n];
out_tm0[n + 12] = d3[n];
}
#endif
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
out_tm0 += 16;
}
}
}
}
// release the padded copy early; only the transformed blob is needed now
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 4;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);
// process output channels 4 at a time; leftovers handled below
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p + 1);
Mat out2_tm = top_blob_tm.channel(p + 2);
Mat out3_tm = top_blob_tm.channel(p + 3);
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p + 1);
const Mat kernel2_tm = kernel_tm.channel(p + 2);
const Mat kernel3_tm = kernel_tm.channel(p + 3);
for (int i = 0; i < tiles; i++)
{
float* output0_tm = out0_tm.row(i);
float* output1_tm = out1_tm.row(i);
float* output2_tm = out2_tm.row(i);
float* output3_tm = out3_tm.row(i);
#if __AVX__
float zero_val = 0.f;
__m256 _sum0 = _mm256_broadcast_ss(&zero_val);
__m256 _sum0n = _mm256_broadcast_ss(&zero_val);
__m256 _sum1 = _mm256_broadcast_ss(&zero_val);
__m256 _sum1n = _mm256_broadcast_ss(&zero_val);
__m256 _sum2 = _mm256_broadcast_ss(&zero_val);
__m256 _sum2n = _mm256_broadcast_ss(&zero_val);
__m256 _sum3 = _mm256_broadcast_ss(&zero_val);
__m256 _sum3n = _mm256_broadcast_ss(&zero_val);
int q = 0;
// 4 input channels per iteration; each 16-float tile is held in a
// pair of 8-lane registers (_sumX / _sumXn)
for (; q + 3 < inch; q += 4)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
const float* r3 = bottom_blob_tm.channel(q + 3).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
__m256 _r0 = _mm256_loadu_ps(r0);
__m256 _r0n = _mm256_loadu_ps(r0 + 8);
// k0
__m256 _k0 = _mm256_loadu_ps(k0);
__m256 _k0n = _mm256_loadu_ps(k0 + 8);
__m256 _k1 = _mm256_loadu_ps(k1);
__m256 _k1n = _mm256_loadu_ps(k1 + 8);
__m256 _k2 = _mm256_loadu_ps(k2);
__m256 _k2n = _mm256_loadu_ps(k2 + 8);
__m256 _k3 = _mm256_loadu_ps(k3);
__m256 _k3n = _mm256_loadu_ps(k3 + 8);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
// k1
_r0 = _mm256_loadu_ps(r1);
_r0n = _mm256_loadu_ps(r1 + 8);
_k0 = _mm256_loadu_ps(k0 + 16);
_k0n = _mm256_loadu_ps(k0 + 24);
_k1 = _mm256_loadu_ps(k1 + 16);
_k1n = _mm256_loadu_ps(k1 + 24);
_k2 = _mm256_loadu_ps(k2 + 16);
_k2n = _mm256_loadu_ps(k2 + 24);
_k3 = _mm256_loadu_ps(k3 + 16);
_k3n = _mm256_loadu_ps(k3 + 24);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
// k2
_r0 = _mm256_loadu_ps(r2);
_r0n = _mm256_loadu_ps(r2 + 8);
_k0 = _mm256_loadu_ps(k0 + 32);
_k0n = _mm256_loadu_ps(k0 + 40);
_k1 = _mm256_loadu_ps(k1 + 32);
_k1n = _mm256_loadu_ps(k1 + 40);
_k2 = _mm256_loadu_ps(k2 + 32);
_k2n = _mm256_loadu_ps(k2 + 40);
_k3 = _mm256_loadu_ps(k3 + 32);
_k3n = _mm256_loadu_ps(k3 + 40);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
// k3
_r0 = _mm256_loadu_ps(r3);
_r0n = _mm256_loadu_ps(r3 + 8);
_k0 = _mm256_loadu_ps(k0 + 48);
_k0n = _mm256_loadu_ps(k0 + 56);
_k1 = _mm256_loadu_ps(k1 + 48);
_k1n = _mm256_loadu_ps(k1 + 56);
_k2 = _mm256_loadu_ps(k2 + 48);
_k2n = _mm256_loadu_ps(k2 + 56);
_k3 = _mm256_loadu_ps(k3 + 48);
_k3n = _mm256_loadu_ps(k3 + 56);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
}
for (; q < inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
__m256 _r0 = _mm256_loadu_ps(r0);
__m256 _r0n = _mm256_loadu_ps(r0 + 8);
__m256 _k0 = _mm256_loadu_ps(k0);
__m256 _k0n = _mm256_loadu_ps(k0 + 8);
__m256 _k1 = _mm256_loadu_ps(k1);
__m256 _k1n = _mm256_loadu_ps(k1 + 8);
__m256 _k2 = _mm256_loadu_ps(k2);
__m256 _k2n = _mm256_loadu_ps(k2 + 8);
__m256 _k3 = _mm256_loadu_ps(k3);
__m256 _k3n = _mm256_loadu_ps(k3 + 8);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
}
_mm256_storeu_ps(output0_tm, _sum0);
_mm256_storeu_ps(output0_tm + 8, _sum0n);
_mm256_storeu_ps(output1_tm, _sum1);
_mm256_storeu_ps(output1_tm + 8, _sum1n);
_mm256_storeu_ps(output2_tm, _sum2);
_mm256_storeu_ps(output2_tm + 8, _sum2n);
_mm256_storeu_ps(output3_tm, _sum3);
_mm256_storeu_ps(output3_tm + 8, _sum3n);
#else
// scalar fallback: same 16-element tile dot products
float sum0[16] = {0.0f};
float sum1[16] = {0.0f};
float sum2[16] = {0.0f};
float sum3[16] = {0.0f};
int q = 0;
for (; q + 3 < inch; q += 4)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
const float* r3 = bottom_blob_tm.channel(q + 3).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
// kX rows for q, q+1, q+2, q+3 are 16 floats apart, hence the
// +=16 hops followed by the -= 16*3 reset per element n
for (int n = 0; n < 16; n++)
{
sum0[n] += r0[n] * k0[n];
k0 += 16;
sum0[n] += r1[n] * k0[n];
k0 += 16;
sum0[n] += r2[n] * k0[n];
k0 += 16;
sum0[n] += r3[n] * k0[n];
k0 -= 16 * 3;
sum1[n] += r0[n] * k1[n];
k1 += 16;
sum1[n] += r1[n] * k1[n];
k1 += 16;
sum1[n] += r2[n] * k1[n];
k1 += 16;
sum1[n] += r3[n] * k1[n];
k1 -= 16 * 3;
sum2[n] += r0[n] * k2[n];
k2 += 16;
sum2[n] += r1[n] * k2[n];
k2 += 16;
sum2[n] += r2[n] * k2[n];
k2 += 16;
sum2[n] += r3[n] * k2[n];
k2 -= 16 * 3;
sum3[n] += r0[n] * k3[n];
k3 += 16;
sum3[n] += r1[n] * k3[n];
k3 += 16;
sum3[n] += r2[n] * k3[n];
k3 += 16;
sum3[n] += r3[n] * k3[n];
k3 -= 16 * 3;
}
}
for (; q < inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
for (int n = 0; n < 16; n++)
{
sum0[n] += r0[n] * k0[n];
sum1[n] += r0[n] * k1[n];
sum2[n] += r0[n] * k2[n];
sum3[n] += r0[n] * k3[n];
}
}
for (int n = 0; n < 16; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
#endif
}
}
// leftover output channels (outch % 4), one at a time
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int i = 0; i < tiles; i++)
{
float* output0_tm = out0_tm.row(i);
float sum0[16] = {0.0f};
int q = 0;
for (; q + 3 < inch; q += 4)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
const float* r3 = bottom_blob_tm.channel(q + 3).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel0_tm.row(q + 1);
const float* k2 = kernel0_tm.row(q + 2);
const float* k3 = kernel0_tm.row(q + 3);
for (int n = 0; n < 16; n++)
{
sum0[n] += r0[n] * k0[n];
sum0[n] += r1[n] * k1[n];
sum0[n] += r2[n] * k2[n];
sum0[n] += r3[n] * k3[n];
}
}
for (; q < inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* k0 = kernel0_tm.row(q);
for (int n = 0; n < 16; n++)
{
sum0[n] += r0[n] * k0[n];
}
}
for (int n = 0; n < 16; n++)
{
output0_tm[n] = sum0[n];
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
}
{
// AT
// const float itm[2][4] = {
//     {1.0f, 1.0f, 1.0f, 0.0f},
//     {0.0f, 1.0f, -1.0f, 1.0f}
// };
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 4;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out_tm = top_blob_tm.channel(p);
Mat out = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
for (int j = 0; j < nColBlocks; j++)
{
float* outRow0 = out.row(j * 2);
float* outRow1 = out.row(j * 2 + 1);
for (int i = 0; i < nRowBlocks; i++)
{
float* out_tile = out_tm.row(j * nRowBlocks + i);
float s0[4], s1[4], s2[4], s3[4];
float w0[4], w1[4];
float d0[2], d1[2], d2[2], d3[2];
float o0[2], o1[2];
// load
for (int n = 0; n < 4; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 4];
s2[n] = out_tile[n + 8];
s3[n] = out_tile[n + 12];
}
// w = A_T * W
for (int n = 0; n < 4; n++)
{
w0[n] = s0[n] + s1[n] + s2[n];
w1[n] = s1[n] - s2[n] + s3[n];
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d1[0] = w0[1];
d1[1] = w1[1];
d2[0] = w0[2];
d2[1] = w1[2];
d3[0] = w0[3];
d3[1] = w1[3];
}
// Y = A_T * w_t  (bias is added once per output pixel here)
for (int n = 0; n < 2; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + bias0;
o1[n] = d1[n] - d2[n] + d3[n] + bias0;
}
// save to top blob tm
outRow0[0] = o0[0];
outRow0[1] = o0[1];
outRow1[0] = o1[0];
outRow1[1] = o1[1];
outRow0 += 2;
outRow1 += 2;
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Winograd F(4,3) kernel transform: U = G * g * G^T (6x6 per kernel), then
// repack the 36 coefficients into 9 groups of 4 (r = 0..8), interleaving
// output channels in runs of 8, 4, then 1 for the compute kernel's layout.
static void conv3x3s1_winograd43_transform_kernel_sse(const Mat& kernel, std::vector<Mat>& kernel_tm2, int inch, int outch)
{
Mat kernel_tm(6 * 6, inch, outch);
// G
const float ktm[6][3] = {
{1.0f / 4, 0.0f, 0.0f},
{-1.0f / 6, -1.0f / 6, -1.0f / 6},
{-1.0f / 6, 1.0f / 6, -1.0f / 6},
{1.0f / 24, 1.0f / 12, 1.0f / 6},
{1.0f / 24, -1.0f / 12, 1.0f / 6},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h = G * g  (6x3 intermediate)
float tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U = h * G^T  (6x6 result)
for (int j = 0; j < 6; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// repack: one Mat per coefficient group r, pushed in order onto kernel_tm2
for (int r = 0; r < 9; r++)
{
Mat kernel_tm_test(4 * 8, inch, outch / 8 + (outch % 8) / 4 + outch % 4);
int p = 0;
// 8 output channels interleaved per destination channel
for (; p + 7 < outch; p += 8)
{
const float* kernel0 = (const float*)kernel_tm.channel(p);
const float* kernel1 = (const float*)kernel_tm.channel(p + 1);
const float* kernel2 = (const float*)kernel_tm.channel(p + 2);
const float* kernel3 = (const float*)kernel_tm.channel(p + 3);
const float* kernel4 = (const float*)kernel_tm.channel(p + 4);
const float* kernel5 = (const float*)kernel_tm.channel(p + 5);
const float* kernel6 = (const float*)kernel_tm.channel(p + 6);
const float* kernel7 = (const float*)kernel_tm.channel(p + 7);
float* ktmp = kernel_tm_test.channel(p / 8);
for (int q = 0; q < inch; q++)
{
ktmp[0] = kernel0[r * 4 + 0];
ktmp[1] = kernel0[r * 4 + 1];
ktmp[2] = kernel0[r * 4 + 2];
ktmp[3] = kernel0[r * 4 + 3];
ktmp[4] = kernel1[r * 4 + 0];
ktmp[5] = kernel1[r * 4 + 1];
ktmp[6] = kernel1[r * 4 + 2];
ktmp[7] = kernel1[r * 4 + 3];
ktmp[8] = kernel2[r * 4 + 0];
ktmp[9] = kernel2[r * 4 + 1];
ktmp[10] = kernel2[r * 4 + 2];
ktmp[11] = kernel2[r * 4 + 3];
ktmp[12] = kernel3[r * 4 + 0];
ktmp[13] = kernel3[r * 4 + 1];
ktmp[14] = kernel3[r * 4 + 2];
ktmp[15] = kernel3[r * 4 + 3];
ktmp[16] = kernel4[r * 4 + 0];
ktmp[17] = kernel4[r * 4 + 1];
ktmp[18] = kernel4[r * 4 + 2];
ktmp[19] = kernel4[r * 4 + 3];
ktmp[20] = kernel5[r * 4 + 0];
ktmp[21] = kernel5[r * 4 + 1];
ktmp[22] = kernel5[r * 4 + 2];
ktmp[23] = kernel5[r * 4 + 3];
ktmp[24] = kernel6[r * 4 + 0];
ktmp[25] = kernel6[r * 4 + 1];
ktmp[26] = kernel6[r * 4 + 2];
ktmp[27] = kernel6[r * 4 + 3];
ktmp[28] = kernel7[r * 4 + 0];
ktmp[29] = kernel7[r * 4 + 1];
ktmp[30] = kernel7[r * 4 + 2];
ktmp[31] = kernel7[r * 4 + 3];
ktmp += 32;
kernel0 += 36;
kernel1 += 36;
kernel2 += 36;
kernel3 += 36;
kernel4 += 36;
kernel5 += 36;
kernel6 += 36;
kernel7 += 36;
}
}
// 4 output channels interleaved
for (; p + 3 < outch; p += 4)
{
const float* kernel0 = (const float*)kernel_tm.channel(p);
const float* kernel1 = (const float*)kernel_tm.channel(p + 1);
const float* kernel2 = (const float*)kernel_tm.channel(p + 2);
const float* kernel3 = (const float*)kernel_tm.channel(p + 3);
float* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4);
for (int q = 0; q < inch; q++)
{
ktmp[0] = kernel0[r * 4 + 0];
ktmp[1] = kernel0[r * 4 + 1];
ktmp[2] = kernel0[r * 4 + 2];
ktmp[3] = kernel0[r * 4 + 3];
ktmp[4] = kernel1[r * 4 + 0];
ktmp[5] = kernel1[r * 4 + 1];
ktmp[6] = kernel1[r * 4 + 2];
ktmp[7] = kernel1[r * 4 + 3];
ktmp[8] = kernel2[r * 4 + 0];
ktmp[9] = kernel2[r * 4 + 1];
ktmp[10] = kernel2[r * 4 + 2];
ktmp[11] = kernel2[r * 4 + 3];
ktmp[12] = kernel3[r * 4 + 0];
ktmp[13] = kernel3[r * 4 + 1];
ktmp[14] = kernel3[r * 4 + 2];
ktmp[15] = kernel3[r * 4 + 3];
ktmp += 16;
kernel0 += 36;
kernel1 += 36;
kernel2 += 36;
kernel3 += 36;
}
}
// remaining single output channels
for (; p < outch; p++)
{
const float* kernel0 = (const float*)kernel_tm.channel(p);
float* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4 + p % 4);
for (int q = 0; q < inch; q++)
{
ktmp[0] = kernel0[r * 4 + 0];
ktmp[1] = kernel0[r * 4 + 1];
ktmp[2] = kernel0[r * 4 + 2];
ktmp[3] = kernel0[r * 4 + 3];
ktmp += 4;
kernel0 += 36;
}
}
kernel_tm2.push_back(kernel_tm_test);
}
}
// 3x3 stride-1 convolution using Winograd F(4,3): each 6x6 input tile yields a
// 4x4 output tile with 36 multiplies per input channel instead of 144.
// Pipeline: pad input -> transform tiles (B^T d B) -> per-tile dot with the
// pre-transformed kernels in kernel_tm_test -> inverse transform (A^T w A) -> crop.
// FIX(review): the scalar (no-SIMD) fallback of the last remainder loop cast the
// float product to int (`(int)r0[n] * kptr[n]`) — a leftover from an int8 variant
// (the commented-out reference below carries the same cast) that truncated
// float accumulation. The cast is removed.
static void conv3x3s1_winograd43_sse(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat>& kernel_tm_test, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    const float* bias = _bias;

    // pad to 4n+2, winograd F(4,3): output rounded up to a multiple of 4,
    // input needs 2 extra border columns/rows for the overlapping 6-wide tiles
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;
    w = outw + 2;
    h = outh + 2;
    Option opt_b = opt;
    opt_b.blob_allocator = opt.workspace_allocator;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;
        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;
        const int tiles = nColBlocks * nRowBlocks;
        // layout: 9 groups of `tiles` channels, 4 floats per (tile, input channel)
        bottom_blob_tm.create(4, inch, tiles * 9, elemsize, opt.workspace_allocator);
        // BT
        // const float itm[4][4] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
        // };
        // 0 = 4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r03 + r04
        // 2 = 4 * (r01 - r02) - r03 + r04
        // 3 = -2 * r01 - r02 + 2 * r03 + r04
        // 4 = 2 * r01 - r02 - 2 * r03 + r04
        // 5 = 4 * r01 - 5 * r03 + r05
#if __AVX__
        __m256 _1_n = _mm256_set1_ps(-1);
        __m256 _2_p = _mm256_set1_ps(2);
        __m256 _2_n = _mm256_set1_ps(-2);
        __m256 _4_p = _mm256_set1_ps(4);
        __m256 _4_n = _mm256_set1_ps(-4);
        __m256 _5_n = _mm256_set1_ps(-5);
#endif
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const float* img = bottom_blob_bordered.channel(q);
            for (int j = 0; j < nColBlocks; j++)
            {
                // six consecutive input rows feeding one row of tiles
                const float* r0 = img + w * j * 4;
                const float* r1 = r0 + w;
                const float* r2 = r1 + w;
                const float* r3 = r2 + w;
                const float* r4 = r3 + w;
                const float* r5 = r4 + w;
                for (int i = 0; i < nRowBlocks; i++)
                {
                    // the 36 transformed values of a tile are scattered over 9
                    // channel groups, 4 values each
                    float* out_tm0 = bottom_blob_tm.channel(tiles * 0 + j * nRowBlocks + i).row(q);
                    float* out_tm1 = bottom_blob_tm.channel(tiles * 1 + j * nRowBlocks + i).row(q);
                    float* out_tm2 = bottom_blob_tm.channel(tiles * 2 + j * nRowBlocks + i).row(q);
                    float* out_tm3 = bottom_blob_tm.channel(tiles * 3 + j * nRowBlocks + i).row(q);
                    float* out_tm4 = bottom_blob_tm.channel(tiles * 4 + j * nRowBlocks + i).row(q);
                    float* out_tm5 = bottom_blob_tm.channel(tiles * 5 + j * nRowBlocks + i).row(q);
                    float* out_tm6 = bottom_blob_tm.channel(tiles * 6 + j * nRowBlocks + i).row(q);
                    float* out_tm7 = bottom_blob_tm.channel(tiles * 7 + j * nRowBlocks + i).row(q);
                    float* out_tm8 = bottom_blob_tm.channel(tiles * 8 + j * nRowBlocks + i).row(q);
#if __AVX__
                    __m256 _d0, _d1, _d2, _d3, _d4, _d5;
                    __m256 _w0, _w1, _w2, _w3, _w4, _w5;
                    __m256 _t0, _t1, _t2, _t3, _t4, _t5;
                    __m256 _n0, _n1, _n2, _n3, _n4, _n5;
                    // load
                    _d0 = _mm256_loadu_ps(r0);
                    _d1 = _mm256_loadu_ps(r1);
                    _d2 = _mm256_loadu_ps(r2);
                    _d3 = _mm256_loadu_ps(r3);
                    _d4 = _mm256_loadu_ps(r4);
                    _d5 = _mm256_loadu_ps(r5);
                    // w = B_t * d
                    _w0 = _mm256_mul_ps(_d0, _4_p);
                    _w0 = _mm256_fmadd_ps(_d2, _5_n, _w0);
                    _w0 = _mm256_add_ps(_w0, _d4);
                    _w1 = _mm256_mul_ps(_d1, _4_n);
                    _w1 = _mm256_fmadd_ps(_d2, _4_n, _w1);
                    _w1 = _mm256_add_ps(_w1, _d3);
                    _w1 = _mm256_add_ps(_w1, _d4);
                    _w2 = _mm256_mul_ps(_d1, _4_p);
                    _w2 = _mm256_fmadd_ps(_d2, _4_n, _w2);
                    _w2 = _mm256_fmadd_ps(_d3, _1_n, _w2);
                    _w2 = _mm256_add_ps(_w2, _d4);
                    _w3 = _mm256_mul_ps(_d1, _2_n);
                    _w3 = _mm256_fmadd_ps(_d2, _1_n, _w3);
                    _w3 = _mm256_fmadd_ps(_d3, _2_p, _w3);
                    _w3 = _mm256_add_ps(_w3, _d4);
                    _w4 = _mm256_mul_ps(_d1, _2_p);
                    _w4 = _mm256_fmadd_ps(_d2, _1_n, _w4);
                    _w4 = _mm256_fmadd_ps(_d3, _2_n, _w4);
                    _w4 = _mm256_add_ps(_w4, _d4);
                    _w5 = _mm256_mul_ps(_d1, _4_p);
                    _w5 = _mm256_fmadd_ps(_d3, _5_n, _w5);
                    _w5 = _mm256_add_ps(_w5, _d5);
                    // transpose d to d_t
#ifdef _WIN32
                    {
                        _t0.m256_f32[0] = _w0.m256_f32[0];
                        _t1.m256_f32[0] = _w0.m256_f32[1];
                        _t2.m256_f32[0] = _w0.m256_f32[2];
                        _t3.m256_f32[0] = _w0.m256_f32[3];
                        _t4.m256_f32[0] = _w0.m256_f32[4];
                        _t5.m256_f32[0] = _w0.m256_f32[5];
                        _t0.m256_f32[1] = _w1.m256_f32[0];
                        _t1.m256_f32[1] = _w1.m256_f32[1];
                        _t2.m256_f32[1] = _w1.m256_f32[2];
                        _t3.m256_f32[1] = _w1.m256_f32[3];
                        _t4.m256_f32[1] = _w1.m256_f32[4];
                        _t5.m256_f32[1] = _w1.m256_f32[5];
                        _t0.m256_f32[2] = _w2.m256_f32[0];
                        _t1.m256_f32[2] = _w2.m256_f32[1];
                        _t2.m256_f32[2] = _w2.m256_f32[2];
                        _t3.m256_f32[2] = _w2.m256_f32[3];
                        _t4.m256_f32[2] = _w2.m256_f32[4];
                        _t5.m256_f32[2] = _w2.m256_f32[5];
                        _t0.m256_f32[3] = _w3.m256_f32[0];
                        _t1.m256_f32[3] = _w3.m256_f32[1];
                        _t2.m256_f32[3] = _w3.m256_f32[2];
                        _t3.m256_f32[3] = _w3.m256_f32[3];
                        _t4.m256_f32[3] = _w3.m256_f32[4];
                        _t5.m256_f32[3] = _w3.m256_f32[5];
                        _t0.m256_f32[4] = _w4.m256_f32[0];
                        _t1.m256_f32[4] = _w4.m256_f32[1];
                        _t2.m256_f32[4] = _w4.m256_f32[2];
                        _t3.m256_f32[4] = _w4.m256_f32[3];
                        _t4.m256_f32[4] = _w4.m256_f32[4];
                        _t5.m256_f32[4] = _w4.m256_f32[5];
                        _t0.m256_f32[5] = _w5.m256_f32[0];
                        _t1.m256_f32[5] = _w5.m256_f32[1];
                        _t2.m256_f32[5] = _w5.m256_f32[2];
                        _t3.m256_f32[5] = _w5.m256_f32[3];
                        _t4.m256_f32[5] = _w5.m256_f32[4];
                        _t5.m256_f32[5] = _w5.m256_f32[5];
                    }
#else
                    {
                        _t0[0] = _w0[0];
                        _t1[0] = _w0[1];
                        _t2[0] = _w0[2];
                        _t3[0] = _w0[3];
                        _t4[0] = _w0[4];
                        _t5[0] = _w0[5];
                        _t0[1] = _w1[0];
                        _t1[1] = _w1[1];
                        _t2[1] = _w1[2];
                        _t3[1] = _w1[3];
                        _t4[1] = _w1[4];
                        _t5[1] = _w1[5];
                        _t0[2] = _w2[0];
                        _t1[2] = _w2[1];
                        _t2[2] = _w2[2];
                        _t3[2] = _w2[3];
                        _t4[2] = _w2[4];
                        _t5[2] = _w2[5];
                        _t0[3] = _w3[0];
                        _t1[3] = _w3[1];
                        _t2[3] = _w3[2];
                        _t3[3] = _w3[3];
                        _t4[3] = _w3[4];
                        _t5[3] = _w3[5];
                        _t0[4] = _w4[0];
                        _t1[4] = _w4[1];
                        _t2[4] = _w4[2];
                        _t3[4] = _w4[3];
                        _t4[4] = _w4[4];
                        _t5[4] = _w4[5];
                        _t0[5] = _w5[0];
                        _t1[5] = _w5[1];
                        _t2[5] = _w5[2];
                        _t3[5] = _w5[3];
                        _t4[5] = _w5[4];
                        _t5[5] = _w5[5];
                    }
#endif
                    // d = B_t * d_t
                    _n0 = _mm256_mul_ps(_t0, _4_p);
                    _n0 = _mm256_fmadd_ps(_t2, _5_n, _n0);
                    _n0 = _mm256_add_ps(_n0, _t4);
                    _n1 = _mm256_mul_ps(_t1, _4_n);
                    _n1 = _mm256_fmadd_ps(_t2, _4_n, _n1);
                    _n1 = _mm256_add_ps(_n1, _t3);
                    _n1 = _mm256_add_ps(_n1, _t4);
                    _n2 = _mm256_mul_ps(_t1, _4_p);
                    _n2 = _mm256_fmadd_ps(_t2, _4_n, _n2);
                    _n2 = _mm256_fmadd_ps(_t3, _1_n, _n2);
                    _n2 = _mm256_add_ps(_n2, _t4);
                    _n3 = _mm256_mul_ps(_t1, _2_n);
                    _n3 = _mm256_fmadd_ps(_t2, _1_n, _n3);
                    _n3 = _mm256_fmadd_ps(_t3, _2_p, _n3);
                    _n3 = _mm256_add_ps(_n3, _t4);
                    _n4 = _mm256_mul_ps(_t1, _2_p);
                    _n4 = _mm256_fmadd_ps(_t2, _1_n, _n4);
                    _n4 = _mm256_fmadd_ps(_t3, _2_n, _n4);
                    _n4 = _mm256_add_ps(_n4, _t4);
                    _n5 = _mm256_mul_ps(_t1, _4_p);
                    _n5 = _mm256_fmadd_ps(_t3, _5_n, _n5);
                    _n5 = _mm256_add_ps(_n5, _t5);
                    // save to out_tm
                    float output_n0[8] = {0.f};
                    _mm256_storeu_ps(output_n0, _n0);
                    float output_n1[8] = {0.f};
                    _mm256_storeu_ps(output_n1, _n1);
                    float output_n2[8] = {0.f};
                    _mm256_storeu_ps(output_n2, _n2);
                    float output_n3[8] = {0.f};
                    _mm256_storeu_ps(output_n3, _n3);
                    float output_n4[8] = {0.f};
                    _mm256_storeu_ps(output_n4, _n4);
                    float output_n5[8] = {0.f};
                    _mm256_storeu_ps(output_n5, _n5);
                    out_tm0[0] = output_n0[0];
                    out_tm0[1] = output_n0[1];
                    out_tm0[2] = output_n0[2];
                    out_tm0[3] = output_n0[3];
                    out_tm1[0] = output_n0[4];
                    out_tm1[1] = output_n0[5];
                    out_tm1[2] = output_n1[0];
                    out_tm1[3] = output_n1[1];
                    out_tm2[0] = output_n1[2];
                    out_tm2[1] = output_n1[3];
                    out_tm2[2] = output_n1[4];
                    out_tm2[3] = output_n1[5];
                    out_tm3[0] = output_n2[0];
                    out_tm3[1] = output_n2[1];
                    out_tm3[2] = output_n2[2];
                    out_tm3[3] = output_n2[3];
                    out_tm4[0] = output_n2[4];
                    out_tm4[1] = output_n2[5];
                    out_tm4[2] = output_n3[0];
                    out_tm4[3] = output_n3[1];
                    out_tm5[0] = output_n3[2];
                    out_tm5[1] = output_n3[3];
                    out_tm5[2] = output_n3[4];
                    out_tm5[3] = output_n3[5];
                    out_tm6[0] = output_n4[0];
                    out_tm6[1] = output_n4[1];
                    out_tm6[2] = output_n4[2];
                    out_tm6[3] = output_n4[3];
                    out_tm7[0] = output_n4[4];
                    out_tm7[1] = output_n4[5];
                    out_tm7[2] = output_n5[0];
                    out_tm7[3] = output_n5[1];
                    out_tm8[0] = output_n5[2];
                    out_tm8[1] = output_n5[3];
                    out_tm8[2] = output_n5[4];
                    out_tm8[3] = output_n5[5];
#else
                    float d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
                    float w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
                    float t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];
                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                        d4[n] = r4[n];
                        d5[n] = r5[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
                        w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
                        w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
                        w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
                        w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
                        w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0] = w0[0];
                        t1[0] = w0[1];
                        t2[0] = w0[2];
                        t3[0] = w0[3];
                        t4[0] = w0[4];
                        t5[0] = w0[5];
                        t0[1] = w1[0];
                        t1[1] = w1[1];
                        t2[1] = w1[2];
                        t3[1] = w1[3];
                        t4[1] = w1[4];
                        t5[1] = w1[5];
                        t0[2] = w2[0];
                        t1[2] = w2[1];
                        t2[2] = w2[2];
                        t3[2] = w2[3];
                        t4[2] = w2[4];
                        t5[2] = w2[5];
                        t0[3] = w3[0];
                        t1[3] = w3[1];
                        t2[3] = w3[2];
                        t3[3] = w3[3];
                        t4[3] = w3[4];
                        t5[3] = w3[5];
                        t0[4] = w4[0];
                        t1[4] = w4[1];
                        t2[4] = w4[2];
                        t3[4] = w4[3];
                        t4[4] = w4[4];
                        t5[4] = w4[5];
                        t0[5] = w5[0];
                        t1[5] = w5[1];
                        t2[5] = w5[2];
                        t3[5] = w5[3];
                        t4[5] = w5[4];
                        t5[5] = w5[5];
                    }
                    // d = B_t * d_t
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
                        d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
                        d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
                        d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
                        d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
                        d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
                    }
                    // save to out_tm
                    {
                        out_tm0[0] = d0[0];
                        out_tm0[1] = d0[1];
                        out_tm0[2] = d0[2];
                        out_tm0[3] = d0[3];
                        out_tm1[0] = d0[4];
                        out_tm1[1] = d0[5];
                        out_tm1[2] = d1[0];
                        out_tm1[3] = d1[1];
                        out_tm2[0] = d1[2];
                        out_tm2[1] = d1[3];
                        out_tm2[2] = d1[4];
                        out_tm2[3] = d1[5];
                        out_tm3[0] = d2[0];
                        out_tm3[1] = d2[1];
                        out_tm3[2] = d2[2];
                        out_tm3[3] = d2[3];
                        out_tm4[0] = d2[4];
                        out_tm4[1] = d2[5];
                        out_tm4[2] = d3[0];
                        out_tm4[3] = d3[1];
                        out_tm5[0] = d3[2];
                        out_tm5[1] = d3[3];
                        out_tm5[2] = d3[4];
                        out_tm5[3] = d3[5];
                        out_tm6[0] = d4[0];
                        out_tm6[1] = d4[1];
                        out_tm6[2] = d4[2];
                        out_tm6[3] = d4[3];
                        out_tm7[0] = d4[4];
                        out_tm7[1] = d4[5];
                        out_tm7[2] = d5[0];
                        out_tm7[3] = d5[1];
                        out_tm8[0] = d5[2];
                        out_tm8[1] = d5[3];
                        out_tm8[2] = d5[4];
                        out_tm8[3] = d5[5];
                    }
#endif // __AVX__
                    // tiles overlap: advance 4 columns per output tile
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    r5 += 4;
                }
            }
        }
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;
        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;
        const int tiles = nColBlocks * nRowBlocks;
        top_blob_tm.create(36, tiles, outch, elemsize, opt.workspace_allocator);
        // parallel over the 9 groups of 4 transformed values
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 9; r++)
        {
            int nn_outch = 0;
            int remain_outch_start = 0;
            // unroll output channels by 8
            nn_outch = outch >> 3;
            remain_outch_start = nn_outch << 3;
            for (int pp = 0; pp < nn_outch; pp++)
            {
                int p = pp * 8;
                float* output0_tm = top_blob_tm.channel(p);
                float* output1_tm = top_blob_tm.channel(p + 1);
                float* output2_tm = top_blob_tm.channel(p + 2);
                float* output3_tm = top_blob_tm.channel(p + 3);
                float* output4_tm = top_blob_tm.channel(p + 4);
                float* output5_tm = top_blob_tm.channel(p + 5);
                float* output6_tm = top_blob_tm.channel(p + 6);
                float* output7_tm = top_blob_tm.channel(p + 7);
                output0_tm = output0_tm + r * 4;
                output1_tm = output1_tm + r * 4;
                output2_tm = output2_tm + r * 4;
                output3_tm = output3_tm + r * 4;
                output4_tm = output4_tm + r * 4;
                output5_tm = output5_tm + r * 4;
                output6_tm = output6_tm + r * 4;
                output7_tm = output7_tm + r * 4;
                for (int i = 0; i < tiles; i++)
                {
                    const float* kptr = kernel_tm_test[r].channel(p / 8);
                    const float* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
                    float zero_val = 0.f;
                    __m128 _sum0 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum1 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum2 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum3 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum4 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum5 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum6 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum7 = _mm_broadcast_ss(&zero_val);
#else
                    __m128 _sum0 = _mm_set1_ps(0.f);
                    __m128 _sum1 = _mm_set1_ps(0.f);
                    __m128 _sum2 = _mm_set1_ps(0.f);
                    __m128 _sum3 = _mm_set1_ps(0.f);
                    __m128 _sum4 = _mm_set1_ps(0.f);
                    __m128 _sum5 = _mm_set1_ps(0.f);
                    __m128 _sum6 = _mm_set1_ps(0.f);
                    __m128 _sum7 = _mm_set1_ps(0.f);
#endif
                    int q = 0;
                    // 4 input channels per iteration, 8 output channels wide
                    for (; q + 3 < inch; q = q + 4)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _r1 = _mm_loadu_ps(r0 + 4);
                        __m128 _r2 = _mm_loadu_ps(r0 + 8);
                        __m128 _r3 = _mm_loadu_ps(r0 + 12);
                        __m128 _k0 = _mm_loadu_ps(kptr);
                        __m128 _k1 = _mm_loadu_ps(kptr + 4);
                        __m128 _k2 = _mm_loadu_ps(kptr + 8);
                        __m128 _k3 = _mm_loadu_ps(kptr + 12);
                        __m128 _k4 = _mm_loadu_ps(kptr + 16);
                        __m128 _k5 = _mm_loadu_ps(kptr + 20);
                        __m128 _k6 = _mm_loadu_ps(kptr + 24);
                        __m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
                        kptr += 32;
                        _k0 = _mm_loadu_ps(kptr);
                        _k1 = _mm_loadu_ps(kptr + 4);
                        _k2 = _mm_loadu_ps(kptr + 8);
                        _k3 = _mm_loadu_ps(kptr + 12);
                        _k4 = _mm_loadu_ps(kptr + 16);
                        _k5 = _mm_loadu_ps(kptr + 20);
                        _k6 = _mm_loadu_ps(kptr + 24);
                        _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r1, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r1, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r1, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r1, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r1, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r1, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r1, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r1, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7));
#endif
                        kptr += 32;
                        _k0 = _mm_loadu_ps(kptr);
                        _k1 = _mm_loadu_ps(kptr + 4);
                        _k2 = _mm_loadu_ps(kptr + 8);
                        _k3 = _mm_loadu_ps(kptr + 12);
                        _k4 = _mm_loadu_ps(kptr + 16);
                        _k5 = _mm_loadu_ps(kptr + 20);
                        _k6 = _mm_loadu_ps(kptr + 24);
                        _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r2, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r2, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r2, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r2, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r2, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r2, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r2, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r2, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7));
#endif
                        kptr += 32;
                        _k0 = _mm_loadu_ps(kptr);
                        _k1 = _mm_loadu_ps(kptr + 4);
                        _k2 = _mm_loadu_ps(kptr + 8);
                        _k3 = _mm_loadu_ps(kptr + 12);
                        _k4 = _mm_loadu_ps(kptr + 16);
                        _k5 = _mm_loadu_ps(kptr + 20);
                        _k6 = _mm_loadu_ps(kptr + 24);
                        _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r3, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r3, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r3, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r3, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r3, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r3, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r3, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r3, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r3, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7));
#endif
                        kptr += 32;
                        r0 += 16;
                    }
                    for (; q < inch; q++)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _k0 = _mm_loadu_ps(kptr);
                        __m128 _k1 = _mm_loadu_ps(kptr + 4);
                        __m128 _k2 = _mm_loadu_ps(kptr + 8);
                        __m128 _k3 = _mm_loadu_ps(kptr + 12);
                        __m128 _k4 = _mm_loadu_ps(kptr + 16);
                        __m128 _k5 = _mm_loadu_ps(kptr + 20);
                        __m128 _k6 = _mm_loadu_ps(kptr + 24);
                        __m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
                        kptr += 32;
                        r0 += 4;
                    }
                    _mm_storeu_ps(output0_tm, _sum0);
                    _mm_storeu_ps(output1_tm, _sum1);
                    _mm_storeu_ps(output2_tm, _sum2);
                    _mm_storeu_ps(output3_tm, _sum3);
                    _mm_storeu_ps(output4_tm, _sum4);
                    _mm_storeu_ps(output5_tm, _sum5);
                    _mm_storeu_ps(output6_tm, _sum6);
                    _mm_storeu_ps(output7_tm, _sum7);
#else
                    float sum0[4] = {0};
                    float sum1[4] = {0};
                    float sum2[4] = {0};
                    float sum3[4] = {0};
                    float sum4[4] = {0};
                    float sum5[4] = {0};
                    float sum6[4] = {0};
                    float sum7[4] = {0};
                    for (int q = 0; q < inch; q++)
                    {
                        for (int n = 0; n < 4; n++)
                        {
                            sum0[n] += r0[n] * kptr[n];
                            sum1[n] += r0[n] * kptr[n + 4];
                            sum2[n] += r0[n] * kptr[n + 8];
                            sum3[n] += r0[n] * kptr[n + 12];
                            sum4[n] += r0[n] * kptr[n + 16];
                            sum5[n] += r0[n] * kptr[n + 20];
                            sum6[n] += r0[n] * kptr[n + 24];
                            sum7[n] += r0[n] * kptr[n + 28];
                        }
                        kptr += 32;
                        r0 += 4;
                    }
                    for (int n = 0; n < 4; n++)
                    {
                        output0_tm[n] = sum0[n];
                        output1_tm[n] = sum1[n];
                        output2_tm[n] = sum2[n];
                        output3_tm[n] = sum3[n];
                        output4_tm[n] = sum4[n];
                        output5_tm[n] = sum5[n];
                        output6_tm[n] = sum6[n];
                        output7_tm[n] = sum7[n];
                    }
#endif // __AVX__ || __SSE__
                    output0_tm += 36;
                    output1_tm += 36;
                    output2_tm += 36;
                    output3_tm += 36;
                    output4_tm += 36;
                    output5_tm += 36;
                    output6_tm += 36;
                    output7_tm += 36;
                }
            }
            // remaining output channels, 4 at a time
            nn_outch = (outch - remain_outch_start) >> 2;
            for (int pp = 0; pp < nn_outch; pp++)
            {
                int p = remain_outch_start + pp * 4;
                float* output0_tm = top_blob_tm.channel(p);
                float* output1_tm = top_blob_tm.channel(p + 1);
                float* output2_tm = top_blob_tm.channel(p + 2);
                float* output3_tm = top_blob_tm.channel(p + 3);
                output0_tm = output0_tm + r * 4;
                output1_tm = output1_tm + r * 4;
                output2_tm = output2_tm + r * 4;
                output3_tm = output3_tm + r * 4;
                for (int i = 0; i < tiles; i++)
                {
                    const float* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4);
                    const float* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
                    float zero_val = 0.f;
                    __m128 _sum0 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum1 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum2 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum3 = _mm_broadcast_ss(&zero_val);
#else
                    __m128 _sum0 = _mm_set1_ps(0.f);
                    __m128 _sum1 = _mm_set1_ps(0.f);
                    __m128 _sum2 = _mm_set1_ps(0.f);
                    __m128 _sum3 = _mm_set1_ps(0.f);
#endif
                    for (int q = 0; q < inch; q++)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _k0 = _mm_loadu_ps(kptr);
                        __m128 _k1 = _mm_loadu_ps(kptr + 4);
                        __m128 _k2 = _mm_loadu_ps(kptr + 8);
                        __m128 _k3 = _mm_loadu_ps(kptr + 12);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
#endif
                        kptr += 16;
                        r0 += 4;
                    }
                    _mm_storeu_ps(output0_tm, _sum0);
                    _mm_storeu_ps(output1_tm, _sum1);
                    _mm_storeu_ps(output2_tm, _sum2);
                    _mm_storeu_ps(output3_tm, _sum3);
#else
                    float sum0[4] = {0};
                    float sum1[4] = {0};
                    float sum2[4] = {0};
                    float sum3[4] = {0};
                    for (int q = 0; q < inch; q++)
                    {
                        for (int n = 0; n < 4; n++)
                        {
                            sum0[n] += r0[n] * kptr[n];
                            sum1[n] += r0[n] * kptr[n + 4];
                            sum2[n] += r0[n] * kptr[n + 8];
                            sum3[n] += r0[n] * kptr[n + 12];
                        }
                        kptr += 16;
                        r0 += 4;
                    }
                    for (int n = 0; n < 4; n++)
                    {
                        output0_tm[n] = sum0[n];
                        output1_tm[n] = sum1[n];
                        output2_tm[n] = sum2[n];
                        output3_tm[n] = sum3[n];
                    }
#endif // __AVX__ || __SSE__
                    output0_tm += 36;
                    output1_tm += 36;
                    output2_tm += 36;
                    output3_tm += 36;
                }
            }
            // leftover single output channels
            remain_outch_start += nn_outch << 2;
            for (int p = remain_outch_start; p < outch; p++)
            {
                float* output0_tm = top_blob_tm.channel(p);
                output0_tm = output0_tm + r * 4;
                for (int i = 0; i < tiles; i++)
                {
                    const float* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4 + p % 4);
                    const float* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
                    float zero_val = 0.f;
                    __m128 _sum0 = _mm_broadcast_ss(&zero_val);
#else
                    __m128 _sum0 = _mm_set1_ps(0.f);
#endif
                    for (int q = 0; q < inch; q++)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _k0 = _mm_loadu_ps(kptr);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
#endif
                        // NOTE(review): this SIMD path advances kptr by 16 per input
                        // channel while the kernel packing loop for these leftover
                        // channels writes 4 floats per channel (and the scalar
                        // fallback below advances by 4) — verify against the
                        // kernel_tm_test channel layout (created outside this view).
                        kptr += 16;
                        r0 += 4;
                    }
                    _mm_storeu_ps(output0_tm, _sum0);
#else
                    float sum0[4] = {0};
                    for (int q = 0; q < inch; q++)
                    {
                        for (int n = 0; n < 4; n++)
                        {
                            // FIX: dropped the stray (int) cast that truncated the
                            // float product before accumulation
                            sum0[n] += r0[n] * kptr[n];
                        }
                        kptr += 4;
                        r0 += 4;
                    }
                    for (int n = 0; n < 4; n++)
                    {
                        output0_tm[n] = sum0[n];
                    }
#endif // __AVX__ || __SSE__
                    output0_tm += 36;
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, opt.workspace_allocator);
    }
    {
        // AT
        // const float itm[4][6] = {
        //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        // };
        // 0 = r00 + r01 + r02 + r03 + r04
        // 1 = r01 - r02 + 2 * (r03 - r04)
        // 2 = r01 + r02 + 4 * (r03 + r04)
        // 3 = r01 - r02 + 8 * (r03 - r04) + r05
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;
        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            float* out_tile = top_blob_tm.channel(p);
            float* outRow0 = top_blob_bordered.channel(p);
            float* outRow1 = outRow0 + outw;
            float* outRow2 = outRow0 + outw * 2;
            float* outRow3 = outRow0 + outw * 3;
            const float bias0 = bias ? bias[p] : 0.f;
            for (int j = 0; j < nColBlocks; j++)
            {
                for (int i = 0; i < nRowBlocks; i++)
                {
                    // TODO AVX2
                    float s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
                    float w0[6], w1[6], w2[6], w3[6];
                    float d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
                    float o0[4], o1[4], o2[4], o3[4];
                    // load the 6x6 transformed tile
                    for (int n = 0; n < 6; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n + 6];
                        s2[n] = out_tile[n + 12];
                        s3[n] = out_tile[n + 18];
                        s4[n] = out_tile[n + 24];
                        s5[n] = out_tile[n + 30];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
                        w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
                        w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
                        w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0];
                        d0[1] = w1[0];
                        d0[2] = w2[0];
                        d0[3] = w3[0];
                        d1[0] = w0[1];
                        d1[1] = w1[1];
                        d1[2] = w2[1];
                        d1[3] = w3[1];
                        d2[0] = w0[2];
                        d2[1] = w1[2];
                        d2[2] = w2[2];
                        d2[3] = w3[2];
                        d3[0] = w0[3];
                        d3[1] = w1[3];
                        d3[2] = w2[3];
                        d3[3] = w3[3];
                        d4[0] = w0[4];
                        d4[1] = w1[4];
                        d4[2] = w2[4];
                        d4[3] = w3[4];
                        d5[0] = w0[5];
                        d5[1] = w1[5];
                        d5[2] = w2[5];
                        d5[3] = w3[5];
                    }
                    // Y = A_T * w_t
                    for (int n = 0; n < 4; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
                        o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
                        o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
                        o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
                    }
                    // save the 4x4 output tile, adding the channel bias
                    for (int n = 0; n < 4; n++)
                    {
                        outRow0[n] = o0[n] + bias0;
                        outRow1[n] = o1[n] + bias0;
                        outRow2[n] = o2[n] + bias0;
                        outRow3[n] = o3[n] + bias0;
                    }
                    out_tile += 36;
                    outRow0 += 4;
                    outRow1 += 4;
                    outRow2 += 4;
                    outRow3 += 4;
                }
                // each tile wrote 4 rows; skip to the next block row
                outRow0 += outw * 3;
                outRow1 += outw * 3;
                outRow2 += outw * 3;
                outRow3 += outw * 3;
            }
        }
    }
    // END transform output
    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Direct 3x3 stride-2 convolution (scalar reference path).
// Each output channel is seeded with its bias, then accumulates the 3x3 dot
// product over every input channel.
static void conv3x3s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // after scanning one output row (consuming 2*outw input columns),
    // jump to the start of the next input row pair
    const int tailstep = w - 2 * outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);

        for (int q = 0; q < inch; q++)
        {
            float* outptr = out;
            const float* src = bottom_blob.channel(q);
            const float* k_base = kernel + p * inch * 9 + q * 9;

            // three consecutive input rows and the matching 3-tap kernel rows
            const float* row[3] = { src, src + w, src + w * 2 };
            const float* krow[3] = { k_base, k_base + 3, k_base + 6 };

            for (int y = 0; y < outh; y++)
            {
                for (int x = 0; x < outw; x++)
                {
                    float acc = 0.f;
                    for (int t = 0; t < 3; t++)
                    {
                        acc += row[t][0] * krow[t][0];
                        acc += row[t][1] * krow[t][1];
                        acc += row[t][2] * krow[t][2];
                    }
                    *outptr += acc;
                    outptr++;
                    row[0] += 2;
                    row[1] += 2;
                    row[2] += 2;
                }
                row[0] += tailstep;
                row[1] += tailstep;
                row[2] += tailstep;
            }
        }
    }
}
|
openmp.c | #include<omp.h>
/* Real dot product of two length-N vectors; OpenMP sum reduction. */
double dot(double* a, double* b, long N){
    double acc = 0.0;
    #pragma omp parallel for reduction(+: acc)
    for(long i = 0; i < N; i++)
        acc += a[i] * b[i];
    return acc;
}
void cdot(double* c, double* a, double* b, long N){
double r = 0.0, i = 0.0;
#pragma omp parallel for reduction(+: r, i)
for(long n = 0; n < N; n++){
r += a[2*n] * b[2*n ] + a[2*n+1] * b[2*n+1];
i += a[2*n] * b[2*n+1] - a[2*n+1] * b[2*n ];
}
c[0] = r;
c[1] = i;
return;
}
/* Triple product x^H * A * y over interleaved complex data: for each column n,
 * t_n = <x, A(:,n)>, then accumulate t_n * y_n.
 * NOTE(review): the per-column offset is 2*n*N (stride N, not M) — this only
 * addresses the expected M-row columns when M == N or when the caller packs A
 * with an N-element column stride; confirm against the callers. */
void cdot3(double* c, double* x, double* A, double* y, long M, long N){
    double sum_r = 0.0, sum_i = 0.0;
    #pragma omp parallel for reduction(+: sum_r, sum_i)
    for (long col = 0; col < N; col++){
        double t_r = 0.0, t_i = 0.0;
        const double* Acol = A + 2*col*N;
        for(long m = 0; m < M; m++){
            t_r += x[2*m] * Acol[2*m] + x[2*m+1] * Acol[2*m+1];
            t_i += x[2*m] * Acol[2*m+1] - x[2*m+1] * Acol[2*m];
        }
        sum_r += t_r * y[2*col] - t_i * y[2*col+1];
        sum_i += t_r * y[2*col+1] + t_i * y[2*col];
    }
    c[0] = sum_r;
    c[1] = sum_i;
}
/* Valid-mode 2D convolution (correlation) of the N-by-M image A with the 5x5
 * kernel K; the (N-4)-by-(M-4) result is written to B. M is the row length. */
void conv(double* B, double* A, double* K, long M, long N){
    const long offset = 2;              /* kernel radius: 5x5 kernel */
    const long ksz = 2*offset + 1;
    #pragma omp parallel for collapse(2)
    for (long i = offset; i < N-offset; i++){
        for (long j = offset; j < M-offset; j++){
            double acc = 0.0;
            for (long k = -offset; k <= offset; k++){
                for (long l = -offset; l <= offset; l++){
                    acc += A[(j+l) + (i+k)*M] * K[(l+offset) + (k+offset)*ksz];
                }
            }
            B[(j-offset) + (i-offset)*(M - 2*offset)] = acc;
        }
    }
}
|
matrix.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <HiParTI.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
/**
* Initialize a new dense matrix
*
* @param mtx a valid pointer to an uninitialized ptiMatrix variable
* @param nrows the number of rows
* @param ncols the number of columns
*
* The memory layout of this dense matrix is a flat 2D array, with `ncols`
* rounded up to multiples of 8
*/
int ptiNewMatrix(ptiMatrix *mtx, ptiIndex const nrows, ptiIndex const ncols) {
    mtx->nrows = nrows;
    mtx->ncols = ncols;
    /* capacity is counted in rows; keep it at least 1 so a later append has room to grow */
    mtx->cap = nrows != 0 ? nrows : 1;
    /* row stride rounded up to the next multiple of 8 values (alignment/padding) */
    mtx->stride = ((ncols-1)/8+1)*8;
    /* pick the best available aligned allocator; alignment is 8 values.
       The total size is a multiple of the alignment because stride is a
       multiple of 8, as aligned_alloc requires. */
#ifdef _ISOC11_SOURCE
    mtx->values = aligned_alloc(8 * sizeof (ptiValue), mtx->cap * mtx->stride * sizeof (ptiValue));
#elif _POSIX_C_SOURCE >= 200112L
    {
        int result = posix_memalign((void **) &mtx->values, 8 * sizeof (ptiValue), mtx->cap * mtx->stride * sizeof (ptiValue));
        if(result != 0) {
            /* normalize the POSIX error-code convention to the NULL convention checked below */
            mtx->values = NULL;
        }
    }
#else
    /* unaligned fallback */
    mtx->values = malloc(mtx->cap * mtx->stride * sizeof (ptiValue));
#endif
    pti_CheckOSError(!mtx->values, "Mtx New");
    return 0;
}
/**
 * Fill a matrix with deterministic values (value at (i, j) is i + j + 1)
 *
 * @param mtx a pointer to an allocated matrix
 *
 * NOTE: the randomized fill (ptiRandomValue) is currently commented out in the
 * implementation, so this produces a reproducible test pattern, not uniform
 * pseudorandom numbers.
 */
int ptiRandomizeMatrix(ptiMatrix *mtx) {
    /* NOTE(review): despite the name, the fill below is deterministic (i+j+1);
       the random fill is commented out, which makes this srand call dead. */
    srand(time(NULL));
    for(ptiIndex i=0; i<mtx->nrows; ++i)
        for(ptiIndex j=0; j<mtx->ncols; ++j) {
            /* values are written with the padded row stride, not ncols */
            mtx->values[i * mtx->stride + j] = i + j + 1; //ptiRandomValue();
        }
    return 0;
}
/**
* Fill an identity dense matrix
*
* @param mtx a pointer to an uninitialized matrix
* @param nrows fill the specified number of rows
* @param ncols fill the specified number of columns
*
*/
/* Fill a square dense matrix with the identity: 1 on the diagonal, 0 elsewhere.
 * Requires nrows == ncols. Always returns 0. */
int ptiIdentityMatrix(ptiMatrix *mtx) {
    ptiIndex const dim = mtx->nrows;
    assert(dim == mtx->ncols);
    /* single pass: each cell is decided by whether it sits on the diagonal */
    for(ptiIndex r = 0; r < dim; ++r) {
        ptiValue *row = mtx->values + r * mtx->stride;
        for(ptiIndex c = 0; c < mtx->ncols; ++c)
            row[c] = (r == c) ? 1 : 0;
    }
    return 0;
}
/**
* Fill an existed dense matrix with a specified constant
*
* @param mtx a pointer to a valid matrix
* @param val a given value constant
*
*/
/* Fill every element of an existing dense matrix with the constant `val`.
 * Only the ncols payload of each padded row is written. Always returns 0. */
int ptiConstantMatrix(ptiMatrix *mtx, ptiValue const val) {
    for(ptiIndex r = 0; r < mtx->nrows; ++r) {
        ptiValue *row = mtx->values + r * mtx->stride;
        for(ptiIndex c = 0; c < mtx->ncols; ++c)
            row[c] = val;
    }
    return 0;
}
/**
* Shuffle matrix row indices.
*
* @param[in] mtx matrix to be shuffled
* @param[out] map_inds is the renumbering mapping
*
*/
/**
 * Renumber matrix rows: row i of the result is row mode_map_inds[i] of the
 * original matrix (a gather by the mapping).
 *
 * @param[in,out] mtx            matrix whose rows are permuted in place
 * @param[in]     mode_map_inds  renumbering map, length >= mtx->nrows
 *
 * FIX: the original dereferenced the malloc result without checking it (UB on
 * allocation failure). On failure the matrix is now left unmodified.
 */
void ptiMatrixInverseShuffleIndices(ptiMatrix *mtx, ptiIndex * mode_map_inds) {
    ptiValue * tmp_values = malloc(mtx->cap * mtx->stride * sizeof (ptiValue));
    if(!tmp_values) {
        /* cannot report an error through the void return; leave mtx untouched */
        return;
    }
    for(ptiIndex i=0; i<mtx->nrows; ++i) {
        ptiIndex new_i = mode_map_inds[i];
        for(ptiIndex j=0; j<mtx->ncols; ++j) {
            tmp_values[i * mtx->stride + j] = mtx->values[new_i * mtx->stride + j];
        }
    }
    free(mtx->values);
    mtx->values = tmp_values;
}
/**
* Copy a dense matrix to an uninitialized dense matrix
*
* @param dest a pointer to an uninitialized dense matrix
* @param src a pointer to an existing valid dense matrix
*
* The contents of `src` will be copied to `dest`.
*/
/* Deep-copy `src` into the uninitialized matrix `dest`: allocate a matrix of
 * the same shape, then copy the padded row data. Returns 0 on success. */
int ptiCopyMatrix(ptiMatrix *dest, const ptiMatrix *src) {
    int err = ptiNewMatrix(dest, src->nrows, src->ncols);
    pti_CheckError(err, "Mtx Copy", NULL);
    /* same ncols implies the same padded stride */
    assert(dest->stride == src->stride);
    memcpy(dest->values, src->values, dest->nrows * dest->stride * sizeof (ptiValue));
    return 0;
}
/**
* Add a row to the end of dense matrix
*
* @param mtx a pointer to a valid matrix
* @param values an array of data to be added
*/
/**
 * Append one row to a dense matrix, growing the backing storage as needed.
 *
 * @param mtx    a pointer to a valid matrix
 * @param values an array of at least ncols values to copy into the new row,
 *               or NULL to only extend the row count (row left uninitialized)
 *
 * FIX: the 1.5x growth `cap + cap/2` does not grow when cap == 1 (1 + 0 == 1),
 * so the second append wrote one row past the allocation. The new capacity is
 * now forced to exceed the current row count.
 */
int ptiAppendMatrix(ptiMatrix *mtx, const ptiValue values[]) {
    if(mtx->cap <= mtx->nrows) {
#ifndef MEMCHECK_MODE
        /* grow by ~1.5x, but always strictly beyond the current row count */
        ptiIndex newcap = mtx->cap + mtx->cap/2;
        if(newcap <= mtx->nrows) {
            newcap = mtx->nrows + 1;
        }
#else
        /* tight allocation so bounds errors surface under memory checkers */
        ptiIndex newcap = mtx->nrows+1;
#endif
        ptiValue *newdata;
#ifdef _ISOC11_SOURCE
        newdata = aligned_alloc(8 * sizeof (ptiValue), newcap * mtx->stride * sizeof (ptiValue));
#elif _POSIX_C_SOURCE >= 200112L
        {
            int result = posix_memalign((void **) &newdata, 8 * sizeof (ptiValue), newcap * mtx->stride * sizeof (ptiValue));
            if(result != 0) {
                newdata = NULL;
            }
        }
#else
        newdata = malloc(newcap * mtx->stride * sizeof (ptiValue));
#endif
        pti_CheckOSError(!newdata, "Mtx Append");
        memcpy(newdata, mtx->values, mtx->nrows * mtx->stride * sizeof (ptiValue));
        free(mtx->values);
        mtx->cap = newcap;
        mtx->values = newdata;
    }
    if(values != NULL) {
        memcpy(&mtx->values[mtx->nrows * mtx->stride], values, mtx->ncols * sizeof (ptiValue));
    }
    ++ mtx->nrows;
    return 0;
}
/**
* Modify the number of rows in a dense matrix
*
* @param mtx a pointer to a valid matrix
* @param new_nrows the new number of rows `mtx` will have
*/
int ptiResizeMatrix(ptiMatrix *mtx, ptiIndex const new_nrows) {
    /* Reallocate the backing store for exactly `new_nrows` rows (capacity
     * is set equal to the row count). */
    ptiValue *newdata;
#ifdef _ISOC11_SOURCE
    newdata = aligned_alloc(8 * sizeof (ptiValue), new_nrows * mtx->stride * sizeof (ptiValue));
#elif _POSIX_C_SOURCE >= 200112L
    {
        int result = posix_memalign((void **) &newdata, 8 * sizeof (ptiValue), new_nrows * mtx->stride * sizeof (ptiValue));
        if(result != 0) {
            newdata = NULL;
        }
    }
#else
    newdata = malloc(new_nrows * mtx->stride * sizeof (ptiValue));
#endif
    pti_CheckOSError(!newdata, "Mtx Resize");
    /* When shrinking, copy only the rows that fit in the new buffer;
     * copying all mtx->nrows rows would overflow it. */
    ptiIndex const rows_to_copy = mtx->nrows < new_nrows ? mtx->nrows : new_nrows;
    memcpy(newdata, mtx->values, rows_to_copy * mtx->stride * sizeof (ptiValue));
    free(mtx->values);
    mtx->nrows = new_nrows;
    mtx->cap = new_nrows;
    mtx->values = newdata;
    return 0;
}
/**
* Release the memory buffer a dense matrix is holding
*
* @param mtx a pointer to a valid matrix
*
* By using `ptiFreeMatrix`, a valid matrix would become uninitialized and
* should not be used anymore prior to another initialization
*/
void ptiFreeMatrix(ptiMatrix *mtx) {
    free(mtx->values);
    /* Null the pointer so a stale `values` cannot be used (or double-freed)
     * after the matrix is released. */
    mtx->values = NULL;
    mtx->nrows = 0;
    mtx->ncols = 0;
    mtx->cap = 0;
    mtx->stride = 0;
}
/**** ptiMatrix Operations ****/
int ptiMatrixDotMul(ptiMatrix const * A, ptiMatrix const * B, ptiMatrix const * C)
{
    /* C = A .* B, the element-wise (Hadamard) product. */
    ptiIndex const m = A->nrows;
    ptiIndex const n = A->ncols;
    ptiIndex const ld = A->stride;
    /* All three operands must share shape and layout. */
    assert(m == B->nrows && m == C->nrows);
    assert(n == B->ncols && n == C->ncols);
    assert(ld == B->stride && ld == C->stride);
    for(ptiIndex r = 0; r < m; ++r) {
        ptiValue const * const arow = A->values + r * ld;
        ptiValue const * const brow = B->values + r * ld;
        ptiValue * const crow = C->values + r * ld;
        for(ptiIndex c = 0; c < n; ++c) {
            crow[c] = arow[c] * brow[c];
        }
    }
    return 0;
}
int ptiMatrixDotMulSeq(ptiIndex const mode, ptiIndex const nmodes, ptiMatrix ** mats)
{
    /* Element-wise (Hadamard) product, row-major, of the matrices of every
     * mode except `mode`, accumulated into the output matrix mats[nmodes]. */
    ptiIndex const nrows = mats[0]->nrows;
    ptiIndex const ncols = mats[0]->ncols;
    ptiIndex const stride = mats[0]->stride;
    /* All inputs and the output buffer must share shape and stride. */
    for(ptiIndex m=1; m<nmodes+1; ++m) {
        assert(mats[m]->ncols == ncols);
        assert(mats[m]->nrows == nrows);
        assert(mats[m]->stride == stride);
    }
    ptiValue * ovals = mats[nmodes]->values;
    /* Initialize the accumulator to the multiplicative identity. */
#ifdef HIPARTI_USE_OPENMP
    #pragma omp parallel for
#endif
    for(ptiIndex i=0; i < nrows; ++i) {
        for(ptiIndex j=0; j < ncols; ++j) {
            ovals[i * stride + j] = 1;
        }
    }
    /* m = 1 .. nmodes-1, so pm = (mode+m) % nmodes visits every mode index
     * except `mode` itself. */
    for(ptiIndex m=1; m < nmodes; ++m) {
        ptiIndex const pm = (mode + m) % nmodes;
        ptiValue const * vals = mats[pm]->values;
#ifdef HIPARTI_USE_OPENMP
    #pragma omp parallel for
#endif
        for(ptiIndex i=0; i < nrows; ++i) {
            for(ptiIndex j=0; j < ncols; ++j) {
                ovals[i * stride + j] *= vals[i * stride + j];
            }
        }
    }
    return 0;
}
int ptiMatrixDotMulSeqCol(ptiIndex const mode, ptiIndex const nmodes, ptiMatrix ** mats)
{
    /* Same Hadamard product over all modes except `mode` as
     * ptiMatrixDotMulSeq, but indexed as ovals[j * stride + i].
     * NOTE(review): that indexing assumes a column-major buffer layout
     * (stride spanning a column) — confirm against the callers that build
     * these matrices. */
    ptiIndex const nrows = mats[0]->nrows;
    ptiIndex const ncols = mats[0]->ncols;
    ptiIndex const stride = mats[0]->stride;
    // printf("stride: %lu\n", stride);
    for(ptiIndex m=1; m<nmodes+1; ++m) {
        assert(mats[m]->ncols == ncols);
        assert(mats[m]->nrows == nrows);
        assert(mats[m]->stride == stride);
    }
    ptiValue * ovals = mats[nmodes]->values;
    /* Initialize the accumulator to the multiplicative identity. */
#ifdef HIPARTI_USE_OPENMP
    #pragma omp parallel for
#endif
    for(ptiIndex j=0; j < ncols; ++j) {
        for(ptiIndex i=0; i < nrows; ++i) {
            ovals[j * stride + i] = 1;
        }
    }
    /* Multiply in each mode's matrix, skipping `mode` itself. */
    for(ptiIndex m=1; m < nmodes; ++m) {
        ptiIndex const pm = (mode + m) % nmodes;
        ptiValue const * vals = mats[pm]->values;
#ifdef HIPARTI_USE_OPENMP
    #pragma omp parallel for
#endif
        for(ptiIndex j=0; j < ncols; ++j) {
            for(ptiIndex i=0; i < nrows; ++i) {
                ovals[j * stride + i] *= vals[j * stride + i];
            }
        }
    }
    return 0;
}
/* mats (aTa) only stores upper triangle elements. */
int ptiMatrixDotMulSeqTriangle(ptiIndex const mode, ptiIndex const nmodes, ptiMatrix ** mats)
{
    /* Hadamard product over all modes except `mode`, where each input (aTa)
     * stores only its upper triangle; the result's lower triangle is filled
     * by mirroring at the end. */
    ptiIndex const nrows = mats[0]->nrows;
    ptiIndex const ncols = mats[0]->ncols;
    ptiIndex const stride = mats[0]->stride;
    for(ptiIndex m=1; m<nmodes+1; ++m) {
        assert(mats[m]->ncols == ncols);
        assert(mats[m]->nrows == nrows);
    }
    ptiValue * ovals = mats[nmodes]->values;
    /* Initialize the whole accumulator to 1.
     * NOTE(review): this loop indexes [j*stride + i] while the update loop
     * below indexes [i*stride + j]; the two cover the same cells only when
     * nrows == ncols — confirm the aTa matrices are always square. */
#ifdef HIPARTI_USE_OPENMP
    #pragma omp parallel for schedule(static)
#endif
    for(ptiIndex i=0; i < nrows; ++i) {
        for(ptiIndex j=0; j < ncols; ++j) {
            ovals[j * stride + i] = 1.0;
        }
    }
    /* Multiply in each mode's upper triangle, skipping `mode` itself. */
    for(ptiIndex m=1; m < nmodes; ++m) {
        ptiIndex const pm = (mode + m) % nmodes;
        ptiValue const * vals = mats[pm]->values;
#ifdef HIPARTI_USE_OPENMP
    #pragma omp parallel for schedule(static)
#endif
        for(ptiIndex i=0; i < nrows; ++i) {
            for(ptiIndex j=i; j < ncols; ++j) {
                ovals[i * stride + j] *= vals[i * stride + j];
            }
        }
    }
    /* Copy upper triangle to lower part */
#ifdef HIPARTI_USE_OPENMP
    #pragma omp parallel for schedule(static)
#endif
    for(ptiIndex i=0; i < nrows; ++i) {
        for(ptiIndex j=0; j < i; ++j) {
            ovals[i * stride + j] = ovals[j * stride + i];
        }
    }
    return 0;
}
// Row-major
int ptiMatrix2Norm(ptiMatrix * const A, ptiValue * const lambda)
{
    /* Compute the 2-norm of each column of A into lambda[ncols], then divide
     * every column of A by its norm (column normalization, row-major). */
    ptiIndex const nrows = A->nrows;
    ptiIndex const ncols = A->ncols;
    ptiIndex const stride = A->stride;
    ptiValue * const vals = A->values;
    ptiValue * buffer_lambda;  /* per-thread partial sums; only used under OpenMP */
#ifdef HIPARTI_USE_OPENMP
    #pragma omp parallel for
#endif
    for(ptiIndex j=0; j < ncols; ++j) {
        lambda[j] = 0.0;
    }
#ifdef HIPARTI_USE_OPENMP
    /* First parallel region: the master thread sizes and zeroes one partial
     * buffer per thread; the implicit barrier at the region's end publishes
     * buffer_lambda to the second region. */
    #pragma omp parallel
    {
        int const nthreads = omp_get_num_threads();
        #pragma omp master
        {
            buffer_lambda = (ptiValue *)malloc(nthreads * ncols * sizeof(ptiValue));
            for(ptiNnzIndex j=0; j < nthreads * ncols; ++j)
                buffer_lambda[j] = 0.0;
        }
    }
    #pragma omp parallel
    {
        int const tid = omp_get_thread_num();
        int const nthreads = omp_get_num_threads();
        ptiValue * loc_lambda = buffer_lambda + tid * ncols;
        /* Accumulate squared entries of each column into this thread's
         * private partials. */
        #pragma omp for
        for(ptiIndex i=0; i < nrows; ++i) {
            for(ptiIndex j=0; j < ncols; ++j) {
                loc_lambda[j] += vals[i*stride + j] * vals[i*stride + j];
            }
        }
        /* Reduce the per-thread partials into lambda. */
        #pragma omp for
        for(ptiIndex j=0; j < ncols; ++j) {
            for(int i=0; i < nthreads; ++i) {
                lambda[j] += buffer_lambda[i*ncols + j];
            }
        }
    } /* end parallel pragma */
#else
    /* Serial fallback: straight accumulation of squared column entries. */
    for(ptiIndex i=0; i < nrows; ++i) {
        for(ptiIndex j=0; j < ncols; ++j) {
            lambda[j] += vals[i*stride + j] * vals[i*stride + j];
        }
    }
#endif
#ifdef HIPARTI_USE_OPENMP
    #pragma omp parallel for
#endif
    for(ptiIndex j=0; j < ncols; ++j) {
        lambda[j] = sqrt(lambda[j]);
    }
    /* NOTE(review): an all-zero column yields lambda[j] == 0 and a division
     * by zero below — confirm callers guarantee nonzero columns. */
#ifdef HIPARTI_USE_OPENMP
    #pragma omp parallel for
#endif
    for(ptiIndex i=0; i < nrows; ++i) {
        for(ptiIndex j=0; j < ncols; ++j) {
            vals[i*stride + j] /= lambda[j];
        }
    }
#ifdef HIPARTI_USE_OPENMP
    free(buffer_lambda);
#endif
    return 0;
}
// Row-major
int ptiMatrixMaxNorm(ptiMatrix * const A, ptiValue * const lambda)
{
    /* Compute the max of each column of A into lambda[ncols] (clamped below
     * at 1), then divide every column of A by it (row-major layout).
     * Note: the accumulators start at 0, so for an all-negative column the
     * effective max is 0 before the clamp — i.e. lambda[j] >= 1 always. */
    ptiIndex const nrows = A->nrows;
    ptiIndex const ncols = A->ncols;
    ptiIndex const stride = A->stride;
    ptiValue * const vals = A->values;
    ptiValue * buffer_lambda;  /* per-thread partial maxima; only used under OpenMP */
#ifdef HIPARTI_USE_OPENMP
    #pragma omp parallel for
#endif
    for(ptiIndex j=0; j < ncols; ++j) {
        lambda[j] = 0.0;
    }
#ifdef HIPARTI_USE_OPENMP
    /* First parallel region: master allocates/zeroes one partial buffer per
     * thread; the region-end barrier publishes it to the next region. */
    #pragma omp parallel
    {
        int const nthreads = omp_get_num_threads();
        #pragma omp master
        {
            buffer_lambda = (ptiValue *)malloc(nthreads * ncols * sizeof(ptiValue));
            for(ptiNnzIndex j=0; j < nthreads * ncols; ++j)
                buffer_lambda[j] = 0.0;
        }
    }
    #pragma omp parallel
    {
        int const tid = omp_get_thread_num();
        int const nthreads = omp_get_num_threads();
        ptiValue * loc_lambda = buffer_lambda + tid * ncols;
        /* Track the per-thread column maxima. */
        #pragma omp for
        for(ptiIndex i=0; i < nrows; ++i) {
            for(ptiIndex j=0; j < ncols; ++j) {
                if(vals[i*stride + j] > loc_lambda[j])
                    loc_lambda[j] = vals[i*stride + j];
            }
        }
        /* Reduce the per-thread maxima into lambda. */
        #pragma omp for
        for(ptiIndex j=0; j < ncols; ++j) {
            for(int i=0; i < nthreads; ++i) {
                if(buffer_lambda[i*ncols + j] > lambda[j])
                    lambda[j] = buffer_lambda[i*ncols + j];
            }
        }
    } /* end parallel pragma */
#else
    /* Serial fallback: straight column-wise max. */
    for(ptiIndex i=0; i < nrows; ++i) {
        for(ptiIndex j=0; j < ncols; ++j) {
            if(vals[i*stride + j] > lambda[j])
                lambda[j] = vals[i*stride + j];
        }
    }
#endif
    /* Clamp the divisors to at least 1 so normalization never amplifies. */
#ifdef HIPARTI_USE_OPENMP
    #pragma omp parallel for
#endif
    for(ptiIndex j=0; j < ncols; ++j) {
        if(lambda[j] < 1)
            lambda[j] = 1;
    }
#ifdef HIPARTI_USE_OPENMP
    #pragma omp parallel for
#endif
    for(ptiIndex i=0; i < nrows; ++i) {
        for(ptiIndex j=0; j < ncols; ++j) {
            vals[i*stride + j] /= lambda[j];
        }
    }
#ifdef HIPARTI_USE_OPENMP
    free(buffer_lambda);
#endif
    return 0;
}
void GetFinalLambda(
    ptiIndex const rank,
    ptiIndex const nmodes,
    ptiMatrix ** mats,
    ptiValue * const lambda)
{
    /* Scratch vector holding one mode's column 2-norms. */
    ptiValue * mode_norms = (ptiValue *) malloc(rank * sizeof(*mode_norms));
    for(ptiIndex mode = 0; mode < nmodes; ++mode) {
        /* Normalize this mode's factor matrix and fold its column norms
         * into the accumulated lambda. */
        ptiMatrix2Norm(mats[mode], mode_norms);
        for(ptiIndex r = 0; r < rank; ++r) {
            lambda[r] *= mode_norms[r];
        }
    }
    free(mode_norms);
}
|
elastic_avx.h | //*****************************************************************************
// Title : src/equation_avx/elastic_avx.h
// Author : Tanabe Yuta
// Date : 2021/02/13
// Copyright : (C)2021 TanabeYuta
//*****************************************************************************
#pragma once
#include <immintrin.h>
// compile option for g++(MinGW) : -mavx
namespace PANSLBM2 {
namespace EL {
template<class T, template<class>class P>void Macro(T, T &, T &, T &, T &, T &, T &, const T *, int); // Function of updating macroscopic values of EL for 2D
template<class T, template<class>class P>void Macro(T, T &, T &, T &, T &, T &, T &, const T *, T, int); // Function of updating macroscopic values of EL with topology optimization for 2D
template<class T, template<class>class P>void Equilibrium(T *, T, T, T, T, T, T, T); // Function of getting equilibrium of EL for 2D
// Function of updating macroscopic values of EL for 2D
template<class P>
void Macro(const __m256d &__rho, __m256d &__ux, __m256d & __uy, __m256d &__sxx, __m256d &__sxy, __m256d &__syx, __m256d &__syy, const __m256d *__f) {
    // Accumulate the first (velocity) and second (stress) moments of the
    // distribution __f over the non-rest directions c = 1 .. nc-1.
    // Each __m256d lane holds one lattice site (4 sites per call).
    __ux = _mm256_setzero_pd();
    __uy = _mm256_setzero_pd();
    __sxx = _mm256_setzero_pd();
    __sxy = _mm256_setzero_pd();
    __syx = _mm256_setzero_pd();
    __syy = _mm256_setzero_pd();
    for (int c = 1; c < P::nc; ++c) {
        // u += c * f_c ; S -= (c x c) * f_c
        __ux = _mm256_add_pd(__ux, _mm256_mul_pd(P::__cx[c], __f[c]));
        __uy = _mm256_add_pd(__uy, _mm256_mul_pd(P::__cy[c], __f[c]));
        __sxx = _mm256_sub_pd(__sxx, _mm256_mul_pd(_mm256_mul_pd(P::__cx[c], P::__cx[c]), __f[c]));
        __sxy = _mm256_sub_pd(__sxy, _mm256_mul_pd(_mm256_mul_pd(P::__cx[c], P::__cy[c]), __f[c]));
        __syx = _mm256_sub_pd(__syx, _mm256_mul_pd(_mm256_mul_pd(P::__cy[c], P::__cx[c]), __f[c]));
        __syy = _mm256_sub_pd(__syy, _mm256_mul_pd(_mm256_mul_pd(P::__cy[c], P::__cy[c]), __f[c]));
    }
    // Velocity is the momentum divided by density.
    __m256d __invrho = _mm256_div_pd(_mm256_set1_pd(1.0), __rho);
    __ux = _mm256_mul_pd(__ux, __invrho);
    __uy = _mm256_mul_pd(__uy, __invrho);
}
// Function of updating macroscopic values of EL with topology optimization for 2D
template<class P>
void Macro(const __m256d &__rho, __m256d &__ux, __m256d & __uy, __m256d &__sxx, __m256d &__sxy, __m256d &__syx, __m256d &__syy, const __m256d *__f, const __m256d &__gamma) {
    // Plain macroscopic update, then scale the stress tensor by the design
    // variable __gamma (topology-optimization variant).
    Macro<P>(__rho, __ux, __uy, __sxx, __sxy, __syx, __syy, __f);
    __sxx = _mm256_mul_pd(__sxx, __gamma);
    __sxy = _mm256_mul_pd(__sxy, __gamma);
    __syx = _mm256_mul_pd(__syx, __gamma);
    __syy = _mm256_mul_pd(__syy, __gamma);
}
// Function of getting equilibrium of EL for 2D
template<class P>
void Equilibrium(__m256d *__feq, const __m256d &__rho, const __m256d &__ux, const __m256d &__uy, const __m256d &__sxx, const __m256d &__sxy, const __m256d &__syx, const __m256d &__syy) {
    // Trace of the stress tensor, shared by all directions.
    __m256d __trs = _mm256_add_pd(__sxx, __syy);
    for (int c = 0; c < P::nc; ++c) {
        // c . u
        __m256d __cu = _mm256_add_pd(_mm256_mul_pd(P::__cx[c], __ux), _mm256_mul_pd(P::__cy[c], __uy));
        // c^T S c
        __m256d __csc = _mm256_add_pd(_mm256_add_pd(_mm256_mul_pd(_mm256_mul_pd(P::__cx[c], P::__cx[c]), __sxx), _mm256_mul_pd(_mm256_mul_pd(P::__cx[c], P::__cy[c]), __sxy)), _mm256_add_pd(_mm256_mul_pd(_mm256_mul_pd(P::__cy[c], P::__cx[c]), __syx), _mm256_mul_pd(_mm256_mul_pd(P::__cy[c], P::__cy[c]), __syy)));
        // feq_c = w_c * (3 rho (c.u) - 4.5 (c^T S c) + 1.5 tr(S))
        // Fix: the weights live on the particle class like __cx/__cy do;
        // the original's unqualified `__ei[c]` is not resolvable in this
        // template (dependent name) — qualify it as P::__ei[c].
        __feq[c] = _mm256_mul_pd(P::__ei[c], _mm256_add_pd(_mm256_sub_pd(_mm256_mul_pd(_mm256_set1_pd(3.0), _mm256_mul_pd(__rho, __cu)), _mm256_mul_pd(_mm256_set1_pd(4.5), __csc)), _mm256_mul_pd(_mm256_set1_pd(1.5), __trs)));
    }
}
// Function of updating macroscopic values and collision of EL for 2D
template<template<class>class P>
void MacroCollide(P<double>& _p, double *_rho, double *_ux, double *_uy, double *_sxx, double *_sxy, double *_syx, double *_syy, double _tau, bool _issave = false) {
    // One lattice-Boltzmann step (macro update + BGK collision) over the
    // whole domain: AVX-packed main loop plus a scalar tail for the
    // remainder sites that do not fill a full SIMD pack.
    const int ne = _p.nxyz/P<double>::packsize;
    double omega = 1.0/_tau, iomega = 1.0 - omega, feq[P<double>::nc];
    __m256d __omega = _mm256_set1_pd(omega), __iomega = _mm256_set1_pd(iomega), __feq[P<double>::nc];
#pragma omp parallel for private(__feq)
    for (int pidx = 0; pidx < ne; ++pidx) {
        int idx = pidx*P<double>::packsize;
        // Pack f0, f and rho
        __m256d __f[P<double>::nc];
        _p.LoadF(idx, __f);
        __m256d __rho = _mm256_loadu_pd(&_rho[idx]);
        // Update macro
        __m256d __ux, __uy, __sxx, __sxy, __syx, __syy;
        Macro<P<double> >(__rho, __ux, __uy, __sxx, __sxy, __syx, __syy, __f);
        // Save macro if need
        if (_issave) {
            _mm256_storeu_pd(&_ux[idx], __ux);
            _mm256_storeu_pd(&_uy[idx], __uy);
            _mm256_storeu_pd(&_sxx[idx], __sxx);
            _mm256_storeu_pd(&_sxy[idx], __sxy);
            _mm256_storeu_pd(&_syx[idx], __syx);
            _mm256_storeu_pd(&_syy[idx], __syy);
        }
        // Collide: f <- (1 - omega) f + omega feq
        Equilibrium<P<double> >(__feq, __rho, __ux, __uy, __sxx, __sxy, __syx, __syy);
        for (int c = 0; c < P<double>::nc; ++c) {
            __f[c] = _mm256_add_pd(_mm256_mul_pd(__iomega, __f[c]), _mm256_mul_pd(__omega, __feq[c]));
        }
        _p.StoreF(idx, __f);
    }
    // Scalar tail for sites left over after the SIMD packs.
    for (int idx = ne*P<double>::packsize; idx < _p.nxyz; ++idx) {
        // Update macro
        // Fix: the original declared `T ux, ...`, but T is not a template
        // parameter of this function; the scalar path works in double, as
        // in MacroExtendedCollide below.
        double ux, uy, sxx, sxy, syx, syy;
        Macro<double, P>(_rho[idx], ux, uy, sxx, sxy, syx, syy, _p.f, idx);
        // Save macro if need
        if (_issave) {
            _ux[idx] = ux;
            _uy[idx] = uy;
            _sxx[idx] = sxx;
            _sxy[idx] = sxy;
            _syx[idx] = syx;
            _syy[idx] = syy;
        }
        // Collide
        Equilibrium<double, P>(feq, _rho[idx], ux, uy, sxx, sxy, syx, syy);
        _p.f0[idx] = iomega*_p.f0[idx] + omega*feq[0];
        for (int c = 1; c < P<double>::nc; ++c) {
            int idxf = P<double>::IndexF(idx, c);
            _p.f[idxf] = iomega*_p.f[idxf] + omega*feq[c];
        }
    }
}
// Function of updating macroscopic values and collision of EL with topology optimization for 2D
template<template<class>class P>
void MacroExtendedCollide(P<double>& _p, double *_rho, double *_ux, double *_uy, double *_sxx, double *_sxy, double *_syx, double *_syy, double _tau, const double *_gamma, bool _issave = false) {
    // Topology-optimization variant of MacroCollide: the stress moments are
    // scaled by the per-site design field _gamma before collision.
    // Fix: the original declared `const T *_gamma`, but T is not a template
    // parameter of this function; _gamma is loaded with _mm256_loadu_pd and
    // indexed as double, so its element type is double.
    const int ne = _p.nxyz/P<double>::packsize;
    double omega = 1.0/_tau, iomega = 1.0 - omega, feq[P<double>::nc];
    __m256d __omega = _mm256_set1_pd(omega), __iomega = _mm256_set1_pd(iomega), __feq[P<double>::nc];
#pragma omp parallel for private(__feq)
    for (int pidx = 0; pidx < ne; ++pidx) {
        int idx = pidx*P<double>::packsize;
        // Pack f0, f and rho
        __m256d __f[P<double>::nc];
        _p.LoadF(idx, __f);
        __m256d __rho = _mm256_loadu_pd(&_rho[idx]), __gamma = _mm256_loadu_pd(&_gamma[idx]);
        // Update macro
        __m256d __ux, __uy, __sxx, __sxy, __syx, __syy;
        Macro<P<double> >(__rho, __ux, __uy, __sxx, __sxy, __syx, __syy, __f, __gamma);
        // Save macro if need
        if (_issave) {
            _mm256_storeu_pd(&_ux[idx], __ux);
            _mm256_storeu_pd(&_uy[idx], __uy);
            _mm256_storeu_pd(&_sxx[idx], __sxx);
            _mm256_storeu_pd(&_sxy[idx], __sxy);
            _mm256_storeu_pd(&_syx[idx], __syx);
            _mm256_storeu_pd(&_syy[idx], __syy);
        }
        // Collide: f <- (1 - omega) f + omega feq
        Equilibrium<P<double> >(__feq, __rho, __ux, __uy, __sxx, __sxy, __syx, __syy);
        for (int c = 0; c < P<double>::nc; ++c) {
            __f[c] = _mm256_add_pd(_mm256_mul_pd(__iomega, __f[c]), _mm256_mul_pd(__omega, __feq[c]));
        }
        _p.StoreF(idx, __f);
    }
    // Scalar tail for sites left over after the SIMD packs.
    for (int idx = ne*P<double>::packsize; idx < _p.nxyz; ++idx) {
        // Update macro
        double ux, uy, sxx, sxy, syx, syy;
        Macro<double, P>(_rho[idx], ux, uy, sxx, sxy, syx, syy, _p.f, _gamma[idx], idx);
        // Save macro if need
        if (_issave) {
            _ux[idx] = ux;
            _uy[idx] = uy;
            _sxx[idx] = sxx;
            _sxy[idx] = sxy;
            _syx[idx] = syx;
            _syy[idx] = syy;
        }
        // Collide
        Equilibrium<double, P>(feq, _rho[idx], ux, uy, sxx, sxy, syx, syy);
        _p.f0[idx] = iomega*_p.f0[idx] + omega*feq[0];
        for (int c = 1; c < P<double>::nc; ++c) {
            int idxf = P<double>::IndexF(idx, c);
            _p.f[idxf] = iomega*_p.f[idxf] + omega*feq[c];
        }
    }
}
}
} |
DRB024-simdtruedep-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This one has data races due to true dependence.
But data races happen at instruction level, not thread level.
Data race pair: a[i+1]@66:5 vs. a[i]@66:12
*/
#include <stdio.h>
int main(int argc, char* argv[])
{
    int i;
    int len=100;
    int a[100], b[100];
    /* Initialize: a[i] = i, b[i] = i + 1. */
    for (i=0;i<len;i++)
    {
        a[i]=i;
        b[i]=i+1;
    }
    /* Intentional data race (this is the DataRaceBench test case, not a
     * defect to fix): the loop carries a true dependence a[i+1] <- a[i], so
     * `omp simd` vectorization races at the instruction level. */
#pragma omp simd
    for (i=0;i<len-1;i++)
        a[i+1]=a[i]+b[i];
    /* Print the (possibly corrupted, depending on vector width) result. */
    for (i=0;i<len;i++)
        printf("i=%d a[%d]=%d\n",i,i,a[i]);
    return 0;
}
|
GB_binop__ge_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__ge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__ge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_fp64)
// A*D function (colscale): GB (_AxD__ge_fp64)
// D*A function (rowscale): GB (_DxB__ge_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_fp64)
// C=scalar+B GB (_bind1st__ge_fp64)
// C=scalar+B' GB (_bind1st_tran__ge_fp64)
// C=A+scalar GB (_bind2nd__ge_fp64)
// C=A'+scalar GB (_bind2nd_tran__ge_fp64)
// C type: bool
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_FP64 || GxB_NO_GE_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__ge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    /* Auto-generated: C = A >= B with all three matrices dense; the whole
     * kernel body is supplied by the included template. */
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* The accumulator template is disabled (#if 0) for this operator by the
     * code generator, so this function is a stub that reports success. */
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ge_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* The scalar-accumulator template is disabled (#if 0) for this operator
     * by the code generator, so this function is a stub reporting success. */
    #if 0
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* C = A*D (column scale by diagonal D) with cij = (aij >= djj);
     * the kernel body comes from the included template. */
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* C = D*B (row scale by diagonal D) with cij = (dii >= bij);
     * the kernel body comes from the included template. */
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ge_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* eWiseAdd C = A "+" B with the GE operator; workspace slicings are
     * declared here and freed by GB_FREE_WORKSPACE after the template. */
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    double alpha_scalar ;
    double beta_scalar ;
    /* For eWiseUnion, alpha/beta stand in for entries missing from A or B. */
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((double *) alpha_scalar_in)) ;
        beta_scalar  = (*((double *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__ge_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* eWiseMult (method 08): C = A .* B where C is sparse/hypersparse;
     * the kernel body comes from the included meta template. */
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* eWiseMult (method 02): A sparse/hyper, B bitmap/full. GB_BINOP_FLIP is
     * 0 for GE (the flip z=ge(y,x) was rewritten as le by the caller), so
     * only the non-flipped branch is compiled. */
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__ge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* eWiseMult (method 04): M sparse/hyper, A and B bitmap/full;
     * the kernel body comes from the included template. */
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ge_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* eWiseMult where C is bitmap; the kernel body comes from the included
     * template. */
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ge_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* Cx [p] = (x >= Bx [p]) for every entry present in B's bitmap. */
    bool *Cx = (bool *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        /* Skip entries absent from the bitmap Bb (GBB is true when
         * Bb is NULL, i.e. B is full). */
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ge_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* Cx [p] = (Ax [p] >= y) for every entry present in A's bitmap. */
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        /* Skip entries absent from the bitmap Ab (GBB is true when
         * Ab is NULL, i.e. A is full). */
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__ge_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    /* C = (x >= A'): transpose A and apply the bound operator via the
     * GB_CAST_OP macro defined just above this function. */
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    /* Restore GB_ATYPE for the rest of the translation unit (preprocessor
     * text below the return is still processed even though it is dead code). */
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__ge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* C = (A' >= y): transpose A and apply the bound operator via the
     * GB_CAST_OP macro defined just above this function. */
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ligra.h | // This code is part of the project "Ligra: A Lightweight Graph Processing
// Framework for Shared Memory", presented at Principles and Practice of
// Parallel Programming, 2013.
// Copyright (c) 2013 Julian Shun and Guy Blelloch
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights (to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#ifndef LIGRA_H
#define LIGRA_H
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <cstring>
#include <string>
#include <algorithm>
#include <cassert>
#include "parallel.h"
#include "gettime.h"
#include "timer.h" //timer from GAP
#include "utils.h"
#include "vertex.h"
#include "compressedVertex.h"
#include "vertexSubset.h"
#include "graph.h"
#include "IO.h"
#include "parseCommandLine.h"
#include "gettime.h"
#include "index_map.h"
#include "edgeMap_utils.h"
using namespace std;
#if 1 // Added by Priyank
// Globals configured from the command line in parallel_main (see below).
int rand_gran = 1;
int num_roots = 8;
string map_file = "";
#endif
//*****START FRAMEWORK*****
// Bit-flags controlling edgeMap / vertex-function behavior; combine with |.
typedef uint32_t flags;
const flags no_output = 1;
const flags pack_edges = 2;
const flags sparse_no_filter = 4;
const flags dense_forward = 8;
const flags dense_parallel = 16;
const flags remove_duplicates = 32;
// True when the caller wants the output frontier materialized.
inline bool should_output(const flags& fl) { return !(fl & no_output); }
const int dynChunkSz = 64; //chunk size for openmp's dynamic scheduling
// Dense (pull) traversal: every vertex that still satisfies f.cond scans its
// in-neighbors for frontier members. Returns the next frontier (with per-
// vertex data) when output is requested, otherwise an empty-data subset.
template <class data, class vertex, class VS, class F>
vertexSubsetData<data> edgeMapDense(graph<vertex> GA, VS& vertexSubset, F &f, const flags fl) {
  using D = tuple<bool, data>;
  const long numV = GA.n;
  vertex *verts = GA.V;
  if (!should_output(fl)) {
    // No output frontier requested: just run the decode for its side effects.
    auto g = get_emdense_nooutput_gen<data>();
    #pragma omp parallel for schedule (dynamic, dynChunkSz)
    for (long u=0; u<numV; u++) {
      if (f.cond(u)) {
        verts[u].decodeInNghBreakEarly(u, vertexSubset, f, g, fl & dense_parallel);
      }
    }
    return vertexSubsetData<data>(numV);
  }
  // Output requested: allocate next-frontier slots, clearing each membership
  // bit before decoding that vertex.
  D* next = newA(D, numV);
  auto g = get_emdense_gen<data>(next);
  #pragma omp parallel for schedule (dynamic, dynChunkSz)
  for (long u=0; u<numV; u++) {
    std::get<0>(next[u]) = 0;
    if (f.cond(u)) {
      verts[u].decodeInNghBreakEarly(u, vertexSubset, f, g, fl & dense_parallel);
    }
  }
  return vertexSubsetData<data>(numV, next);
}
// Dense forward (push) traversal: every frontier vertex pushes along all of
// its out-edges. Unlike edgeMapDense there is no early break.
template <class data, class vertex, class VS, class F>
vertexSubsetData<data> edgeMapDenseForward(graph<vertex> GA, VS& vertexSubset, F &f, const flags fl) {
  using D = tuple<bool, data>;
  const long numV = GA.n;
  vertex *verts = GA.V;
  if (!should_output(fl)) {
    auto g = get_emdense_forward_nooutput_gen<data>();
    #pragma omp parallel for schedule (dynamic, dynChunkSz)
    for (long u=0; u<numV; u++) {
      if (vertexSubset.isIn(u)) {
        verts[u].decodeOutNgh(u, f, g);
      }
    }
    return vertexSubsetData<data>(numV);
  }
  // Output requested: zero all membership bits first (targets are written
  // from arbitrary source vertices, so this cannot be fused per-vertex).
  D* next = newA(D, numV);
  auto g = get_emdense_forward_gen<data>(next);
  parallel_for(long u=0;u<numV;u++) { std::get<0>(next[u]) = 0; }
  #pragma omp parallel for schedule (dynamic, dynChunkSz)
  for (long u=0; u<numV; u++) {
    if (vertexSubset.isIn(u)) {
      verts[u].decodeOutNgh(u, f, g);
    }
  }
  return vertexSubsetData<data>(numV, next);
}
// Sparse (push) traversal over the m frontier vertices. When output is
// requested, emitted (target, data) pairs are written at per-vertex offsets,
// optionally deduplicated, then filtered down to the valid entries.
// NOTE: `degrees` is overwritten in place by the prefix scan below.
template <class data, class vertex, class VS, class F>
vertexSubsetData<data> edgeMapSparse(graph<vertex>& GA, vertex* frontierVertices, VS& indices,
uintT* degrees, uintT m, F &f, const flags fl) {
using S = tuple<uintE, data>;
long n = indices.n;
S* outEdges;
long outEdgeCount = 0;
if (should_output(fl)) {
// Exclusive prefix sum of degrees -> each vertex's write offset.
uintT* offsets = degrees;
outEdgeCount = sequence::plusScan(offsets, offsets, m);
outEdges = newA(S, outEdgeCount);
auto g = get_emsparse_gen<data>(outEdges);
#pragma omp parallel for schedule (dynamic, dynChunkSz)
for (size_t i = 0; i < m; i++) {
uintT v = indices.vtx(i), o = offsets[i];
vertex vert = frontierVertices[i];
vert.decodeOutNghSparse(v, o, f, g);
}
} else {
// No output: decode purely for f's side effects (offset 0 is unused).
auto g = get_emsparse_nooutput_gen<data>();
#pragma omp parallel for schedule (dynamic, dynChunkSz)
for (size_t i = 0; i < m; i++) {
uintT v = indices.vtx(i);
vertex vert = frontierVertices[i];
vert.decodeOutNghSparse(v, 0, f, g);
}
}
if (should_output(fl)) {
S* nextIndices = newA(S, outEdgeCount);
if (fl & remove_duplicates) {
// Lazily allocate the per-vertex dedup flags on first use.
if (GA.flags == NULL) {
GA.flags = newA(uintE, n);
parallel_for(long i=0;i<n;i++) { GA.flags[i]=UINT_E_MAX; }
}
auto get_key = [&] (size_t i) -> uintE& { return std::get<0>(outEdges[i]); };
remDuplicates(get_key, GA.flags, outEdgeCount, n);
}
// Drop entries marked invalid (key == UINT_E_MAX) and compact.
auto p = [] (tuple<uintE, data>& v) { return std::get<0>(v) != UINT_E_MAX; };
size_t nextM = pbbs::filterf(outEdges, nextIndices, outEdgeCount, p);
free(outEdges);
return vertexSubsetData<data>(n, nextM, nextIndices);
} else {
return vertexSubsetData<data>(n);
}
}
// Sparse traversal that compacts its output with per-block counting instead
// of a global filter pass. NOTE: `offsets` is overwritten in place by the
// prefix scan below.
template <class data, class vertex, class VS, class F>
vertexSubsetData<data> edgeMapSparse_no_filter(graph<vertex>& GA,
vertex* frontierVertices, VS& indices, uintT* offsets, uintT m, F& f,
const flags fl) {
using S = tuple<uintE, data>;
long n = indices.n;
long outEdgeCount = sequence::plusScan(offsets, offsets, m);
S* outEdges = newA(S, outEdgeCount);
auto g = get_emsparse_no_filter_gen<data>(outEdges);
// binary-search into scan to map workers->chunks
size_t b_size = 10000;
size_t n_blocks = nblocks(outEdgeCount, b_size);
uintE* cts = newA(uintE, n_blocks+1);
size_t* block_offs = newA(size_t, n_blocks+1);
auto offsets_m = make_in_imap<uintT>(m, [&] (size_t i) { return offsets[i]; });
auto lt = [] (const uintT& l, const uintT& r) { return l < r; };
parallel_for(size_t i=0; i<n_blocks; i++) {
size_t s_val = i*b_size;
block_offs[i] = pbbs::binary_search(offsets_m, s_val, lt);
}
block_offs[n_blocks] = m;
// Pass 1: each block decodes its frontier range sequentially, appending
// emitted edges densely at k and recording its emitted count in cts[i].
#pragma omp parallel for schedule (dynamic, dynChunkSz / 8)
for (size_t i=0; i<n_blocks; i++) {
if ((i == n_blocks-1) || block_offs[i] != block_offs[i+1]) {
// start and end are offsets in [m]
size_t start = block_offs[i];
size_t end = block_offs[i+1];
uintT start_o = offsets[start];
uintT k = start_o;
for (size_t j=start; j<end; j++) {
uintE v = indices.vtx(j);
size_t num_in = frontierVertices[j].decodeOutNghSparseSeq(v, k, f, g);
k += num_in;
}
cts[i] = (k - start_o);
} else {
cts[i] = 0;
}
}
// Pass 2: prefix-sum the per-block counts, then copy each block's emitted
// run into its final compacted position in `out`.
long outSize = sequence::plusScan(cts, cts, n_blocks);
cts[n_blocks] = outSize;
S* out = newA(S, outSize);
parallel_for (size_t i=0; i<n_blocks; i++) {
if ((i == n_blocks-1) || block_offs[i] != block_offs[i+1]) {
size_t start = block_offs[i];
size_t start_o = offsets[start];
size_t out_off = cts[i];
size_t block_size = cts[i+1] - out_off;
for (size_t j=0; j<block_size; j++) {
out[out_off + j] = outEdges[start_o + j];
}
}
}
free(outEdges); free(cts); free(block_offs);
if (fl & remove_duplicates) {
// Lazily allocate the per-vertex dedup flags on first use.
if (GA.flags == NULL) {
GA.flags = newA(uintE, n);
parallel_for(size_t i=0;i<n;i++) { GA.flags[i]=UINT_E_MAX; }
}
auto get_key = [&] (size_t i) -> uintE& { return std::get<0>(out[i]); };
remDuplicates(get_key, GA.flags, outSize, n);
S* nextIndices = newA(S, outSize);
auto p = [] (tuple<uintE, data>& v) { return std::get<0>(v) != UINT_E_MAX; };
size_t nextM = pbbs::filterf(out, nextIndices, outSize, p);
free(out);
return vertexSubsetData<data>(n, nextM, nextIndices);
}
return vertexSubsetData<data>(n, outSize, out);
}
// Direction-optimizing edgeMap: chooses a sparse (push) or dense (pull)
// traversal based on the number of out-edges incident to the frontier.
// Returns the next frontier; `threshold` defaults to |E|/20.
template <class data, class vertex, class VS, class F>
vertexSubsetData<data> edgeMapData(graph<vertex>& GA, VS &vs, F f,
    intT threshold = -1, const flags& fl=0) {
  long numVertices = GA.n, numEdges = GA.m, m = vs.numNonzeros();
  if (threshold == -1) threshold = numEdges/20; //default threshold
  vertex *G = GA.V;
  if (numVertices != vs.numRows()) {
    cout << "edgeMap: Sizes Don't match" << endl;
    abort();
  }
  if (vs.size() == 0) return vertexSubsetData<data>(numVertices);
  vs.toSparse();
  // Gather each frontier vertex and its out-degree.
  uintT* degrees = newA(uintT, m);
  vertex* frontierVertices = newA(vertex,m);
  {parallel_for (size_t i=0; i < m; i++) {
    uintE v_id = vs.vtx(i);
    vertex v = G[v_id];
    degrees[i] = v.getOutDegree();
    frontierVertices[i] = v;
  }}
  uintT outDegrees = sequence::plusReduce(degrees, m);
  if (outDegrees == 0) {
    // BUG FIX: these scratch arrays leaked on this early-exit path.
    free(degrees); free(frontierVertices);
    return vertexSubsetData<data>(numVertices);
  }
  if (m + outDegrees > threshold) {
    // Dense: scan all vertices; the frontier must be in bitmap form.
    vs.toDense();
    free(degrees); free(frontierVertices);
    return (fl & dense_forward) ?
      edgeMapDenseForward<data, vertex, VS, F>(GA, vs, f, fl) :
      edgeMapDense<data, vertex, VS, F>(GA, vs, f, fl);
  } else {
    // Sparse: process only the frontier's out-edges.
    auto vs_out =
      (should_output(fl) && fl & sparse_no_filter) ? // only call snof when we output
      edgeMapSparse_no_filter<data, vertex, VS, F>(GA, frontierVertices, vs, degrees, vs.numNonzeros(), f, fl) :
      edgeMapSparse<data, vertex, VS, F>(GA, frontierVertices, vs, degrees, vs.numNonzeros(), f, fl);
    free(degrees); free(frontierVertices);
    return vs_out;
  }
}
// Standard edgeMap: identical to edgeMapData but produces no per-vertex
// payload (the data type is pbbs::empty).
template <class vertex, class VS, class F>
vertexSubset edgeMap(graph<vertex> GA, VS& vs, F f,
    intT threshold = -1, const flags& fl=0) {
  return edgeMapData<pbbs::empty, vertex, VS, F>(GA, vs, f, threshold, fl);
}
/* Prints the frontier size as a percentage of |V|; only active for KCore. */
template <class VS>
void frontierStats(VS& vs, long numVertices, bool KCore = false) {
  if (!KCore) return;
  double percent = (static_cast<double>(vs.size()) / static_cast<double>(numVertices)) * 100;
  if (vs.dense()) {
    std::cout << "PULL iteration. Frontier size = " << percent << std::endl;
  } else {
    std::cout << "PUSH iteration. Frontier size = " << percent << std::endl;
  }
}
// Packs out the adjacency lists of all vertex in vs. A neighbor, ngh, is kept
// in the new adjacency list if p(ngh) is true.
// Weighted graphs are not yet supported, but this should be easy to do.
// Returns, when output is requested, a (vertex, new-degree) pair per vertex.
template <class vertex, class P>
vertexSubsetData<uintE> packEdges(graph<vertex>& GA, vertexSubset& vs, P& p, const flags& fl=0) {
using S = tuple<uintE, uintE>;
vs.toSparse();
vertex* G = GA.V; long m = vs.numNonzeros(); long n = vs.numRows();
if (vs.size() == 0) {
return vertexSubsetData<uintE>(n);
}
// Per-frontier-vertex out-degrees, prefix-summed into scratch offsets so
// each vertex gets a disjoint slice of the shared scratch buffers below.
auto degrees = array_imap<uintT>(m);
granular_for(i, 0, m, (m > 2000), {
uintE v = vs.vtx(i);
degrees[i] = G[v].getOutDegree();
});
long outEdgeCount = pbbs::scan_add(degrees, degrees);
S* outV;
if (should_output(fl)) {
outV = newA(S, vs.size());
}
bool* bits = newA(bool, outEdgeCount);
uintE* tmp1 = newA(uintE, outEdgeCount);
uintE* tmp2 = newA(uintE, outEdgeCount);
if (should_output(fl)) {
parallel_for (size_t i=0; i<m; i++) {
uintE v = vs.vtx(i);
size_t offset = degrees[i];
auto bitsOff = &(bits[offset]); auto tmp1Off = &(tmp1[offset]);
auto tmp2Off = &(tmp2[offset]);
size_t ct = G[v].packOutNgh(v, p, bitsOff, tmp1Off, tmp2Off);
outV[i] = make_tuple(v, ct);
}
} else {
parallel_for (size_t i=0; i<m; i++) {
uintE v = vs.vtx(i);
size_t offset = degrees[i];
auto bitsOff = &(bits[offset]); auto tmp1Off = &(tmp1[offset]);
auto tmp2Off = &(tmp2[offset]);
// packOutNgh is invoked for its side effect of rewriting the adjacency
// list in place; ct is intentionally unused in the no-output branch.
size_t ct = G[v].packOutNgh(v, p, bitsOff, tmp1Off, tmp2Off);
}
}
free(bits); free(tmp1); free(tmp2);
if (should_output(fl)) {
return vertexSubsetData<uintE>(n, m, outV);
} else {
return vertexSubsetData<uintE>(n);
}
}
// For each vertex in vs, counts its out-neighbors satisfying p; with the
// pack_edges flag set, delegates to packEdges (which also rewrites the
// adjacency lists in place).
template <class vertex, class P>
vertexSubsetData<uintE> edgeMapFilter(graph<vertex>& GA, vertexSubset& vs, P& p, const flags& fl=0) {
vs.toSparse();
if (fl & pack_edges) {
return packEdges<vertex, P>(GA, vs, p, fl);
}
vertex* G = GA.V; long m = vs.numNonzeros(); long n = vs.numRows();
using S = tuple<uintE, uintE>;
if (vs.size() == 0) {
return vertexSubsetData<uintE>(n);
}
S* outV;
if (should_output(fl)) {
outV = newA(S, vs.size());
}
if (should_output(fl)) {
parallel_for (size_t i=0; i<m; i++) {
uintE v = vs.vtx(i);
size_t ct = G[v].countOutNgh(v, p);
outV[i] = make_tuple(v, ct);
}
} else {
parallel_for (size_t i=0; i<m; i++) {
uintE v = vs.vtx(i);
// NOTE(review): ct is unused here; presumably countOutNgh is run for
// p's side effects — confirm against callers before removing.
size_t ct = G[v].countOutNgh(v, p);
}
}
if (should_output(fl)) {
return vertexSubsetData<uintE>(n, m, outV);
} else {
return vertexSubsetData<uintE>(n);
}
}
//*****VERTEX FUNCTIONS*****
template <class F, class VS, typename std::enable_if<
!std::is_same<VS, vertexSubset>::value, int>::type=0 >
void vertexMap(VS& V, F f) {
size_t n = V.numRows(), m = V.numNonzeros();
if(V.dense()) {
parallel_for(long i=0;i<n;i++) {
if(V.isIn(i)) {
f(i, V.ithData(i));
}
}
} else {
parallel_for(long i=0;i<m;i++) {
f(V.vtx(i), V.vtxData(i));
}
}
}
template <class VS, class F, typename std::enable_if<
std::is_same<VS, vertexSubset>::value, int>::type=0 >
void vertexMap(VS& V, F f) {
size_t n = V.numRows(), m = V.numNonzeros();
if(V.dense()) {
parallel_for(long i=0;i<n;i++) {
if(V.isIn(i)) {
f(i);
}
}
} else {
parallel_for(long i=0;i<m;i++) {
f(V.vtx(i));
}
}
}
// Returns the subset of V's members for which `filter` holds; the result is
// produced in dense (bitmap) form. `filter` is invoked only on members of V.
template <class F>
vertexSubset vertexFilter(vertexSubset V, F filter) {
  long n = V.numRows(), m = V.numNonzeros();
  V.toDense();
  bool* d_out = newA(bool,n);
  // Single pass: slot i survives iff it is in V and filter(i) is true.
  {parallel_for(long i=0;i<n;i++) d_out[i] = V.d[i] ? filter(i) : 0;}
  return vertexSubset(n,d_out);
}
// Sparse variant of vertexFilter: evaluates filter(v) for each frontier
// vertex and packs the survivors into a new sparse vertexSubset.
template <class F>
vertexSubset vertexFilter2(vertexSubset V, F filter) {
long n = V.numRows(), m = V.numNonzeros();
if (m == 0) {
return vertexSubset(n);
}
bool* bits = newA(bool, m);
V.toSparse();
// Evaluate the predicate once per frontier vertex.
{parallel_for(size_t i=0; i<m; i++) {
uintE v = V.vtx(i);
bits[i] = filter(v);
}}
// Pack the surviving vertex ids; ownership of out.s transfers to the
// returned vertexSubset (hence alloc = false so out's dtor won't free it).
auto v_imap = make_in_imap<uintE>(m, [&] (size_t i) { return V.vtx(i); });
auto bits_m = make_in_imap<bool>(m, [&] (size_t i) { return bits[i]; });
auto out = pbbs::pack(v_imap, bits_m);
out.alloc = false;
free(bits);
return vertexSubset(n, out.size(), out.s);
}
// vertexFilter2 over a vertexSubsetData: keeps vertex v when
// filter(v, data) holds; the per-vertex payload is dropped from the result.
template <class data, class F>
vertexSubset vertexFilter2(vertexSubsetData<data> V, F filter) {
long n = V.numRows(), m = V.numNonzeros();
if (m == 0) {
return vertexSubset(n);
}
bool* bits = newA(bool, m);
V.toSparse();
parallel_for(size_t i=0; i<m; i++) {
auto t = V.vtxAndData(i);
bits[i] = filter(std::get<0>(t), std::get<1>(t));
}
// Pack surviving vertex ids; out.s ownership transfers to the returned
// vertexSubset (alloc = false prevents a double free).
auto v_imap = make_in_imap<uintE>(m, [&] (size_t i) { return V.vtx(i); });
auto bits_m = make_in_imap<bool>(m, [&] (size_t i) { return bits[i]; });
auto out = pbbs::pack(v_imap, bits_m);
out.alloc = false;
free(bits);
return vertexSubset(n, out.size(), out.s);
}
#if 1 // Added by Priyank
// Composes two vertex-reordering maps: vertex i's final id is
// second[first[i]]. All three pvectors must have the same length.
void mergeTwoPreprocessingIndices(pvector<uintE>& first, pvector<uintE>& second, pvector<uintE>& new_ids) {
assert(first.size() == second.size());
assert(first.size() == new_ids.size());
long int numVertices = first.size();
{parallel_for(long i = 0 ; i < numVertices ; i++ ) {
new_ids[i] = second[first[i]];
}}
}
#endif
// Condition functor that accepts every vertex (for unconditioned edgeMaps).
inline bool cond_true (intT d) { return true; }
template<class vertex>
void Compute(graph<vertex>&, commandLine, pvector<uintE> &new_ids);
// Driver (Ligra's parallel_main): parses options, loads the graph,
// optionally reorders it, then runs Compute once plus `rounds` more times.
int parallel_main(int argc, char* argv[]) {
commandLine P(argc,argv," [-s] <inFile>");
char* iFile = P.getArgument(0);
bool symmetric = P.getOptionValue("-s");
bool compressed = P.getOptionValue("-c");
bool binary = P.getOptionValue("-b");
bool mmap = P.getOptionValue("-m");
bool isPageRank = (P.getOptionIntValue("-is_pagerank", -1) == 1);
bool isDenseWrite = (P.getOptionIntValue("-is_dense_write", -1) == 1);
long rounds = P.getOptionLongValue("-rounds",3);
#if 1 // Added by Priyank
int degree_used_for_reordering = P.getOptionIntValue("-degree_used_for_reordering", -1);
long threads = P.getOptionIntValue("-threads", omp_get_max_threads());
if ( threads != omp_get_max_threads() ) {
omp_set_num_threads(threads);
}
rand_gran = P.getOptionIntValue("-rand_gran", 1);
num_roots = P.getOptionIntValue("-num_roots", 8);
map_file = P.getOptionValue("-map_file", "");
string degree_used_for_reordering_str = "none";
enum ReorderingAlgo reordering_algo = (ReorderingAlgo) P.getOptionLongValue("-reordering_algo", DBG);
// -degree_used_for_reordering must be 0 (out-degree) or 1 (in-degree);
// any other value disables reordering entirely (forces ORIGINAL).
if ( degree_used_for_reordering == 0 || degree_used_for_reordering == 1 ) {
if ( reordering_algo != ORIGINAL ) {
degree_used_for_reordering_str = (degree_used_for_reordering == 0) ? "out-degree" : "in-degree";
} else {
degree_used_for_reordering_str = "none";
}
} else {
reordering_algo = ORIGINAL;
}
// MAP-based reorderings need an external mapping file.
if ( reordering_algo >= MAP ) {
assert(map_file != "");
}
cout << "============ PARAMETERS ==========" << endl;
cout << "threads: " << threads << " omp_get_max_threads: " << omp_get_max_threads() << endl;
cout << "num_roots: " << num_roots << endl;
cout << "map_file: " << map_file << endl;
cout << "rand_gran: " << rand_gran << endl;
cout << "reordering_algo: " << reordering_algo << endl;
cout << "reordering_algo_str: " << ReorderingAlgoStr(reordering_algo) << endl;
cout << "degree_used_for_reordering: " << degree_used_for_reordering << endl;
cout << "degree_used_for_reordering_str: " << degree_used_for_reordering_str << endl;
cout << "is_dense_write: " << isDenseWrite << endl;
cout << "is_pagerank: " << isPageRank << endl;
cout << "==================================" << endl;
#endif
if (symmetric) {
graph<symmetricVertex> G =
readGraph<symmetricVertex>(iFile,compressed,symmetric,binary,mmap); //symmetric graph
pvector<uintE> new_ids(G.n, UINT_E_MAX);
if ( reordering_algo != ORIGINAL ) {
#if 1 // Added by Priyank
graph<symmetricVertex> newG;
// algo > MAP encodes "apply MAP first, then (algo - MAP)"; the two
// resulting vertex maps are composed into new_ids.
if ( reordering_algo > MAP ) {
pvector<uintE> new_ids1(G.n, UINT_E_MAX);
pvector<uintE> new_ids2(G.n, UINT_E_MAX);
graph<symmetricVertex> newG1 = preprocessGraph<symmetricVertex>(G, symmetric, (degree_used_for_reordering == 0), new_ids1, false, false, MAP);
newG = preprocessGraph<symmetricVertex>(newG1, symmetric, (degree_used_for_reordering == 0), new_ids2, false, false, (ReorderingAlgo)(reordering_algo - MAP));
mergeTwoPreprocessingIndices(new_ids1, new_ids2, new_ids);
newG1.del();
} else {
newG = preprocessGraph<symmetricVertex>(G, symmetric, (degree_used_for_reordering == 0), new_ids, false, false, reordering_algo);
}
G.del();
// First run warms up; the following `rounds` runs repeat the computation.
Compute(newG,P,new_ids);
for(int r=0;r<rounds;r++) {
Compute(newG,P,new_ids);
}
newG.del();
#endif
}
else {
Compute(G,P,new_ids);
for(int r=0;r<rounds;r++) {
Compute(G,P,new_ids);
}
G.del();
}
} else {
graph<asymmetricVertex> G =
readGraph<asymmetricVertex>(iFile,compressed,symmetric,binary,mmap); //asymmetric graph
pvector<uintE> new_ids(G.n, UINT_E_MAX);
if ( reordering_algo != ORIGINAL ) {
#if 1 // Added by Priyank
graph<asymmetricVertex> newG;
if ( reordering_algo > MAP ) {
pvector<uintE> new_ids1(G.n, UINT_E_MAX);
pvector<uintE> new_ids2(G.n, UINT_E_MAX);
graph<asymmetricVertex> newG1 = preprocessGraph<asymmetricVertex>(G, symmetric, (degree_used_for_reordering == 0), new_ids1, isPageRank, isDenseWrite, MAP);
newG = preprocessGraph<asymmetricVertex>(newG1, symmetric, (degree_used_for_reordering == 0), new_ids2, isPageRank, isDenseWrite, (ReorderingAlgo)(reordering_algo - MAP));
mergeTwoPreprocessingIndices(new_ids1, new_ids2, new_ids);
newG1.del();
} else {
newG = preprocessGraph<asymmetricVertex>(G, symmetric, (degree_used_for_reordering == 0), new_ids, isPageRank, isDenseWrite, reordering_algo);
}
G.del();
Compute(newG,P,new_ids);
// Undo any transpose Compute left behind before the next round.
if(newG.transposed) newG.transpose();
for(int r=0;r<rounds;r++) {
Compute(newG,P,new_ids);
if(newG.transposed) newG.transpose();
}
newG.del();
#endif
}
else {
Compute(G,P,new_ids);
if(G.transposed) G.transpose();
for(int r=0;r<rounds;r++) {
Compute(G,P,new_ids);
if(G.transposed) G.transpose();
}
G.del();
}
}
}
#endif
|
threadpool.h | /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
/* Modifications Copyright (c) Microsoft. */
#pragma once
#include <string>
#include <vector>
#include <functional>
#include <memory>
#include "core/common/common.h"
#include "core/platform/env.h"
#include "core/common/optional.h"
#include <functional>
#include <memory>
// This file use PIMPL to avoid having eigen headers here
namespace Eigen {
class Allocator;
class ThreadPoolInterface;
struct ThreadPoolDevice;
} // namespace Eigen
namespace onnxruntime {
// Cost estimate for one unit of work, used by the cost-based (Adaptive)
// ParallelFor shard sizing.
struct TensorOpCost {
// Bytes read per unit of work.
double bytes_loaded;
// Bytes written per unit of work.
double bytes_stored;
// Estimated CPU cycles of compute per unit of work.
double compute_cycles;
};
template <typename Environment>
class ThreadPoolTempl;
namespace concurrency {
class ThreadPool {
public:
// Scheduling strategies for ParallelFor. The strategy governs how the given
// units of work are distributed among the available threads in the
// threadpool.
enum class SchedulingStrategy {
// The Adaptive scheduling strategy adaptively chooses the shard sizes based
// on the cost of each unit of work, and the cost model of the underlying
// threadpool device.
//
// The 'cost_per_unit' is an estimate of the number of CPU cycles (or
// nanoseconds if not CPU-bound) to complete a unit of work. Overestimating
// creates too many shards and CPU time will be dominated by per-shard
// overhead, such as Context creation. Underestimating may not fully make
// use of the specified parallelism, and may also cause inefficiencies due
// to load balancing issues and stragglers.
kAdaptive,
// The Fixed Block Size scheduling strategy shards the given units of work
// into shards of fixed size. In case the total number of units is not
// evenly divisible by 'block_size', at most one of the shards may be of
// smaller size. The exact number of shards may be found by a call to
// NumShardsUsedByFixedBlockSizeScheduling.
//
// Each shard may be executed on a different thread in parallel, depending
// on the number of threads available in the pool. Note that when there
// aren't enough threads in the pool to achieve full parallelism, function
// calls will be automatically queued.
kFixedBlockSize
};
// Contains additional parameters for either the Adaptive or the Fixed Block
// Size scheduling strategy.
class SchedulingParams {
public:
// cost_per_unit applies to kAdaptive; block_size applies to kFixedBlockSize.
explicit SchedulingParams(SchedulingStrategy strategy, optional<int64_t> cost_per_unit,
optional<std::ptrdiff_t> block_size)
: strategy_(strategy), cost_per_unit_(cost_per_unit), block_size_(block_size) {
}
SchedulingStrategy strategy() const {
return strategy_;
}
optional<int64_t> cost_per_unit() const {
return cost_per_unit_;
}
optional<std::ptrdiff_t> block_size() const {
return block_size_;
}
private:
// The underlying Scheduling Strategy for which this instance contains
// additional parameters.
SchedulingStrategy strategy_;
// The estimated cost per unit of work in number of CPU cycles (or
// nanoseconds if not CPU-bound). Only applicable for Adaptive scheduling
// strategy.
optional<int64_t> cost_per_unit_;
// The block size of each shard. Only applicable for Fixed Block Size
// scheduling strategy.
optional<std::ptrdiff_t> block_size_;
};
// Thread names are wide strings on Windows, narrow elsewhere.
#ifdef _WIN32
using NAME_CHAR_TYPE = wchar_t;
#else
using NAME_CHAR_TYPE = char;
#endif
// Constructs a pool that contains "num_threads" threads with specified
// "name". env->StartThread() is used to create individual threads with the
// given ThreadOptions. If "low_latency_hint" is true the thread pool
// implementation may use it as a hint that lower latency is preferred at the
// cost of higher CPU usage, e.g. by letting one or more idle threads spin
// wait. Conversely, if the threadpool is used to schedule high-latency
// operations like I/O the hint should be set to false.
//
// REQUIRES: num_threads > 0
// The allocator parameter is only used for creating a Eigen::ThreadPoolDevice to be used with Eigen Tensor classes.
ThreadPool(Env* env, const ThreadOptions& thread_options, const NAME_CHAR_TYPE* name, int num_threads,
bool low_latency_hint, Eigen::Allocator* allocator = nullptr);
// Constructs a pool that wraps around the thread::ThreadPoolInterface
// instance provided by the caller. Caller retains ownership of
// `user_threadpool` and must ensure its lifetime is longer than the
// ThreadPool instance.
ThreadPool(Eigen::ThreadPoolInterface* user_threadpool, Eigen::Allocator* allocator);
// Waits until all scheduled work has finished and then destroy the
// set of threads.
~ThreadPool();
// Schedules fn() for execution in the pool of threads.
void Schedule(std::function<void()> fn);
// Returns the number of shards used by ParallelForFixedBlockSizeScheduling
// with these parameters.
int NumShardsUsedByFixedBlockSizeScheduling(std::ptrdiff_t total, std::ptrdiff_t block_size);
// ParallelFor shards the "total" units of work assuming each unit of work
// having roughly "cost_per_unit" cost, in cycles. Each unit of work is
// indexed 0, 1, ..., total - 1. Each shard contains 1 or more units of work
// and the total cost of each shard is roughly the same.
//
// "cost_per_unit" is an estimate of the number of CPU cycles (or nanoseconds
// if not CPU-bound) to complete a unit of work. Overestimating creates too
// many shards and CPU time will be dominated by per-shard overhead, such as
// Context creation. Underestimating may not fully make use of the specified
// parallelism, and may also cause inefficiencies due to load balancing
// issues and stragglers.
void ParallelFor(std::ptrdiff_t total, double cost_per_unit,
const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn);
// Null-safe wrapper: runs fn(0, total) inline when tp is nullptr.
static void TryParallelFor(concurrency::ThreadPool* tp, std::ptrdiff_t total, double cost_per_unit,
const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn) {
TryParallelFor(tp, total, TensorOpCost{0, 0, static_cast<double>(cost_per_unit)}, fn);
}
void ParallelFor(std::ptrdiff_t total, const TensorOpCost& cost_per_unit,
const std::function<void(std::ptrdiff_t first, std::ptrdiff_t)>& fn);
// Null-safe wrapper: runs fn(0, total) inline when tp is nullptr.
static void TryParallelFor(concurrency::ThreadPool* tp, std::ptrdiff_t total, const TensorOpCost& cost_per_unit,
const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn) {
if (tp == nullptr) {
fn(0, total);
return;
}
tp->ParallelFor(total, cost_per_unit, fn);
}
// Similar to ParallelFor above, but takes the specified scheduling strategy
// into account.
void
ParallelFor(std::ptrdiff_t total, const SchedulingParams& scheduling_params,
const std::function<void(std::ptrdiff_t, std::ptrdiff_t)>& fn);
static void TryParallelFor(concurrency::ThreadPool* tp, std::ptrdiff_t total, const SchedulingParams& scheduling_params,
const std::function<void(std::ptrdiff_t, std::ptrdiff_t)>& fn) {
if (tp == nullptr) {
fn(0, total);
return;
}
tp->ParallelFor(total, scheduling_params, fn);
}
// Prefer using this API to get the number of threads unless you know what you're doing.
// This API takes into account if openmp is enabled/disabled and if the thread pool ptr is nullptr.
static int NumThreads(const concurrency::ThreadPool* tp);
// Returns the number of threads in the pool. Preferably use the static version of this API instead.
int NumThreads() const;
// Returns current thread id between 0 and NumThreads() - 1, if called from a
// thread in the pool. Returns -1 otherwise.
int CurrentThreadId() const;
// If ThreadPool implementation is compatible with Eigen::ThreadPoolInterface,
// returns a non-null pointer. The caller does not own the object the returned
// pointer points to, and should not attempt to delete.
Eigen::ThreadPoolInterface* AsEigenThreadPool() const;
// Directly schedule the 'total' tasks to the underlying threadpool, without
// cutting them by halves
void SimpleParallelFor(std::ptrdiff_t total, std::function<void(std::ptrdiff_t)> fn);
#ifdef _OPENMP
// With OpenMP enabled, batching is delegated to the OpenMP runtime; the
// pool pointer and num_batches are ignored.
template <typename F>
inline static void TryBatchParallelFor(ThreadPool*, std::ptrdiff_t total, F&& fn, std::ptrdiff_t /*num_batches*/) {
#pragma omp parallel for
for (std::ptrdiff_t i = 0; i < total; ++i) {
fn(i);
}
}
#else
/**
* Tries to call the given function in parallel, with calls split into (num_batches) batches.
*\param num_batches If it is zero, it will be replaced to the value of NumThreads().
*\param fn A std::function or STL style functor with signature of "void f(int32_t);"
* Pitfall: Caller should cap `num_batches` to a reasonable value based on the cost of `fn` and the value of `total`.
*For example, if fn is as simple as: int sum=0; fn = [&](int i){sum +=i;} and `total` is 100, then num_batches should
*be just 1.
*
* ```
**/
template <typename F>
inline static void TryBatchParallelFor(ThreadPool* tp, std::ptrdiff_t total, F&& fn, std::ptrdiff_t num_batches) {
if (tp == nullptr) {
for (std::ptrdiff_t i = 0; i < total; ++i) {
// In many cases, fn can be inlined here.
fn(i);
}
return;
}
if (total <= 0)
return;
if (total == 1) {
fn(0);
return;
}
if (num_batches <= 0) {
num_batches = std::min<ptrdiff_t>(total, tp->NumThreads());
}
if (num_batches <= 1) {
for (int i = 0; i < total; i++) {
fn(i);
}
return;
}
// Split [0, total) into num_batches contiguous ranges, one task per range.
tp->SimpleParallelFor(num_batches, [&](std::ptrdiff_t batch_index) {
std::ptrdiff_t start, work_remaining;
PartitionWork(batch_index, num_batches, total, &start, &work_remaining);
std::ptrdiff_t end = start + work_remaining;
for (std::ptrdiff_t i = start; i < end; i++) {
fn(i);
}
});
}
#endif
#ifndef _OPENMP
//Deprecated. Please avoid using Eigen Tensor because it will blow up binary size quickly.
Eigen::ThreadPoolDevice& Device() {
return *threadpool_device_;
}
#endif
ORT_DISALLOW_COPY_AND_ASSIGNMENT(ThreadPool);
private:
// Divides the work represented by the range [0, total) into k shards.
// Calls fn(i*block_size, (i+1)*block_size) from the ith shard (0 <= i < k).
// Each shard may be executed on a different thread in parallel, depending on
// the number of threads available in the pool.
// When (i+1)*block_size > total, fn(i*block_size, total) is called instead.
// Here, k = NumShardsUsedByFixedBlockSizeScheduling(total, block_size).
// Requires 0 < block_size <= total.
void ParallelForFixedBlockSizeScheduling(std::ptrdiff_t total, std::ptrdiff_t block_size,
const std::function<void(std::ptrdiff_t, std::ptrdiff_t)>& fn);
// Options used when this pool creates and owns its threads.
ThreadOptions thread_options_;
// underlying_threadpool_ is the user_threadpool if user_threadpool is
// provided in the constructor. Otherwise it is the eigen_threadpool_.
Eigen::ThreadPoolInterface* underlying_threadpool_;
// eigen_threadpool_ is instantiated and owned by thread::ThreadPool if
// user_threadpool is not in the constructor.
std::unique_ptr<ThreadPoolTempl<Env>> eigen_threadpool_;
#ifndef _OPENMP
std::unique_ptr<Eigen::ThreadPoolDevice> threadpool_device_;
#endif
// Copied from MlasPartitionWork
// Splits TotalWork as evenly as possible across ThreadCount workers; the
// first (TotalWork % ThreadCount) workers each get one extra unit.
static void PartitionWork(std::ptrdiff_t ThreadId, std::ptrdiff_t ThreadCount, std::ptrdiff_t TotalWork,
std::ptrdiff_t* WorkIndex, std::ptrdiff_t* WorkRemaining) {
const std::ptrdiff_t WorkPerThread = TotalWork / ThreadCount;
const std::ptrdiff_t WorkPerThreadExtra = TotalWork % ThreadCount;
if (ThreadId < WorkPerThreadExtra) {
*WorkIndex = (WorkPerThread + 1) * ThreadId;
*WorkRemaining = WorkPerThread + 1;
} else {
*WorkIndex = WorkPerThread * ThreadId + WorkPerThreadExtra;
*WorkRemaining = WorkPerThread;
}
}
};
} // namespace concurrency
} // namespace onnxruntime
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.